Diffstat (limited to 'net')
-rw-r--r--  net/9p/protocol.c  22
-rw-r--r--  net/Kconfig  4
-rw-r--r--  net/atm/lec.c  2
-rw-r--r--  net/ax25/af_ax25.c  2
-rw-r--r--  net/bluetooth/Makefile  2
-rw-r--r--  net/bluetooth/hci_conn.c  16
-rw-r--r--  net/bluetooth/hci_core.c  21
-rw-r--r--  net/bluetooth/hci_event.c  42
-rw-r--r--  net/bluetooth/hci_sock.c  52
-rw-r--r--  net/bluetooth/l2cap.c  92
-rw-r--r--  net/bluetooth/mgmt.c  308
-rw-r--r--  net/bluetooth/rfcomm/core.c  3
-rw-r--r--  net/bridge/br_fdb.c  4
-rw-r--r--  net/bridge/br_forward.c  16
-rw-r--r--  net/bridge/br_multicast.c  28
-rw-r--r--  net/bridge/br_stp_bpdu.c  2
-rw-r--r--  net/bridge/br_stp_if.c  2
-rw-r--r--  net/bridge/netfilter/ebtables.c  2
-rw-r--r--  net/caif/caif_socket.c  2
-rw-r--r--  net/caif/cfcnfg.c  9
-rw-r--r--  net/caif/chnl_net.c  18
-rw-r--r--  net/can/bcm.c  7
-rw-r--r--  net/can/raw.c  3
-rw-r--r--  net/ceph/ceph_hash.c  3
-rw-r--r--  net/ceph/messenger.c  67
-rw-r--r--  net/ceph/osd_client.c  25
-rw-r--r--  net/ceph/osdmap.c  4
-rw-r--r--  net/ceph/pagevec.c  16
-rw-r--r--  net/core/dev.c  197
-rw-r--r--  net/core/ethtool.c  2
-rw-r--r--  net/core/fib_rules.c  3
-rw-r--r--  net/core/filter.c  74
-rw-r--r--  net/core/netpoll.c  2
-rw-r--r--  net/core/rtnetlink.c  6
-rw-r--r--  net/core/skbuff.c  11
-rw-r--r--  net/core/sock.c  8
-rw-r--r--  net/dcb/Makefile  2
-rw-r--r--  net/dcb/dcbevent.c  40
-rw-r--r--  net/dcb/dcbnl.c  448
-rw-r--r--  net/dccp/Kconfig  4
-rw-r--r--  net/dccp/dccp.h  3
-rw-r--r--  net/dccp/input.c  2
-rw-r--r--  net/dccp/sysctl.c  4
-rw-r--r--  net/decnet/dn_dev.c  2
-rw-r--r--  net/dsa/dsa.c  4
-rw-r--r--  net/econet/af_econet.c  4
-rw-r--r--  net/ethernet/eth.c  12
-rw-r--r--  net/ipv4/Kconfig  4
-rw-r--r--  net/ipv4/af_inet.c  16
-rw-r--r--  net/ipv4/ah4.c  7
-rw-r--r--  net/ipv4/arp.c  40
-rw-r--r--  net/ipv4/fib_frontend.c  16
-rw-r--r--  net/ipv4/inet_connection_sock.c  5
-rw-r--r--  net/ipv4/inetpeer.c  2
-rw-r--r--  net/ipv4/ip_fragment.c  34
-rw-r--r--  net/ipv4/ipmr.c  76
-rw-r--r--  net/ipv4/netfilter/arp_tables.c  45
-rw-r--r--  net/ipv4/netfilter/arpt_mangle.c  6
-rw-r--r--  net/ipv4/netfilter/ip_tables.c  45
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c  2
-rw-r--r--  net/ipv4/raw.c  19
-rw-r--r--  net/ipv4/route.c  85
-rw-r--r--  net/ipv4/tcp_input.c  31
-rw-r--r--  net/ipv4/tcp_ipv4.c  5
-rw-r--r--  net/ipv4/tcp_output.c  13
-rw-r--r--  net/ipv6/addrconf.c  84
-rw-r--r--  net/ipv6/af_inet6.c  2
-rw-r--r--  net/ipv6/ah6.c  8
-rw-r--r--  net/ipv6/inet6_connection_sock.c  2
-rw-r--r--  net/ipv6/ip6_output.c  15
-rw-r--r--  net/ipv6/ip6mr.c  75
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c  45
-rw-r--r--  net/ipv6/netfilter/nf_defrag_ipv6_hooks.c  8
-rw-r--r--  net/ipv6/raw.c  19
-rw-r--r--  net/ipv6/route.c  19
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c  9
-rw-r--r--  net/ipv6/xfrm6_output.c  16
-rw-r--r--  net/ipv6/xfrm6_policy.c  6
-rw-r--r--  net/irda/af_irda.c  19
-rw-r--r--  net/irda/irnet/irnet_ppp.c  1
-rw-r--r--  net/iucv/iucv.c  2
-rw-r--r--  net/mac80211/Kconfig  8
-rw-r--r--  net/mac80211/agg-rx.c  11
-rw-r--r--  net/mac80211/agg-tx.c  7
-rw-r--r--  net/mac80211/cfg.c  195
-rw-r--r--  net/mac80211/debugfs_key.c  37
-rw-r--r--  net/mac80211/debugfs_key.h  8
-rw-r--r--  net/mac80211/debugfs_sta.c  2
-rw-r--r--  net/mac80211/driver-ops.h  32
-rw-r--r--  net/mac80211/driver-trace.h  80
-rw-r--r--  net/mac80211/ieee80211_i.h  46
-rw-r--r--  net/mac80211/iface.c  22
-rw-r--r--  net/mac80211/key.c  89
-rw-r--r--  net/mac80211/key.h  3
-rw-r--r--  net/mac80211/led.c  186
-rw-r--r--  net/mac80211/led.h  45
-rw-r--r--  net/mac80211/main.c  33
-rw-r--r--  net/mac80211/mesh.c  52
-rw-r--r--  net/mac80211/mesh.h  22
-rw-r--r--  net/mac80211/mesh_plink.c  3
-rw-r--r--  net/mac80211/offchannel.c  85
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c  2
-rw-r--r--  net/mac80211/rx.c  164
-rw-r--r--  net/mac80211/sta_info.h  2
-rw-r--r--  net/mac80211/tx.c  42
-rw-r--r--  net/mac80211/util.c  3
-rw-r--r--  net/mac80211/wme.c  20
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c  2
-rw-r--r--  net/netfilter/nf_conntrack_core.c  30
-rw-r--r--  net/netfilter/nf_conntrack_ecache.c  3
-rw-r--r--  net/netfilter/nf_conntrack_expect.c  10
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c  43
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c  2
-rw-r--r--  net/netfilter/x_tables.c  3
-rw-r--r--  net/netfilter/xt_iprange.c  16
-rw-r--r--  net/phonet/af_phonet.c  6
-rw-r--r--  net/rfkill/Kconfig  4
-rw-r--r--  net/rxrpc/af_rxrpc.c  2
-rw-r--r--  net/sched/Kconfig  2
-rw-r--r--  net/sched/act_csum.c  3
-rw-r--r--  net/sched/act_ipt.c  3
-rw-r--r--  net/sched/act_mirred.c  3
-rw-r--r--  net/sched/act_nat.c  3
-rw-r--r--  net/sched/act_pedit.c  3
-rw-r--r--  net/sched/act_police.c  3
-rw-r--r--  net/sched/act_simple.c  3
-rw-r--r--  net/sched/act_skbedit.c  3
-rw-r--r--  net/sched/sch_atm.c  6
-rw-r--r--  net/sched/sch_cbq.c  5
-rw-r--r--  net/sched/sch_drr.c  8
-rw-r--r--  net/sched/sch_dsmark.c  3
-rw-r--r--  net/sched/sch_fifo.c  7
-rw-r--r--  net/sched/sch_hfsc.c  6
-rw-r--r--  net/sched/sch_htb.c  27
-rw-r--r--  net/sched/sch_ingress.c  3
-rw-r--r--  net/sched/sch_multiq.c  3
-rw-r--r--  net/sched/sch_netem.c  5
-rw-r--r--  net/sched/sch_prio.c  3
-rw-r--r--  net/sched/sch_red.c  13
-rw-r--r--  net/sched/sch_sfq.c  297
-rw-r--r--  net/sched/sch_tbf.c  3
-rw-r--r--  net/sched/sch_teql.c  30
-rw-r--r--  net/sctp/socket.c  4
-rw-r--r--  net/socket.c  52
-rw-r--r--  net/sunrpc/auth.c  28
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c  44
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c  2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c  2
-rw-r--r--  net/sunrpc/bc_svc.c  2
-rw-r--r--  net/sunrpc/cache.c  54
-rw-r--r--  net/sunrpc/clnt.c  45
-rw-r--r--  net/sunrpc/rpc_pipe.c  16
-rw-r--r--  net/sunrpc/rpcb_clnt.c  147
-rw-r--r--  net/sunrpc/stats.c  4
-rw-r--r--  net/sunrpc/svc.c  39
-rw-r--r--  net/sunrpc/svc_xprt.c  107
-rw-r--r--  net/sunrpc/svcauth.c  1
-rw-r--r--  net/sunrpc/svcauth_unix.c  17
-rw-r--r--  net/sunrpc/svcsock.c  126
-rw-r--r--  net/sunrpc/xdr.c  155
-rw-r--r--  net/sunrpc/xprt.c  5
-rw-r--r--  net/sunrpc/xprtsock.c  36
-rw-r--r--  net/tipc/Kconfig  35
-rw-r--r--  net/tipc/Makefile  4
-rw-r--r--  net/tipc/addr.c  8
-rw-r--r--  net/tipc/addr.h  25
-rw-r--r--  net/tipc/bcast.c  42
-rw-r--r--  net/tipc/bcast.h  1
-rw-r--r--  net/tipc/bearer.c  10
-rw-r--r--  net/tipc/cluster.c  550
-rw-r--r--  net/tipc/cluster.h  92
-rw-r--r--  net/tipc/config.c  106
-rw-r--r--  net/tipc/core.c  60
-rw-r--r--  net/tipc/core.h  64
-rw-r--r--  net/tipc/discover.c  12
-rw-r--r--  net/tipc/eth_media.c  11
-rw-r--r--  net/tipc/handler.c  2
-rw-r--r--  net/tipc/link.c  369
-rw-r--r--  net/tipc/link.h  9
-rw-r--r--  net/tipc/log.c (renamed from net/tipc/dbg.c)  111
-rw-r--r--  net/tipc/log.h (renamed from net/tipc/dbg.h)  6
-rw-r--r--  net/tipc/msg.c  73
-rw-r--r--  net/tipc/msg.h  17
-rw-r--r--  net/tipc/name_distr.c  45
-rw-r--r--  net/tipc/name_table.c  55
-rw-r--r--  net/tipc/name_table.h  2
-rw-r--r--  net/tipc/net.c  67
-rw-r--r--  net/tipc/net.h  14
-rw-r--r--  net/tipc/node.c  294
-rw-r--r--  net/tipc/node.h  27
-rw-r--r--  net/tipc/port.c  55
-rw-r--r--  net/tipc/port.h  9
-rw-r--r--  net/tipc/ref.c  8
-rw-r--r--  net/tipc/socket.c  148
-rw-r--r--  net/tipc/subscr.c  36
-rw-r--r--  net/tipc/user_reg.c  218
-rw-r--r--  net/tipc/user_reg.h  51
-rw-r--r--  net/tipc/zone.c  159
-rw-r--r--  net/tipc/zone.h  70
-rw-r--r--  net/unix/af_unix.c  2
-rw-r--r--  net/wireless/Kconfig  2
-rw-r--r--  net/wireless/core.c  22
-rw-r--r--  net/wireless/core.h  5
-rw-r--r--  net/wireless/mesh.c  24
-rw-r--r--  net/wireless/mlme.c  22
-rw-r--r--  net/wireless/nl80211.c  276
-rw-r--r--  net/wireless/nl80211.h  6
-rw-r--r--  net/wireless/reg.c  5
-rw-r--r--  net/wireless/scan.c  11
-rw-r--r--  net/wireless/util.c  3
-rw-r--r--  net/wireless/wext-compat.c  8
-rw-r--r--  net/x25/x25_facilities.c  28
-rw-r--r--  net/x25/x25_in.c  14
-rw-r--r--  net/xfrm/xfrm_user.c  4
214 files changed, 4292 insertions, 4039 deletions
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 798beac7f100..1e308f210928 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -178,27 +178,24 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
break;
case 's':{
char **sptr = va_arg(ap, char **);
- int16_t len;
- int size;
+ uint16_t len;
errcode = p9pdu_readf(pdu, proto_version,
"w", &len);
if (errcode)
break;
- size = max_t(int16_t, len, 0);
-
- *sptr = kmalloc(size + 1, GFP_KERNEL);
+ *sptr = kmalloc(len + 1, GFP_KERNEL);
if (*sptr == NULL) {
errcode = -EFAULT;
break;
}
- if (pdu_read(pdu, *sptr, size)) {
+ if (pdu_read(pdu, *sptr, len)) {
errcode = -EFAULT;
kfree(*sptr);
*sptr = NULL;
} else
- (*sptr)[size] = 0;
+ (*sptr)[len] = 0;
}
break;
case 'Q':{
@@ -234,14 +231,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case 'D':{
- int32_t *count = va_arg(ap, int32_t *);
+ uint32_t *count = va_arg(ap, uint32_t *);
void **data = va_arg(ap, void **);
errcode =
p9pdu_readf(pdu, proto_version, "d", count);
if (!errcode) {
*count =
- min_t(int32_t, *count,
+ min_t(uint32_t, *count,
pdu->size - pdu->offset);
*data = &pdu->sdata[pdu->offset];
}
@@ -404,9 +401,10 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
break;
case 's':{
const char *sptr = va_arg(ap, const char *);
- int16_t len = 0;
+ uint16_t len = 0;
if (sptr)
- len = min_t(int16_t, strlen(sptr), USHRT_MAX);
+ len = min_t(uint16_t, strlen(sptr),
+ USHRT_MAX);
errcode = p9pdu_writef(pdu, proto_version,
"w", len);
@@ -438,7 +436,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
stbuf->n_gid, stbuf->n_muid);
} break;
case 'D':{
- int32_t count = va_arg(ap, int32_t);
+ uint32_t count = va_arg(ap, uint32_t);
const void *data = va_arg(ap, const void *);
errcode = p9pdu_writef(pdu, proto_version, "d",
diff --git a/net/Kconfig b/net/Kconfig
index ad0aafe903f8..72840626284b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -253,7 +253,9 @@ config NET_TCPPROBE
what was just said, you don't need it: say N.
Documentation on how to use TCP connection probing can be found
- at http://linux-net.osdl.org/index.php/TcpProbe
+ at:
+
+ http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
To compile this code as a module, choose M here: the
module will be called tcp_probe.
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 179e04bc99dd..38754fdb88ba 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1607,7 +1607,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
struct lec_arp_table *entry;
int i;
- cancel_rearming_delayed_work(&priv->lec_arp_work);
+ cancel_delayed_work_sync(&priv->lec_arp_work);
/*
* Remove all entries
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index bb86d2932394..6da5daeebab7 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,7 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
ax25_cb *ax25;
int err = 0;
- memset(fsa, 0, sizeof(fsa));
+ memset(fsa, 0, sizeof(*fsa));
lock_sock(sk);
ax25 = ax25_sk(sk);
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 7ca1f46a471a..250f954f0213 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_BT_BNEP) += bnep/
obj-$(CONFIG_BT_CMTP) += cmtp/
obj-$(CONFIG_BT_HIDP) += hidp/
-bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o hci_sysfs.o lib.o
+bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 6b90a4191734..99cd8d9d891b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -379,14 +379,10 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
hci_conn_hold(acl);
if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
- acl->sec_level = sec_level;
+ acl->sec_level = BT_SECURITY_LOW;
+ acl->pending_sec_level = sec_level;
acl->auth_type = auth_type;
hci_acl_connect(acl);
- } else {
- if (acl->sec_level < sec_level)
- acl->sec_level = sec_level;
- if (acl->auth_type < auth_type)
- acl->auth_type = auth_type;
}
if (type == ACL_LINK)
@@ -442,11 +438,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
BT_DBG("conn %p", conn);
+ if (conn->pending_sec_level > sec_level)
+ sec_level = conn->pending_sec_level;
+
if (sec_level > conn->sec_level)
- conn->sec_level = sec_level;
+ conn->pending_sec_level = sec_level;
else if (conn->link_mode & HCI_LM_AUTH)
return 1;
+ /* Make sure we preserve an existing MITM requirement*/
+ auth_type |= (conn->auth_type & 0x01);
+
conn->auth_type = auth_type;
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 51c61f75a797..9c4541bc488a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -91,9 +91,16 @@ static void hci_notify(struct hci_dev *hdev, int event)
/* ---- HCI requests ---- */
-void hci_req_complete(struct hci_dev *hdev, int result)
+void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
- BT_DBG("%s result 0x%2.2x", hdev->name, result);
+ BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
+
+ /* If the request has set req_last_cmd (typical for multi-HCI
+ * command requests) check if the completed command matches
+ * this, and if not just return. Single HCI command requests
+ * typically leave req_last_cmd as 0 */
+ if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
+ return;
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
@@ -149,7 +156,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
break;
}
- hdev->req_status = hdev->req_result = 0;
+ hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
BT_DBG("%s end: err %d", hdev->name, err);
@@ -252,6 +259,8 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
/* Connection accept timeout ~20 secs */
param = cpu_to_le16(0x7d00);
hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+ hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -960,6 +969,7 @@ int hci_register_dev(struct hci_dev *hdev)
}
}
+ mgmt_index_added(hdev->id);
hci_notify(hdev, HCI_DEV_REG);
return id;
@@ -989,6 +999,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
for (i = 0; i < NUM_REASSEMBLY; i++)
kfree_skb(hdev->reassembly[i]);
+ mgmt_index_removed(hdev->id);
hci_notify(hdev, HCI_DEV_UNREG);
if (hdev->rfkill) {
@@ -1000,6 +1011,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
destroy_workqueue(hdev->workqueue);
+ hci_dev_lock_bh(hdev);
+ hci_blacklist_clear(hdev);
+ hci_dev_unlock_bh(hdev);
+
__hci_dev_put(hdev);
return 0;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 8923b36a67a2..a290854fdaa6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -58,7 +58,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_INQUIRY, &hdev->flags);
- hci_req_complete(hdev, status);
+ hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
hci_conn_check_pending(hdev);
}
@@ -174,7 +174,7 @@ static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *s
if (!status)
hdev->link_policy = get_unaligned_le16(sent);
- hci_req_complete(hdev, status);
+ hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
@@ -183,7 +183,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, status);
- hci_req_complete(hdev, status);
+ hci_req_complete(hdev, HCI_OP_RESET, status);
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -235,7 +235,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_AUTH, &hdev->flags);
}
- hci_req_complete(hdev, status);
+ hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -258,7 +258,7 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_ENCRYPT, &hdev->flags);
}
- hci_req_complete(hdev, status);
+ hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -285,7 +285,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
set_bit(HCI_PSCAN, &hdev->flags);
}
- hci_req_complete(hdev, status);
+ hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -383,7 +383,7 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, status);
- hci_req_complete(hdev, status);
+ hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}
static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -536,7 +536,16 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
if (!rp->status)
bacpy(&hdev->bdaddr, &rp->bdaddr);
- hci_req_complete(hdev, rp->status);
+ hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
+}
+
+static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
@@ -544,7 +553,7 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
BT_DBG("%s status 0x%x", hdev->name, status);
if (status) {
- hci_req_complete(hdev, status);
+ hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
} else
@@ -683,13 +692,13 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
if (conn->state != BT_CONFIG || !conn->out)
return 0;
- if (conn->sec_level == BT_SECURITY_SDP)
+ if (conn->pending_sec_level == BT_SECURITY_SDP)
return 0;
/* Only request authentication for SSP connections or non-SSP
* devices with sec_level HIGH */
if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
- conn->sec_level != BT_SECURITY_HIGH)
+ conn->pending_sec_level != BT_SECURITY_HIGH)
return 0;
return 1;
@@ -871,7 +880,7 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
clear_bit(HCI_INQUIRY, &hdev->flags);
- hci_req_complete(hdev, status);
+ hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
}
@@ -1086,9 +1095,10 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn) {
- if (!ev->status)
+ if (!ev->status) {
conn->link_mode |= HCI_LM_AUTH;
- else
+ conn->sec_level = conn->pending_sec_level;
+ } else
conn->sec_level = BT_SECURITY_LOW;
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1379,6 +1389,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_read_bd_addr(hdev, skb);
break;
+ case HCI_OP_WRITE_CA_TIMEOUT:
+ hci_cc_write_ca_timeout(hdev, skb);
+ break;
+
default:
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
break;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index b3753bad2a55..29827c77f6ce 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -49,6 +49,8 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+static int enable_mgmt;
+
/* ----- HCI socket interface ----- */
static inline int hci_test_bit(int nr, void *addr)
@@ -102,6 +104,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
if (skb->sk == sk)
continue;
+ if (bt_cb(skb)->channel != hci_pi(sk)->channel)
+ continue;
+
+ if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
+ goto clone;
+
/* Apply filter */
flt = &hci_pi(sk)->filter;
@@ -125,12 +133,14 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
continue;
}
+clone:
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
continue;
/* Put type byte before the data */
- memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);
+ if (bt_cb(skb)->channel == HCI_CHANNEL_RAW)
+ memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);
if (sock_queue_rcv_skb(sk, nskb))
kfree_skb(nskb);
@@ -353,25 +363,38 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
- struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
+ struct sockaddr_hci haddr;
struct sock *sk = sock->sk;
struct hci_dev *hdev = NULL;
- int err = 0;
+ int len, err = 0;
BT_DBG("sock %p sk %p", sock, sk);
- if (!haddr || haddr->hci_family != AF_BLUETOOTH)
+ if (!addr)
+ return -EINVAL;
+
+ memset(&haddr, 0, sizeof(haddr));
+ len = min_t(unsigned int, sizeof(haddr), addr_len);
+ memcpy(&haddr, addr, len);
+
+ if (haddr.hci_family != AF_BLUETOOTH)
+ return -EINVAL;
+
+ if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
+ return -EINVAL;
+
+ if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
return -EINVAL;
lock_sock(sk);
- if (hci_pi(sk)->hdev) {
+ if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) {
err = -EALREADY;
goto done;
}
- if (haddr->hci_dev != HCI_DEV_NONE) {
- hdev = hci_dev_get(haddr->hci_dev);
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ hdev = hci_dev_get(haddr.hci_dev);
if (!hdev) {
err = -ENODEV;
goto done;
@@ -380,6 +403,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
atomic_inc(&hdev->promisc);
}
+ hci_pi(sk)->channel = haddr.hci_channel;
hci_pi(sk)->hdev = hdev;
sk->sk_state = BT_BOUND;
@@ -502,6 +526,17 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
lock_sock(sk);
+ switch (hci_pi(sk)->channel) {
+ case HCI_CHANNEL_RAW:
+ break;
+ case HCI_CHANNEL_CONTROL:
+ err = mgmt_control(sk, msg, len);
+ goto done;
+ default:
+ err = -EINVAL;
+ goto done;
+ }
+
hdev = hci_pi(sk)->hdev;
if (!hdev) {
err = -EBADFD;
@@ -831,3 +866,6 @@ void __exit hci_sock_cleanup(void)
proto_unregister(&hci_sk_proto);
}
+
+module_param(enable_mgmt, bool, 0644);
+MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index c12eccfdfe01..7550abb0c96a 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -305,33 +305,44 @@ static void l2cap_chan_del(struct sock *sk, int err)
}
}
-/* Service level security */
-static inline int l2cap_check_security(struct sock *sk)
+static inline u8 l2cap_get_auth_type(struct sock *sk)
{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- __u8 auth_type;
+ if (sk->sk_type == SOCK_RAW) {
+ switch (l2cap_pi(sk)->sec_level) {
+ case BT_SECURITY_HIGH:
+ return HCI_AT_DEDICATED_BONDING_MITM;
+ case BT_SECURITY_MEDIUM:
+ return HCI_AT_DEDICATED_BONDING;
+ default:
+ return HCI_AT_NO_BONDING;
+ }
+ } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
- if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
- auth_type = HCI_AT_NO_BONDING_MITM;
+ return HCI_AT_NO_BONDING_MITM;
else
- auth_type = HCI_AT_NO_BONDING;
-
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
- l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+ return HCI_AT_NO_BONDING;
} else {
switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_HIGH:
- auth_type = HCI_AT_GENERAL_BONDING_MITM;
- break;
+ return HCI_AT_GENERAL_BONDING_MITM;
case BT_SECURITY_MEDIUM:
- auth_type = HCI_AT_GENERAL_BONDING;
- break;
+ return HCI_AT_GENERAL_BONDING;
default:
- auth_type = HCI_AT_NO_BONDING;
- break;
+ return HCI_AT_NO_BONDING;
}
}
+}
+
+/* Service level security */
+static inline int l2cap_check_security(struct sock *sk)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ __u8 auth_type;
+
+ auth_type = l2cap_get_auth_type(sk);
return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
auth_type);
@@ -1068,39 +1079,7 @@ static int l2cap_do_connect(struct sock *sk)
err = -ENOMEM;
- if (sk->sk_type == SOCK_RAW) {
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_HIGH:
- auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- break;
- case BT_SECURITY_MEDIUM:
- auth_type = HCI_AT_DEDICATED_BONDING;
- break;
- default:
- auth_type = HCI_AT_NO_BONDING;
- break;
- }
- } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
- auth_type = HCI_AT_NO_BONDING_MITM;
- else
- auth_type = HCI_AT_NO_BONDING;
-
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
- l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
- } else {
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_HIGH:
- auth_type = HCI_AT_GENERAL_BONDING_MITM;
- break;
- case BT_SECURITY_MEDIUM:
- auth_type = HCI_AT_GENERAL_BONDING;
- break;
- default:
- auth_type = HCI_AT_NO_BONDING;
- break;
- }
- }
+ auth_type = l2cap_get_auth_type(sk);
hcon = hci_connect(hdev, ACL_LINK, dst,
l2cap_pi(sk)->sec_level, auth_type);
@@ -1127,7 +1106,8 @@ static int l2cap_do_connect(struct sock *sk)
if (sk->sk_type != SOCK_SEQPACKET &&
sk->sk_type != SOCK_STREAM) {
l2cap_sock_clear_timer(sk);
- sk->sk_state = BT_CONNECTED;
+ if (l2cap_check_security(sk))
+ sk->sk_state = BT_CONNECTED;
} else
l2cap_do_start(sk);
}
@@ -1893,8 +1873,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (pi->mode == L2CAP_MODE_STREAMING) {
l2cap_streaming_send(sk);
} else {
- if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
- pi->conn_state && L2CAP_CONN_WAIT_F) {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->conn_state & L2CAP_CONN_WAIT_F)) {
err = len;
break;
}
@@ -3124,8 +3104,14 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (!sk)
return -ENOENT;
- if (sk->sk_state == BT_DISCONN)
+ if (sk->sk_state != BT_CONFIG) {
+ struct l2cap_cmd_rej rej;
+
+ rej.reason = cpu_to_le16(0x0002);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
+ sizeof(rej), &rej);
goto unlock;
+ }
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
new file mode 100644
index 000000000000..f827fd908380
--- /dev/null
+++ b/net/bluetooth/mgmt.c
@@ -0,0 +1,308 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2010 Nokia Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+/* Bluetooth HCI Management interface */
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>
+
+#define MGMT_VERSION 0
+#define MGMT_REVISION 1
+
+static int cmd_status(struct sock *sk, u16 cmd, u8 status)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+ struct mgmt_ev_cmd_status *ev;
+
+ BT_DBG("sock %p", sk);
+
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+
+ hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
+ hdr->len = cpu_to_le16(sizeof(*ev));
+
+ ev = (void *) skb_put(skb, sizeof(*ev));
+ ev->status = status;
+ put_unaligned_le16(cmd, &ev->opcode);
+
+ if (sock_queue_rcv_skb(sk, skb) < 0)
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int read_version(struct sock *sk)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+ struct mgmt_ev_cmd_complete *ev;
+ struct mgmt_rp_read_version *rp;
+
+ BT_DBG("sock %p", sk);
+
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
+ hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
+
+ ev = (void *) skb_put(skb, sizeof(*ev));
+ put_unaligned_le16(MGMT_OP_READ_VERSION, &ev->opcode);
+
+ rp = (void *) skb_put(skb, sizeof(*rp));
+ rp->version = MGMT_VERSION;
+ put_unaligned_le16(MGMT_REVISION, &rp->revision);
+
+ if (sock_queue_rcv_skb(sk, skb) < 0)
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int read_index_list(struct sock *sk)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+ struct mgmt_ev_cmd_complete *ev;
+ struct mgmt_rp_read_index_list *rp;
+ struct list_head *p;
+ size_t body_len;
+ u16 count;
+ int i;
+
+ BT_DBG("sock %p", sk);
+
+ read_lock(&hci_dev_list_lock);
+
+ count = 0;
+ list_for_each(p, &hci_dev_list) {
+ count++;
+ }
+
+ body_len = sizeof(*ev) + sizeof(*rp) + (2 * count);
+ skb = alloc_skb(sizeof(*hdr) + body_len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
+ hdr->len = cpu_to_le16(body_len);
+
+ ev = (void *) skb_put(skb, sizeof(*ev));
+ put_unaligned_le16(MGMT_OP_READ_INDEX_LIST, &ev->opcode);
+
+ rp = (void *) skb_put(skb, sizeof(*rp) + (2 * count));
+ put_unaligned_le16(count, &rp->num_controllers);
+
+ i = 0;
+ list_for_each(p, &hci_dev_list) {
+ struct hci_dev *d = list_entry(p, struct hci_dev, list);
+ put_unaligned_le16(d->id, &rp->index[i++]);
+ BT_DBG("Added hci%u", d->id);
+ }
+
+ read_unlock(&hci_dev_list_lock);
+
+ if (sock_queue_rcv_skb(sk, skb) < 0)
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+ struct mgmt_ev_cmd_complete *ev;
+ struct mgmt_rp_read_info *rp;
+ struct mgmt_cp_read_info *cp;
+ struct hci_dev *hdev;
+ u16 dev_id;
+
+ BT_DBG("sock %p", sk);
+
+ if (len != 2)
+ return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL);
+
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
+ hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
+
+ ev = (void *) skb_put(skb, sizeof(*ev));
+ put_unaligned_le16(MGMT_OP_READ_INFO, &ev->opcode);
+
+ rp = (void *) skb_put(skb, sizeof(*rp));
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ BT_DBG("request for hci%u", dev_id);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev) {
+ kfree_skb(skb);
+ return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV);
+ }
+
+ hci_dev_lock_bh(hdev);
+
+ put_unaligned_le16(hdev->id, &rp->index);
+ rp->type = hdev->dev_type;
+
+ rp->powered = test_bit(HCI_UP, &hdev->flags);
+ rp->discoverable = test_bit(HCI_ISCAN, &hdev->flags);
+ rp->pairable = test_bit(HCI_PSCAN, &hdev->flags);
+
+ if (test_bit(HCI_AUTH, &hdev->flags))
+ rp->sec_mode = 3;
+ else if (hdev->ssp_mode > 0)
+ rp->sec_mode = 4;
+ else
+ rp->sec_mode = 2;
+
+ bacpy(&rp->bdaddr, &hdev->bdaddr);
+ memcpy(rp->features, hdev->features, 8);
+ memcpy(rp->dev_class, hdev->dev_class, 3);
+ put_unaligned_le16(hdev->manufacturer, &rp->manufacturer);
+ rp->hci_ver = hdev->hci_ver;
+ put_unaligned_le16(hdev->hci_rev, &rp->hci_rev);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ if (sock_queue_rcv_skb(sk, skb) < 0)
+ kfree_skb(skb);
+
+ return 0;
+}
+
+int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
+{
+ unsigned char *buf;
+ struct mgmt_hdr *hdr;
+ u16 opcode, len;
+ int err;
+
+ BT_DBG("got %zu bytes", msglen);
+
+ if (msglen < sizeof(*hdr))
+ return -EINVAL;
+
+ buf = kmalloc(msglen, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
+ err = -EFAULT;
+ goto done;
+ }
+
+ hdr = (struct mgmt_hdr *) buf;
+ opcode = get_unaligned_le16(&hdr->opcode);
+ len = get_unaligned_le16(&hdr->len);
+
+ if (len != msglen - sizeof(*hdr)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ switch (opcode) {
+ case MGMT_OP_READ_VERSION:
+ err = read_version(sk);
+ break;
+ case MGMT_OP_READ_INDEX_LIST:
+ err = read_index_list(sk);
+ break;
+ case MGMT_OP_READ_INFO:
+ err = read_controller_info(sk, buf + sizeof(*hdr), len);
+ break;
+ default:
+ BT_DBG("Unknown op %u", opcode);
+ err = cmd_status(sk, opcode, 0x01);
+ break;
+ }
+
+ if (err < 0)
+ goto done;
+
+ err = msglen;
+
+done:
+ kfree(buf);
+ return err;
+}
+
+static int mgmt_event(u16 event, void *data, u16 data_len)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+
+ skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
+
+ hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr->opcode = cpu_to_le16(event);
+ hdr->len = cpu_to_le16(data_len);
+
+ memcpy(skb_put(skb, data_len), data, data_len);
+
+ hci_send_to_sock(NULL, skb);
+ kfree_skb(skb);
+
+ return 0;
+}
+
+int mgmt_index_added(u16 index)
+{
+ struct mgmt_ev_index_added ev;
+
+ put_unaligned_le16(index, &ev.index);
+
+ return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev));
+}
+
+int mgmt_index_removed(u16 index)
+{
+ struct mgmt_ev_index_added ev;
+
+ put_unaligned_le16(index, &ev.index);
+
+ return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev));
+}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ff8aaa736650..6b83776534fb 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1164,7 +1164,8 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
* initiator rfcomm_process_rx already calls
* rfcomm_session_put() */
if (s->sock->sk->sk_state != BT_CLOSED)
- rfcomm_session_put(s);
+ if (list_empty(&s->dlcs))
+ rfcomm_session_put(s);
break;
}
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 2872393b2939..88485cc74dc3 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -328,12 +328,12 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
if (fdb) {
memcpy(fdb->addr.addr, addr, ETH_ALEN);
- hlist_add_head_rcu(&fdb->hlist, head);
-
fdb->dst = source;
fdb->is_local = is_local;
fdb->is_static = is_local;
fdb->ageing_timer = jiffies;
+
+ hlist_add_head_rcu(&fdb->hlist, head);
}
return fdb;
}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 2bd11ec6d166..ee64287f1290 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -41,17 +41,13 @@ static inline unsigned packet_length(const struct sk_buff *skb)
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
- /* drop mtu oversized packets except gso */
- if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
+ /* ip_fragment doesn't copy the MAC header */
+ if (nf_bridge_maybe_copy_header(skb) ||
+ (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))) {
kfree_skb(skb);
- else {
- /* ip_fragment doesn't copy the MAC header */
- if (nf_bridge_maybe_copy_header(skb))
- kfree_skb(skb);
- else {
- skb_push(skb, ETH_HLEN);
- dev_queue_xmit(skb);
- }
+ } else {
+ skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
}
return 0;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f03e2e98cd44..f701a21acb34 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1451,7 +1451,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb)
{
- struct sk_buff *skb2 = skb;
+ struct sk_buff *skb2;
struct ipv6hdr *ip6h;
struct icmp6hdr *icmp6h;
u8 nexthdr;
@@ -1490,15 +1490,15 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
if (!skb2)
return -ENOMEM;
+ err = -EINVAL;
+ if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
+ goto out;
+
len -= offset - skb_network_offset(skb2);
__skb_pull(skb2, offset);
skb_reset_transport_header(skb2);
- err = -EINVAL;
- if (!pskb_may_pull(skb2, sizeof(*icmp6h)))
- goto out;
-
icmp6h = icmp6_hdr(skb2);
switch (icmp6h->icmp6_type) {
@@ -1537,7 +1537,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
switch (icmp6h->icmp6_type) {
case ICMPV6_MGM_REPORT:
{
- struct mld_msg *mld = (struct mld_msg *)icmp6h;
+ struct mld_msg *mld;
+ if (!pskb_may_pull(skb2, sizeof(*mld))) {
+ err = -EINVAL;
+ goto out;
+ }
+ mld = (struct mld_msg *)skb_transport_header(skb2);
BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
break;
@@ -1550,15 +1555,18 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
break;
case ICMPV6_MGM_REDUCTION:
{
- struct mld_msg *mld = (struct mld_msg *)icmp6h;
+ struct mld_msg *mld;
+ if (!pskb_may_pull(skb2, sizeof(*mld))) {
+ err = -EINVAL;
+ goto out;
+ }
+ mld = (struct mld_msg *)skb_transport_header(skb2);
br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
}
}
out:
- __skb_push(skb2, offset);
- if (skb2 != skb)
- kfree_skb(skb2);
+ kfree_skb(skb2);
return err;
}
#endif
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 3d9a55d3822f..289646ec9b7b 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -50,6 +50,8 @@ static void br_send_bpdu(struct net_bridge_port *p,
llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
+ skb_reset_mac_header(skb);
+
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 1d8826914cbf..79372d4a4055 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -145,7 +145,7 @@ static void br_stp_stop(struct net_bridge *br)
char *envp[] = { NULL };
if (br->stp_enabled == BR_USER_STP) {
- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
br_info(br, "userspace STP stopped, return code %d\n", r);
/* To start timers on any ports left in blocking */
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index cbc9f395ab1e..16df0532d4b9 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1147,7 +1147,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
void *p;
if (input_table == NULL || (repl = input_table->table) == NULL ||
- repl->entries == 0 || repl->entries_size == 0 ||
+ repl->entries == NULL || repl->entries_size == 0 ||
repl->counters != NULL || input_table->private != NULL) {
BUGPRINT("Bad table data for ebt_register_table!!!\n");
return ERR_PTR(-EINVAL);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf503796..8184c031d028 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock,
if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
return -ENOPROTOOPT;
lock_sock(&(cf_sk->sk));
- cf_sk->conn_req.param.size = ol;
if (ol > sizeof(cf_sk->conn_req.param.data) ||
copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
release_sock(&cf_sk->sk);
return -EINVAL;
}
+ cf_sk->conn_req.param.size = ol;
release_sock(&cf_sk->sk);
return 0;
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 21ede141018a..c665de778b60 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -191,6 +191,7 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
struct cflayer *servl = NULL;
struct cfcnfg_phyinfo *phyinfo = NULL;
u8 phyid = 0;
+
caif_assert(adap_layer != NULL);
channel_id = adap_layer->id;
if (adap_layer->dn == NULL || channel_id == 0) {
@@ -199,16 +200,16 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
goto end;
}
servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
- if (servl == NULL)
- goto end;
- layer_set_up(servl, NULL);
- ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
if (servl == NULL) {
pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
channel_id);
ret = -EINVAL;
goto end;
}
+ layer_set_up(servl, NULL);
+ ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
+ if (ret)
+ goto end;
caif_assert(channel_id == servl->id);
if (adap_layer->dn != NULL) {
phyid = cfsrvl_getphyid(adap_layer->dn);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c98941..fa9dab372b68 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
int pktlen;
int err = 0;
+ const u8 *ip_version;
+ u8 buf;
priv = container_of(layr, struct chnl_net, chnl);
@@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
* send the packet to the net stack.
*/
skb->dev = priv->netdev;
- skb->protocol = htons(ETH_P_IP);
+
+ /* check the version of IP */
+ ip_version = skb_header_pointer(skb, 0, 1, &buf);
+ if (!ip_version)
+ return -EINVAL;
+ switch (*ip_version >> 4) {
+ case 4:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 6:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
/* If we change the header in loop mode, the checksum is corrupted. */
if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6faa8256e10c..092dc88a7c64 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@ struct bcm_sock {
struct list_head tx_ops;
unsigned long dropped_usr_msgs;
struct proc_dir_entry *bcm_proc_read;
- char procname [20]; /* pointer printed in ASCII with \0 */
+ char procname [32]; /* inode number in decimal with \0 */
};
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
@@ -1256,6 +1256,9 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sockaddr_can *addr =
(struct sockaddr_can *)msg->msg_name;
+ if (msg->msg_namelen < sizeof(*addr))
+ return -EINVAL;
+
if (addr->can_family != AF_CAN)
return -EINVAL;
@@ -1521,7 +1524,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
if (proc_dir) {
/* unique socket address as filename */
- sprintf(bo->procname, "%p", sock);
+ sprintf(bo->procname, "%lu", sock_i_ino(sk));
bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
proc_dir,
&bcm_proc_fops, sk);
diff --git a/net/can/raw.c b/net/can/raw.c
index e88f610fdb7b..883e9d74fddf 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -649,6 +649,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sockaddr_can *addr =
(struct sockaddr_can *)msg->msg_name;
+ if (msg->msg_namelen < sizeof(*addr))
+ return -EINVAL;
+
if (addr->can_family != AF_CAN)
return -EINVAL;
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 815ef8826796..0a1b53bce76d 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -1,5 +1,6 @@
#include <linux/ceph/types.h>
+#include <linux/module.h>
/*
* Robert Jenkin's hash function.
@@ -104,6 +105,7 @@ unsigned ceph_str_hash(int type, const char *s, unsigned len)
return -1;
}
}
+EXPORT_SYMBOL(ceph_str_hash);
const char *ceph_str_hash_name(int type)
{
@@ -116,3 +118,4 @@ const char *ceph_str_hash_name(int type)
return "unknown";
}
}
+EXPORT_SYMBOL(ceph_str_hash_name);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 0e8157ee5d43..dff633d62e5b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -96,12 +96,10 @@ struct workqueue_struct *ceph_msgr_wq;
int ceph_msgr_init(void)
{
- ceph_msgr_wq = create_workqueue("ceph-msgr");
- if (IS_ERR(ceph_msgr_wq)) {
- int ret = PTR_ERR(ceph_msgr_wq);
- pr_err("msgr_init failed to create workqueue: %d\n", ret);
- ceph_msgr_wq = NULL;
- return ret;
+ ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
+ if (!ceph_msgr_wq) {
+ pr_err("msgr_init failed to create workqueue\n");
+ return -ENOMEM;
}
return 0;
}
@@ -540,8 +538,7 @@ static void prepare_write_message(struct ceph_connection *con)
/* initialize page iterator */
con->out_msg_pos.page = 0;
if (m->pages)
- con->out_msg_pos.page_pos =
- le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
+ con->out_msg_pos.page_pos = m->page_alignment;
else
con->out_msg_pos.page_pos = 0;
con->out_msg_pos.data_pos = 0;
@@ -1491,7 +1488,7 @@ static int read_partial_message(struct ceph_connection *con)
struct ceph_msg *m = con->in_msg;
int ret;
int to, left;
- unsigned front_len, middle_len, data_len, data_off;
+ unsigned front_len, middle_len, data_len;
int datacrc = con->msgr->nocrc;
int skip;
u64 seq;
@@ -1527,19 +1524,17 @@ static int read_partial_message(struct ceph_connection *con)
data_len = le32_to_cpu(con->in_hdr.data_len);
if (data_len > CEPH_MSG_MAX_DATA_LEN)
return -EIO;
- data_off = le16_to_cpu(con->in_hdr.data_off);
/* verify seq# */
seq = le64_to_cpu(con->in_hdr.seq);
if ((s64)seq - (s64)con->in_seq < 1) {
- pr_info("skipping %s%lld %s seq %lld, expected %lld\n",
+ pr_info("skipping %s%lld %s seq %lld expected %lld\n",
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr),
seq, con->in_seq + 1);
con->in_base_pos = -front_len - middle_len - data_len -
sizeof(m->footer);
con->in_tag = CEPH_MSGR_TAG_READY;
- con->in_seq++;
return 0;
} else if ((s64)seq - (s64)con->in_seq > 1) {
pr_err("read_partial_message bad seq %lld expected %lld\n",
@@ -1576,7 +1571,7 @@ static int read_partial_message(struct ceph_connection *con)
con->in_msg_pos.page = 0;
if (m->pages)
- con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
+ con->in_msg_pos.page_pos = m->page_alignment;
else
con->in_msg_pos.page_pos = 0;
con->in_msg_pos.data_pos = 0;
@@ -1925,20 +1920,6 @@ bad_tag:
/*
* Atomically queue work on a connection. Bump @con reference to
* avoid races with connection teardown.
- *
- * There is some trickery going on with QUEUED and BUSY because we
- * only want a _single_ thread operating on each connection at any
- * point in time, but we want to use all available CPUs.
- *
- * The worker thread only proceeds if it can atomically set BUSY. It
- * clears QUEUED and does it's thing. When it thinks it's done, it
- * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
- * (tries again to set BUSY).
- *
- * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
- * try to queue work. If that fails (work is already queued, or BUSY)
- * we give up (work also already being done or is queued) but leave QUEUED
- * set so that the worker thread will loop if necessary.
*/
static void queue_con(struct ceph_connection *con)
{
@@ -1953,11 +1934,7 @@ static void queue_con(struct ceph_connection *con)
return;
}
- set_bit(QUEUED, &con->state);
- if (test_bit(BUSY, &con->state)) {
- dout("queue_con %p - already BUSY\n", con);
- con->ops->put(con);
- } else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
+ if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
dout("queue_con %p - already queued\n", con);
con->ops->put(con);
} else {
@@ -1972,15 +1949,6 @@ static void con_work(struct work_struct *work)
{
struct ceph_connection *con = container_of(work, struct ceph_connection,
work.work);
- int backoff = 0;
-
-more:
- if (test_and_set_bit(BUSY, &con->state) != 0) {
- dout("con_work %p BUSY already set\n", con);
- goto out;
- }
- dout("con_work %p start, clearing QUEUED\n", con);
- clear_bit(QUEUED, &con->state);
mutex_lock(&con->mutex);
@@ -1999,28 +1967,13 @@ more:
try_read(con) < 0 ||
try_write(con) < 0) {
mutex_unlock(&con->mutex);
- backoff = 1;
ceph_fault(con); /* error/fault path */
goto done_unlocked;
}
done:
mutex_unlock(&con->mutex);
-
done_unlocked:
- clear_bit(BUSY, &con->state);
- dout("con->state=%lu\n", con->state);
- if (test_bit(QUEUED, &con->state)) {
- if (!backoff || test_bit(OPENING, &con->state)) {
- dout("con_work %p QUEUED reset, looping\n", con);
- goto more;
- }
- dout("con_work %p QUEUED reset, but just faulted\n", con);
- clear_bit(QUEUED, &con->state);
- }
- dout("con_work %p done\n", con);
-
-out:
con->ops->put(con);
}
@@ -2301,6 +2254,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
/* data */
m->nr_pages = 0;
+ m->page_alignment = 0;
m->pages = NULL;
m->pagelist = NULL;
m->bio = NULL;
@@ -2370,6 +2324,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
type, front_len);
return NULL;
}
+ msg->page_alignment = le16_to_cpu(hdr->data_off);
}
memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 79391994b3ed..3e20a122ffa2 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -71,6 +71,7 @@ void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
op->extent.length = objlen;
}
req->r_num_pages = calc_pages_for(off, *plen);
+ req->r_page_alignment = off & ~PAGE_MASK;
if (op->op == CEPH_OSD_OP_WRITE)
op->payload_len = *plen;
@@ -390,6 +391,8 @@ void ceph_osdc_build_request(struct ceph_osd_request *req,
req->r_request->hdr.data_len = cpu_to_le32(data_len);
}
+ req->r_request->page_alignment = req->r_page_alignment;
+
BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
msg_size = p - msg->front.iov_base;
msg->front.iov_len = msg_size;
@@ -419,7 +422,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
u32 truncate_seq,
u64 truncate_size,
struct timespec *mtime,
- bool use_mempool, int num_reply)
+ bool use_mempool, int num_reply,
+ int page_align)
{
struct ceph_osd_req_op ops[3];
struct ceph_osd_request *req;
@@ -447,6 +451,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
calc_layout(osdc, vino, layout, off, plen, req, ops);
req->r_file_layout = *layout; /* keep a copy */
+ /* in case it differs from natural alignment that calc_layout
+ filled in for us */
+ req->r_page_alignment = page_align;
+
ceph_osdc_build_request(req, off, plen, ops,
snapc,
mtime,
@@ -1489,7 +1497,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
struct ceph_vino vino, struct ceph_file_layout *layout,
u64 off, u64 *plen,
u32 truncate_seq, u64 truncate_size,
- struct page **pages, int num_pages)
+ struct page **pages, int num_pages, int page_align)
{
struct ceph_osd_request *req;
int rc = 0;
@@ -1499,15 +1507,15 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, 0, truncate_seq, truncate_size, NULL,
- false, 1);
+ false, 1, page_align);
if (!req)
return -ENOMEM;
/* it may be a short read due to an object boundary */
req->r_pages = pages;
- dout("readpages final extent is %llu~%llu (%d pages)\n",
- off, *plen, req->r_num_pages);
+ dout("readpages final extent is %llu~%llu (%d pages align %d)\n",
+ off, *plen, req->r_num_pages, page_align);
rc = ceph_osdc_start_request(osdc, req, false);
if (!rc)
@@ -1533,6 +1541,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
{
struct ceph_osd_request *req;
int rc = 0;
+ int page_align = off & ~PAGE_MASK;
BUG_ON(vino.snap != CEPH_NOSNAP);
req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
@@ -1541,7 +1550,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
CEPH_OSD_FLAG_WRITE,
snapc, do_sync,
truncate_seq, truncate_size, mtime,
- nofail, 1);
+ nofail, 1, page_align);
if (!req)
return -ENOMEM;
@@ -1638,8 +1647,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
m = ceph_msg_get(req->r_reply);
if (data_len > 0) {
- unsigned data_off = le16_to_cpu(hdr->data_off);
- int want = calc_pages_for(data_off & ~PAGE_MASK, data_len);
+ int want = calc_pages_for(req->r_page_alignment, data_len);
if (unlikely(req->r_num_pages < want)) {
pr_warning("tid %lld reply %d > expected %d pages\n",
@@ -1651,6 +1659,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
}
m->pages = req->r_pages;
m->nr_pages = req->r_num_pages;
+ m->page_alignment = req->r_page_alignment;
#ifdef CONFIG_BLOCK
m->bio = req->r_bio;
#endif
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d73f3f6efa36..71603ac3dff5 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -605,8 +605,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
goto bad;
}
err = __decode_pool(p, end, pi);
- if (err < 0)
+ if (err < 0) {
+ kfree(pi);
goto bad;
+ }
__insert_pg_pool(&map->pg_pools, pi);
}
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 54caf0687155..1a040e64c69f 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -13,8 +13,7 @@
* build a vector of user pages
*/
struct page **ceph_get_direct_page_vector(const char __user *data,
- int num_pages,
- loff_t off, size_t len)
+ int num_pages, bool write_page)
{
struct page **pages;
int rc;
@@ -25,24 +24,27 @@ struct page **ceph_get_direct_page_vector(const char __user *data,
down_read(&current->mm->mmap_sem);
rc = get_user_pages(current, current->mm, (unsigned long)data,
- num_pages, 0, 0, pages, NULL);
+ num_pages, write_page, 0, pages, NULL);
up_read(&current->mm->mmap_sem);
- if (rc < 0)
+ if (rc < num_pages)
goto fail;
return pages;
fail:
- kfree(pages);
+ ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);
-void ceph_put_page_vector(struct page **pages, int num_pages)
+void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
int i;
- for (i = 0; i < num_pages; i++)
+ for (i = 0; i < num_pages; i++) {
+ if (dirty)
+ set_page_dirty_lock(pages[i]);
put_page(pages[i]);
+ }
kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);
diff --git a/net/core/dev.c b/net/core/dev.c
index 59877290bca7..b6d0bf875a8e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -749,7 +749,8 @@ EXPORT_SYMBOL(dev_get_by_index);
* @ha: hardware address
*
* Search for an interface by MAC address. Returns NULL if the device
- * is not found or a pointer to the device. The caller must hold RCU
+ * is not found or a pointer to the device.
+ * The caller must hold RCU or RTNL.
* The returned device has not had its ref count increased
* and the caller must therefore be careful about locking
*
@@ -1547,13 +1548,6 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *skb2 = NULL;
struct packet_type *pt_prev = NULL;
-#ifdef CONFIG_NET_CLS_ACT
- if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
- net_timestamp_set(skb);
-#else
- net_timestamp_set(skb);
-#endif
-
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_all, list) {
/* Never send packets back to the socket
@@ -1572,6 +1566,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
if (!skb2)
break;
+ net_timestamp_set(skb2);
+
/* skb->nh should be correctly
set by sender, so that the second statement is
just protection against buggy protocols.
@@ -1737,33 +1733,6 @@ void netif_device_attach(struct net_device *dev)
}
EXPORT_SYMBOL(netif_device_attach);
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-{
- return ((features & NETIF_F_NO_CSUM) ||
- ((features & NETIF_F_V4_CSUM) &&
- protocol == htons(ETH_P_IP)) ||
- ((features & NETIF_F_V6_CSUM) &&
- protocol == htons(ETH_P_IPV6)) ||
- ((features & NETIF_F_FCOE_CRC) &&
- protocol == htons(ETH_P_FCOE)));
-}
-
-static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-{
- __be16 protocol = skb->protocol;
- int features = dev->features;
-
- if (vlan_tx_tag_present(skb)) {
- features &= dev->vlan_features;
- } else if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
- protocol = veh->h_vlan_encapsulated_proto;
- features &= dev->vlan_features;
- }
-
- return can_checksum_protocol(features, protocol);
-}
-
/**
* skb_dev_set -- assign a new device to a buffer
* @skb: buffer for the new device
@@ -1976,16 +1945,14 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
/**
* dev_gso_segment - Perform emulated hardware segmentation on skb.
* @skb: buffer to segment
+ * @features: device features as applicable to this skb
*
* This function segments the given skb and stores the list of segments
* in skb->next.
*/
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
{
- struct net_device *dev = skb->dev;
struct sk_buff *segs;
- int features = dev->features & ~(illegal_highdma(dev, skb) ?
- NETIF_F_SG : 0);
segs = skb_gso_segment(skb, features);
@@ -2022,22 +1989,52 @@ static inline void skb_orphan_try(struct sk_buff *skb)
}
}
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+ return ((features & NETIF_F_GEN_CSUM) ||
+ ((features & NETIF_F_V4_CSUM) &&
+ protocol == htons(ETH_P_IP)) ||
+ ((features & NETIF_F_V6_CSUM) &&
+ protocol == htons(ETH_P_IPV6)) ||
+ ((features & NETIF_F_FCOE_CRC) &&
+ protocol == htons(ETH_P_FCOE)));
+}
+
+static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+{
+ if (!can_checksum_protocol(features, protocol)) {
+ features &= ~NETIF_F_ALL_CSUM;
+ features &= ~NETIF_F_SG;
+ } else if (illegal_highdma(skb->dev, skb)) {
+ features &= ~NETIF_F_SG;
+ }
+
+ return features;
+}
+
+int netif_skb_features(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
+ int features = skb->dev->features;
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
- } else if (!skb->vlan_tci)
- return dev->features;
+ } else if (!vlan_tx_tag_present(skb)) {
+ return harmonize_features(skb, protocol, features);
+ }
- if (protocol != htons(ETH_P_8021Q))
- return dev->features & dev->vlan_features;
- else
- return 0;
+ features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
+
+ if (protocol != htons(ETH_P_8021Q)) {
+ return harmonize_features(skb, protocol, features);
+ } else {
+ features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+ NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
+ return harmonize_features(skb, protocol, features);
+ }
}
-EXPORT_SYMBOL(netif_get_vlan_features);
+EXPORT_SYMBOL(netif_skb_features);
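
The new netif_skb_features()/harmonize_features() pair computes per-skb transmit features in one place instead of the old dev_can_checksum() check. As a rough illustration of the same decision logic, here is a minimal user-space sketch; the flag values are mock constants for the sketch, not the kernel's NETIF_F_* definitions:

/* Sketch only: mock flag values, not the kernel's NETIF_F_* bits. */
#include <stdio.h>

#define F_SG        0x1
#define F_V4_CSUM   0x2
#define F_V6_CSUM   0x4
#define F_ALL_CSUM  (F_V4_CSUM | F_V6_CSUM)

static int harmonize(int features, int ipv4, int highdma_ok)
{
	int can_csum = (ipv4 && (features & F_V4_CSUM)) ||
		       (!ipv4 && (features & F_V6_CSUM));

	if (!can_csum)
		features &= ~(F_ALL_CSUM | F_SG);	/* software csum + linear skb */
	else if (!highdma_ok)
		features &= ~F_SG;			/* device cannot DMA high pages */
	return features;
}

int main(void)
{
	printf("%#x\n", harmonize(F_SG | F_V4_CSUM, 1, 1));	/* keeps SG and checksum */
	printf("%#x\n", harmonize(F_SG | F_V4_CSUM, 0, 1));	/* IPv6 skb: drops both */
	return 0;
}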
/*
* Returns true if either:
@@ -2047,22 +2044,13 @@ EXPORT_SYMBOL(netif_get_vlan_features);
* support DMA from it.
*/
static inline int skb_needs_linearize(struct sk_buff *skb,
- struct net_device *dev)
+ int features)
{
- if (skb_is_nonlinear(skb)) {
- int features = dev->features;
-
- if (vlan_tx_tag_present(skb))
- features &= dev->vlan_features;
-
- return (skb_has_frag_list(skb) &&
- !(features & NETIF_F_FRAGLIST)) ||
+ return skb_is_nonlinear(skb) &&
+ ((skb_has_frag_list(skb) &&
+ !(features & NETIF_F_FRAGLIST)) ||
(skb_shinfo(skb)->nr_frags &&
- (!(features & NETIF_F_SG) ||
- illegal_highdma(dev, skb)));
- }
-
- return 0;
+ !(features & NETIF_F_SG)));
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2072,6 +2060,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int rc = NETDEV_TX_OK;
if (likely(!skb->next)) {
+ int features;
+
/*
* If device doesn't need skb->dst, release it right now while
* it's hot in this cpu cache
@@ -2084,8 +2074,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb_orphan_try(skb);
+ features = netif_skb_features(skb);
+
if (vlan_tx_tag_present(skb) &&
- !(dev->features & NETIF_F_HW_VLAN_TX)) {
+ !(features & NETIF_F_HW_VLAN_TX)) {
skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
if (unlikely(!skb))
goto out;
@@ -2093,13 +2085,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb->vlan_tci = 0;
}
- if (netif_needs_gso(dev, skb)) {
- if (unlikely(dev_gso_segment(skb)))
+ if (netif_needs_gso(skb, features)) {
+ if (unlikely(dev_gso_segment(skb, features)))
goto out_kfree_skb;
if (skb->next)
goto gso;
} else {
- if (skb_needs_linearize(skb, dev) &&
+ if (skb_needs_linearize(skb, features) &&
__skb_linearize(skb))
goto out_kfree_skb;
@@ -2110,7 +2102,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_set_transport_header(skb,
skb_checksum_start_offset(skb));
- if (!dev_can_checksum(dev, skb) &&
+ if (!(features & NETIF_F_ALL_CSUM) &&
skb_checksum_help(skb))
goto out_kfree_skb;
}
@@ -2306,7 +2298,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
*/
if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
skb_dst_force(skb);
- __qdisc_update_bstats(q, skb->len);
+
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ qdisc_bstats_update(q, skb);
+
if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
@@ -2568,7 +2563,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
map = rcu_dereference(rxqueue->rps_map);
if (map) {
- if (map->len == 1) {
+ if (map->len == 1 &&
+ !rcu_dereference_raw(rxqueue->rps_flow_table)) {
tcpu = map->cpus[0];
if (cpu_online(tcpu))
cpu = tcpu;
@@ -3429,6 +3425,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
__skb_pull(skb, skb_headlen(skb));
skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
skb->vlan_tci = 0;
+ skb->dev = napi->dev;
+ skb->skb_iif = 0;
napi->skb = skb;
}
@@ -5529,34 +5527,6 @@ void netdev_run_todo(void)
}
}
-/**
- * dev_txq_stats_fold - fold tx_queues stats
- * @dev: device to get statistics from
- * @stats: struct rtnl_link_stats64 to hold results
- */
-void dev_txq_stats_fold(const struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
- unsigned int i;
- struct netdev_queue *txq;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- txq = netdev_get_tx_queue(dev, i);
- spin_lock_bh(&txq->_xmit_lock);
- tx_bytes += txq->tx_bytes;
- tx_packets += txq->tx_packets;
- tx_dropped += txq->tx_dropped;
- spin_unlock_bh(&txq->_xmit_lock);
- }
- if (tx_bytes || tx_packets || tx_dropped) {
- stats->tx_bytes = tx_bytes;
- stats->tx_packets = tx_packets;
- stats->tx_dropped = tx_dropped;
- }
-}
-EXPORT_SYMBOL(dev_txq_stats_fold);
-
/* Convert net_device_stats to rtnl_link_stats64. They have the same
* fields in the same order, with only the type differing.
*/
@@ -5600,7 +5570,6 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
} else {
netdev_stats_to_stats64(storage, &dev->stats);
- dev_txq_stats_fold(dev, storage);
}
storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
return storage;
@@ -5626,18 +5595,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
}
/**
- * alloc_netdev_mq - allocate network device
+ * alloc_netdev_mqs - allocate network device
* @sizeof_priv: size of private data to allocate space for
* @name: device name format string
* @setup: callback to initialize device
- * @queue_count: the number of subqueues to allocate
+ * @txqs: the number of TX subqueues to allocate
+ * @rxqs: the number of RX subqueues to allocate
*
* Allocates a struct net_device with private data area for driver use
* and performs basic initialization. Also allocates subqueue structs
- * for each queue on the device at the end of the netdevice.
+ * for each queue on the device.
*/
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
- void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ void (*setup)(struct net_device *),
+ unsigned int txqs, unsigned int rxqs)
{
struct net_device *dev;
size_t alloc_size;
@@ -5645,12 +5616,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
BUG_ON(strlen(name) >= sizeof(dev->name));
- if (queue_count < 1) {
+ if (txqs < 1) {
pr_err("alloc_netdev: Unable to allocate device "
"with zero queues.\n");
return NULL;
}
+#ifdef CONFIG_RPS
+ if (rxqs < 1) {
+ pr_err("alloc_netdev: Unable to allocate device "
+ "with zero RX queues.\n");
+ return NULL;
+ }
+#endif
+
alloc_size = sizeof(struct net_device);
if (sizeof_priv) {
/* ensure 32-byte alignment of private area */
@@ -5681,14 +5660,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
dev_net_set(dev, &init_net);
- dev->num_tx_queues = queue_count;
- dev->real_num_tx_queues = queue_count;
+ dev->num_tx_queues = txqs;
+ dev->real_num_tx_queues = txqs;
if (netif_alloc_netdev_queues(dev))
goto free_pcpu;
#ifdef CONFIG_RPS
- dev->num_rx_queues = queue_count;
- dev->real_num_rx_queues = queue_count;
+ dev->num_rx_queues = rxqs;
+ dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
goto free_pcpu;
#endif
@@ -5716,7 +5695,7 @@ free_p:
kfree(p);
return NULL;
}
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
/**
* free_netdev - free network device
@@ -6214,7 +6193,7 @@ static void __net_exit default_device_exit(struct net *net)
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
/* At exit all network devices must be removed from a network
- * namespace. Do this in the reverse order of registeration.
+ * namespace. Do this in the reverse order of registration.
* Do this across as many network namespaces as possible to
* improve batching efficiency.
*/
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 17741782a345..ff2302910b5e 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -817,7 +817,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (regs.len > reglen)
regs.len = reglen;
- regbuf = vmalloc(reglen);
+ regbuf = vzalloc(reglen);
if (!regbuf)
return -ENOMEM;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 82a4369ae150..a20e5d3bbfa0 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -181,8 +181,7 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
{
int ret = 0;
- if (rule->iifindex && (rule->iifindex != fl->iif) &&
- !(fl->flags & FLOWI_FLAG_MATCH_ANY_IIF))
+ if (rule->iifindex && (rule->iifindex != fl->iif))
goto out;
if (rule->oifindex && (rule->oifindex != fl->oif))
diff --git a/net/core/filter.c b/net/core/filter.c
index e8a6ac411ffb..afc58374ca96 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -85,6 +85,17 @@ enum {
BPF_S_JMP_JGT_X,
BPF_S_JMP_JSET_K,
BPF_S_JMP_JSET_X,
+ /* Ancillary data */
+ BPF_S_ANC_PROTOCOL,
+ BPF_S_ANC_PKTTYPE,
+ BPF_S_ANC_IFINDEX,
+ BPF_S_ANC_NLATTR,
+ BPF_S_ANC_NLATTR_NEST,
+ BPF_S_ANC_MARK,
+ BPF_S_ANC_QUEUE,
+ BPF_S_ANC_HATYPE,
+ BPF_S_ANC_RXHASH,
+ BPF_S_ANC_CPU,
};
/* No hurry in this branch */
@@ -107,11 +118,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
{
if (k >= 0)
return skb_header_pointer(skb, k, size, buffer);
- else {
- if (k >= SKF_AD_OFF)
- return NULL;
- return __load_pointer(skb, k, size);
- }
+ return __load_pointer(skb, k, size);
}
/**
@@ -151,7 +158,7 @@ EXPORT_SYMBOL(sk_filter);
/**
* sk_run_filter - run a filter on a socket
* @skb: buffer to run the filter on
- * @filter: filter to apply
+ * @fentry: filter to apply
*
* Decode and apply filter instructions to the skb->data.
* Return length to keep, 0 for none. @skb is the data we are
@@ -269,7 +276,7 @@ load_w:
A = get_unaligned_be32(ptr);
continue;
}
- break;
+ return 0;
case BPF_S_LD_H_ABS:
k = K;
load_h:
@@ -278,7 +285,7 @@ load_h:
A = get_unaligned_be16(ptr);
continue;
}
- break;
+ return 0;
case BPF_S_LD_B_ABS:
k = K;
load_b:
@@ -287,7 +294,7 @@ load_b:
A = *(u8 *)ptr;
continue;
}
- break;
+ return 0;
case BPF_S_LD_W_LEN:
A = skb->len;
continue;
@@ -338,45 +345,35 @@ load_b:
case BPF_S_STX:
mem[K] = X;
continue;
- default:
- WARN_ON(1);
- return 0;
- }
-
- /*
- * Handle ancillary data, which are impossible
- * (or very difficult) to get parsing packet contents.
- */
- switch (k-SKF_AD_OFF) {
- case SKF_AD_PROTOCOL:
+ case BPF_S_ANC_PROTOCOL:
A = ntohs(skb->protocol);
continue;
- case SKF_AD_PKTTYPE:
+ case BPF_S_ANC_PKTTYPE:
A = skb->pkt_type;
continue;
- case SKF_AD_IFINDEX:
+ case BPF_S_ANC_IFINDEX:
if (!skb->dev)
return 0;
A = skb->dev->ifindex;
continue;
- case SKF_AD_MARK:
+ case BPF_S_ANC_MARK:
A = skb->mark;
continue;
- case SKF_AD_QUEUE:
+ case BPF_S_ANC_QUEUE:
A = skb->queue_mapping;
continue;
- case SKF_AD_HATYPE:
+ case BPF_S_ANC_HATYPE:
if (!skb->dev)
return 0;
A = skb->dev->type;
continue;
- case SKF_AD_RXHASH:
+ case BPF_S_ANC_RXHASH:
A = skb->rxhash;
continue;
- case SKF_AD_CPU:
+ case BPF_S_ANC_CPU:
A = raw_smp_processor_id();
continue;
- case SKF_AD_NLATTR: {
+ case BPF_S_ANC_NLATTR: {
struct nlattr *nla;
if (skb_is_nonlinear(skb))
@@ -392,7 +389,7 @@ load_b:
A = 0;
continue;
}
- case SKF_AD_NLATTR_NEST: {
+ case BPF_S_ANC_NLATTR_NEST: {
struct nlattr *nla;
if (skb_is_nonlinear(skb))
@@ -412,6 +409,7 @@ load_b:
continue;
}
default:
+ WARN_ON(1);
return 0;
}
}
@@ -600,6 +598,24 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
pc + ftest->jf + 1 >= flen)
return -EINVAL;
break;
+ case BPF_S_LD_W_ABS:
+ case BPF_S_LD_H_ABS:
+ case BPF_S_LD_B_ABS:
+#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
+ code = BPF_S_ANC_##CODE; \
+ break
+ switch (ftest->k) {
+ ANCILLARY(PROTOCOL);
+ ANCILLARY(PKTTYPE);
+ ANCILLARY(IFINDEX);
+ ANCILLARY(NLATTR);
+ ANCILLARY(NLATTR_NEST);
+ ANCILLARY(MARK);
+ ANCILLARY(QUEUE);
+ ANCILLARY(HATYPE);
+ ANCILLARY(RXHASH);
+ ANCILLARY(CPU);
+ }
}
ftest->code = code;
}
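
For reference, the ancillary loads classified above follow the classic-BPF convention of an absolute load at SKF_AD_OFF plus an SKF_AD_* index; with this change sk_chk_filter() rewrites them once at attach time instead of re-checking k on every packet. A hedged user-space example of a filter that sk_chk_filter() would now translate to BPF_S_ANC_IFINDEX (the ifindex value 2 is arbitrary):

#include <linux/filter.h>
#include <sys/socket.h>

static struct sock_filter insns[] = {
	/* A = skb->dev->ifindex (ancillary load) */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX),
	/* accept the packet if it arrived on ifindex 2, otherwise drop */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 2, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept, up to 64K */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
};

static struct sock_fprog prog = {
	.len = sizeof(insns) / sizeof(insns[0]),
	.filter = insns,
};

int attach_example_filter(int sockfd)
{
	/* sk_chk_filter() runs on attach and rewrites the first opcode */
	return setsockopt(sockfd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}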
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 72d9b50109fc..02dc2cbcbe86 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -923,7 +923,7 @@ void __netpoll_cleanup(struct netpoll *np)
skb_queue_purge(&npinfo->arp_tx);
skb_queue_purge(&npinfo->txq);
- cancel_rearming_delayed_work(&npinfo->tx_work);
+ cancel_delayed_work_sync(&npinfo->tx_work);
/* clean after last, unfinished work */
__skb_queue_purge(&npinfo->txq);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57f3bb3..2d65c6bb24c1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1121,8 +1121,7 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
return -EOPNOTSUPP;
if (af_ops->validate_link_af) {
- err = af_ops->validate_link_af(dev,
- tb[IFLA_AF_SPEC]);
+ err = af_ops->validate_link_af(dev, af);
if (err < 0)
return err;
}
@@ -1672,6 +1671,9 @@ replay:
snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
dest_net = rtnl_link_get_net(net, tb);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
+
dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
if (IS_ERR(dev))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 19d6c21220fd..d883dcc78b6b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,6 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
+ kmemcheck_annotate_variable(shinfo->destructor_arg);
if (fclone) {
struct sk_buff *child = skb + 1;
@@ -380,6 +381,8 @@ static void skb_release_head_state(struct sk_buff *skb)
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb->nfct);
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
@@ -2742,8 +2745,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
merge:
if (offset > headlen) {
- skbinfo->frags[0].page_offset += offset - headlen;
- skbinfo->frags[0].size -= offset - headlen;
+ unsigned int eat = offset - headlen;
+
+ skbinfo->frags[0].page_offset += eat;
+ skbinfo->frags[0].size -= eat;
+ skb->data_len -= eat;
+ skb->len -= eat;
offset = headlen;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index a6b9e8061f34..7dfed792434d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -157,7 +157,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
"sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
"sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
- "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
+ "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
"sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
@@ -173,7 +173,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-27" , "slock-28" , "slock-AF_CAN" ,
"slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
"slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
- "slock-AF_IEEE802154", "slock-AF_CAIF" ,
+ "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
"slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
@@ -189,7 +189,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-27" , "clock-28" , "clock-AF_CAN" ,
"clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
"clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
- "clock-AF_IEEE802154", "clock-AF_CAIF" ,
+ "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
"clock-AF_MAX"
};
@@ -1908,7 +1908,7 @@ static void sock_def_readable(struct sock *sk, int len)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
diff --git a/net/dcb/Makefile b/net/dcb/Makefile
index 9930f4cde818..c1282c9e64fa 100644
--- a/net/dcb/Makefile
+++ b/net/dcb/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_DCB) += dcbnl.o
+obj-$(CONFIG_DCB) += dcbnl.o dcbevent.o
diff --git a/net/dcb/dcbevent.c b/net/dcb/dcbevent.c
new file mode 100644
index 000000000000..665a8802105a
--- /dev/null
+++ b/net/dcb/dcbevent.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: John Fastabend <john.r.fastabend@intel.com>
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+
+static ATOMIC_NOTIFIER_HEAD(dcbevent_notif_chain);
+
+int register_dcbevent_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&dcbevent_notif_chain, nb);
+}
+EXPORT_SYMBOL(register_dcbevent_notifier);
+
+int unregister_dcbevent_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&dcbevent_notif_chain, nb);
+}
+EXPORT_SYMBOL(unregister_dcbevent_notifier);
+
+int call_dcbevent_notifiers(unsigned long val, void *v)
+{
+ return atomic_notifier_call_chain(&dcbevent_notif_chain, val, v);
+}
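
A hypothetical consumer of this chain might look as follows; dcb_setapp() in dcbnl.c below raises DCB_APP_EVENT with the affected struct dcb_app. The module and function names here are illustrative only, not part of this patch:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <net/dcbevent.h>
#include <net/dcbnl.h>

static int example_dcb_app_event(struct notifier_block *nb,
				 unsigned long event, void *ptr)
{
	struct dcb_app *app = ptr;

	if (event == DCB_APP_EVENT)
		pr_info("dcb app: selector %u protocol %u priority %u\n",
			app->selector, app->protocol, app->priority);
	return NOTIFY_OK;
}

static struct notifier_block example_dcb_nb = {
	.notifier_call = example_dcb_app_event,
};

static int __init example_dcb_init(void)
{
	return register_dcbevent_notifier(&example_dcb_nb);
}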
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 19ac2b985485..6b03f561caec 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -23,6 +23,7 @@
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
+#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>
@@ -66,6 +67,9 @@ static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
[DCB_ATTR_BCN] = {.type = NLA_NESTED},
[DCB_ATTR_APP] = {.type = NLA_NESTED},
+ [DCB_ATTR_IEEE] = {.type = NLA_NESTED},
+ [DCB_ATTR_DCBX] = {.type = NLA_U8},
+ [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
};
/* DCB priority flow control to User Priority nested attributes */
@@ -122,6 +126,7 @@ static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
+ [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
};
/* DCB capabilities nested attributes. */
@@ -167,6 +172,28 @@ static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};
+/* IEEE 802.1Qaz nested attributes. */
+static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
+ [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
+ [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
+ [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
+};
+
+static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
+ [DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
+};
+
+/* DCB feature configuration nested attributes. */
+static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
+ [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
+ [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
+ [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
+ [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
+};
+
+static LIST_HEAD(dcb_app_list);
+static DEFINE_SPINLOCK(dcb_lock);
+
/* standard netlink reply call */
static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
u32 seq, u16 flags)
@@ -556,7 +583,7 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
u8 up, idtype;
int ret = -EINVAL;
- if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
+ if (!tb[DCB_ATTR_APP])
goto out;
ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
@@ -577,7 +604,16 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
goto out;
id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
- up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+
+ if (netdev->dcbnl_ops->getapp) {
+ up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+ } else {
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+ up = dcb_getapp(netdev, &app);
+ }
/* send this back */
dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -622,12 +658,12 @@ out:
static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
u32 pid, u32 seq, u16 flags)
{
- int ret = -EINVAL;
+ int err, ret = -EINVAL;
u16 id;
u8 up, idtype;
struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
- if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->setapp)
+ if (!tb[DCB_ATTR_APP])
goto out;
ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
@@ -651,9 +687,18 @@ static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
- ret = dcbnl_reply(netdev->dcbnl_ops->setapp(netdev, idtype, id, up),
- RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
- pid, seq, flags);
+ if (netdev->dcbnl_ops->setapp) {
+ err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
+ } else {
+ struct dcb_app app;
+ app.selector = idtype;
+ app.protocol = id;
+ app.priority = up;
+ err = dcb_setapp(netdev, &app);
+ }
+
+ ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
+ pid, seq, flags);
out:
return ret;
}
@@ -1118,6 +1163,281 @@ err:
return ret;
}
+/* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot
+ * be completed, the entire message is aborted and an error value is
+ * returned. No attempt is made to reconcile the case where only part
+ * of the command can be completed.
+ */
+static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+ struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
+ int err = -EOPNOTSUPP;
+
+ if (!ops)
+ goto err;
+
+ err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
+ tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
+ if (err)
+ goto err;
+
+ if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
+ struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
+ err = ops->ieee_setets(netdev, ets);
+ if (err)
+ goto err;
+ }
+
+ if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
+ struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
+ err = ops->ieee_setpfc(netdev, pfc);
+ if (err)
+ goto err;
+ }
+
+ if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
+ struct nlattr *attr;
+ int rem;
+
+ nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
+ struct dcb_app *app_data;
+ if (nla_type(attr) != DCB_ATTR_IEEE_APP)
+ continue;
+ app_data = nla_data(attr);
+ if (ops->ieee_setapp)
+ err = ops->ieee_setapp(netdev, app_data);
+ else
+ err = dcb_setapp(netdev, app_data);
+ if (err)
+ goto err;
+ }
+ }
+
+err:
+ dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
+ pid, seq, flags);
+ return err;
+}
+
+
+/* Handle IEEE 802.1Qaz GET commands. */
+static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct dcbmsg *dcb;
+ struct nlattr *ieee, *app;
+ struct dcb_app_type *itr;
+ const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+ int err;
+
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+
+ dcb = NLMSG_DATA(nlh);
+ dcb->dcb_family = AF_UNSPEC;
+ dcb->cmd = DCB_CMD_IEEE_GET;
+
+ NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
+
+ ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
+ if (!ieee)
+ goto nla_put_failure;
+
+ if (ops->ieee_getets) {
+ struct ieee_ets ets;
+ err = ops->ieee_getets(netdev, &ets);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
+ }
+
+ if (ops->ieee_getpfc) {
+ struct ieee_pfc pfc;
+ err = ops->ieee_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
+ }
+
+ app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
+ if (!app)
+ goto nla_put_failure;
+
+ spin_lock(&dcb_lock);
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
+ err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
+ &itr->app);
+ if (err) {
+ spin_unlock(&dcb_lock);
+ goto nla_put_failure;
+ }
+ }
+ }
+ spin_unlock(&dcb_lock);
+ nla_nest_end(skb, app);
+
+ nla_nest_end(skb, ieee);
+ nlmsg_end(skb, nlh);
+
+ return rtnl_unicast(skb, &init_net, pid);
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+nlmsg_failure:
+ kfree_skb(skb);
+ return -1;
+}
+
+/* DCBX configuration */
+static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ int ret;
+
+ if (!netdev->dcbnl_ops->getdcbx)
+ return -EOPNOTSUPP;
+
+ ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
+ DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
+
+ return ret;
+}
+
+static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ int ret;
+ u8 value;
+
+ if (!netdev->dcbnl_ops->setdcbx)
+ return -EOPNOTSUPP;
+
+ if (!tb[DCB_ATTR_DCBX])
+ return -EINVAL;
+
+ value = nla_get_u8(tb[DCB_ATTR_DCBX]);
+
+ ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
+ RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
+ pid, seq, flags);
+
+ return ret;
+}
+
+static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ struct sk_buff *dcbnl_skb;
+ struct nlmsghdr *nlh;
+ struct dcbmsg *dcb;
+ struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
+ u8 value;
+ int ret, i;
+ int getall = 0;
+
+ if (!netdev->dcbnl_ops->getfeatcfg)
+ return -EOPNOTSUPP;
+
+ if (!tb[DCB_ATTR_FEATCFG])
+ return -EINVAL;
+
+ ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
+ dcbnl_featcfg_nest);
+ if (ret)
+ goto err_out;
+
+ dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!dcbnl_skb) {
+ ret = -ENOBUFS;
+ goto err_out;
+ }
+
+ nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+
+ dcb = NLMSG_DATA(nlh);
+ dcb->dcb_family = AF_UNSPEC;
+ dcb->cmd = DCB_CMD_GFEATCFG;
+
+ nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
+ if (!nest) {
+ ret = -EMSGSIZE;
+ goto nla_put_failure;
+ }
+
+ if (data[DCB_FEATCFG_ATTR_ALL])
+ getall = 1;
+
+ for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
+ if (!getall && !data[i])
+ continue;
+
+ ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
+ if (!ret)
+ ret = nla_put_u8(dcbnl_skb, i, value);
+
+ if (ret) {
+ nla_nest_cancel(dcbnl_skb, nest);
+ goto nla_put_failure;
+ }
+ }
+ nla_nest_end(dcbnl_skb, nest);
+
+ nlmsg_end(dcbnl_skb, nlh);
+
+ return rtnl_unicast(dcbnl_skb, &init_net, pid);
+nla_put_failure:
+ nlmsg_cancel(dcbnl_skb, nlh);
+nlmsg_failure:
+ kfree_skb(dcbnl_skb);
+err_out:
+ return ret;
+}
+
+static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
+ int ret, i;
+ u8 value;
+
+ if (!netdev->dcbnl_ops->setfeatcfg)
+ return -ENOTSUPP;
+
+ if (!tb[DCB_ATTR_FEATCFG])
+ return -EINVAL;
+
+ ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
+ dcbnl_featcfg_nest);
+
+ if (ret)
+ goto err;
+
+ for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
+ if (data[i] == NULL)
+ continue;
+
+ value = nla_get_u8(data[i]);
+
+ ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
+
+ if (ret)
+ goto err;
+ }
+err:
+ dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
+ pid, seq, flags);
+
+ return ret;
+}
+
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
@@ -1223,6 +1543,30 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
nlh->nlmsg_flags);
goto out;
+ case DCB_CMD_IEEE_SET:
+ ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags);
+ goto out;
+ case DCB_CMD_IEEE_GET:
+ ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags);
+ goto out;
+ case DCB_CMD_GDCBX:
+ ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags);
+ goto out;
+ case DCB_CMD_SDCBX:
+ ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags);
+ goto out;
+ case DCB_CMD_GFEATCFG:
+ ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags);
+ goto out;
+ case DCB_CMD_SFEATCFG:
+ ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags);
+ goto out;
default:
goto errout;
}
@@ -1233,8 +1577,95 @@ out:
return ret;
}
+/**
+ * dcb_getapp - retrieve the DCBX application user priority
+ *
+ * On success, returns a non-zero 802.1p user priority bitmap;
+ * otherwise returns 0 as the invalid user priority bitmap to
+ * indicate an error.
+ */
+u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app_type *itr;
+ u8 prio = 0;
+
+ spin_lock(&dcb_lock);
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->app.selector == app->selector &&
+ itr->app.protocol == app->protocol &&
+ (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ prio = itr->app.priority;
+ break;
+ }
+ }
+ spin_unlock(&dcb_lock);
+
+ return prio;
+}
+EXPORT_SYMBOL(dcb_getapp);
+
+/**
+ * dcb_setapp - add dcb application data to app list
+ *
+ * Priority 0 is the default priority; this removes applications
+ * from the app list if the priority is set to zero.
+ */
+u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
+{
+ struct dcb_app_type *itr;
+
+ spin_lock(&dcb_lock);
+ /* Search for existing match and replace */
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->app.selector == new->selector &&
+ itr->app.protocol == new->protocol &&
+ (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ if (new->priority)
+ itr->app.priority = new->priority;
+ else {
+ list_del(&itr->list);
+ kfree(itr);
+ }
+ goto out;
+ }
+ }
+ /* App type does not exist, add new application type */
+ if (new->priority) {
+ struct dcb_app_type *entry;
+ entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
+ if (!entry) {
+ spin_unlock(&dcb_lock);
+ return -ENOMEM;
+ }
+
+ memcpy(&entry->app, new, sizeof(*new));
+ strncpy(entry->name, dev->name, IFNAMSIZ);
+ list_add(&entry->list, &dcb_app_list);
+ }
+out:
+ spin_unlock(&dcb_lock);
+ call_dcbevent_notifiers(DCB_APP_EVENT, new);
+ return 0;
+}
+EXPORT_SYMBOL(dcb_setapp);
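
A driver that does not implement the getapp/setapp callbacks could rely on this shared table. A hedged sketch of such a caller follows; the FCoE ethertype selector and the function name are assumptions for illustration, not something this patch mandates:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <net/dcbnl.h>

static u8 example_fcoe_priority(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = ETH_P_FCOE,
	};

	/* 0 means "no priority stored", per the dcb_getapp() comment above */
	return dcb_getapp(dev, &app);
}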
+
+static void dcb_flushapp(void)
+{
+ struct dcb_app_type *app;
+ struct dcb_app_type *tmp;
+
+ spin_lock(&dcb_lock);
+ list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
+ list_del(&app->list);
+ kfree(app);
+ }
+ spin_unlock(&dcb_lock);
+}
+
static int __init dcbnl_init(void)
{
+ INIT_LIST_HEAD(&dcb_app_list);
+
rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);
@@ -1246,7 +1677,6 @@ static void __exit dcbnl_exit(void)
{
rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
+ dcb_flushapp();
}
module_exit(dcbnl_exit);
-
-
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index ad6dffd9070e..b75968a04017 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -49,7 +49,9 @@ config NET_DCCPPROBE
what was just said, you don't need it: say N.
Documentation on how to use DCCP connection probing can be found
- at http://linux-net.osdl.org/index.php/DccpProbe
+ at:
+
+ http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe
To compile this code as a module, choose M here: the
module will be called dccp_probe.
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 45087052d894..5fdb07229017 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -426,7 +426,8 @@ static inline void dccp_update_gsr(struct sock *sk, u64 seq)
{
struct dccp_sock *dp = dccp_sk(sk);
- dp->dccps_gsr = seq;
+ if (after48(seq, dp->dccps_gsr))
+ dp->dccps_gsr = seq;
/* Sequence validity window depends on remote Sequence Window (7.5.1) */
dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
/*
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 15af247ea007..8cde009e8b85 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -260,7 +260,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
*/
if (time_before(now, (dp->dccps_rate_last +
sysctl_dccp_sync_ratelimit)))
- return 0;
+ return -1;
DCCP_WARN("Step 6 failed for %s packet, "
"(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 563943822e58..42348824ee31 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -21,7 +21,8 @@
/* Boundary values */
static int zero = 0,
u8_max = 0xFF;
-static unsigned long seqw_min = 32;
+static unsigned long seqw_min = DCCPF_SEQ_WMIN,
+ seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */
static struct ctl_table dccp_default_table[] = {
{
@@ -31,6 +32,7 @@ static struct ctl_table dccp_default_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = &seqw_min, /* RFC 4340, 7.5.2 */
+ .extra2 = &seqw_max,
},
{
.procname = "rx_ccid",
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0ba15633c418..0dcaa903e00e 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1130,7 +1130,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
/*
* This processes a device up event. We only start up
* the loopback device & ethernet devices with correct
- * MAC addreses automatically. Others must be started
+ * MAC addresses automatically. Others must be started
* specifically.
*
* FIXME: How should we configure the loopback address ? If we could dispense
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 6112a12578b2..3fb14b7c13cf 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -390,7 +390,7 @@ static int dsa_remove(struct platform_device *pdev)
if (dst->link_poll_needed)
del_timer_sync(&dst->link_poll_timer);
- flush_scheduled_work();
+ flush_work_sync(&dst->link_poll_work);
for (i = 0; i < dst->pd->nr_chips; i++) {
struct dsa_switch *ds = dst->ds[i];
@@ -428,7 +428,7 @@ static void __exit dsa_cleanup_module(void)
}
module_exit(dsa_cleanup_module);
-MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>")
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 15dcc1a586b4..0c2826337919 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -265,13 +265,13 @@ static void ec_tx_done(struct sk_buff *skb, int result)
static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
- struct sock *sk = sock->sk;
struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
struct net_device *dev;
struct ec_addr addr;
int err;
unsigned char port, cb;
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
+ struct sock *sk = sock->sk;
struct sk_buff *skb;
struct ec_cb *eb;
#endif
@@ -488,10 +488,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
error_free_buf:
vfree(userbuf);
+error:
#else
err = -EPROTOTYPE;
#endif
- error:
mutex_unlock(&econet_mutex);
return err;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f1d814..44d2b42fda56 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev)
EXPORT_SYMBOL(ether_setup);
/**
- * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this Ethernet device
- * @queue_count: The number of queues this device has.
+ * @txqs: The number of TX queues this device has.
+ * @rxqs: The number of RX queues this device has.
*
* Fill in the fields of the device structure with Ethernet-generic
* values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup);
* this private data area.
*/
-struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+ unsigned int rxqs)
{
- return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+ return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
}
-EXPORT_SYMBOL(alloc_etherdev_mq);
+EXPORT_SYMBOL(alloc_etherdev_mqs);
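
A hypothetical driver probe path using the split TX/RX queue counts might look like this (all names below are illustrative, not from this patch):

#include <linux/etherdevice.h>

#define EX_NUM_TXQ 8
#define EX_NUM_RXQ 4

struct ex_priv {
	unsigned long flags;
};

static struct net_device *ex_alloc_netdev(void)
{
	/* 8 TX queues, 4 RX queues; the RX count only matters with CONFIG_RPS */
	return alloc_etherdev_mqs(sizeof(struct ex_priv),
				  EX_NUM_TXQ, EX_NUM_RXQ);
}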
static size_t _format_mac_addr(char *buf, int buflen,
const unsigned char *addr, int len)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9e95d7fb6d5a..a5a1050595d1 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -432,7 +432,9 @@ config INET_DIAG
---help---
Support for INET (TCP, DCCP, etc) socket monitoring interface used by
native Linux tools such as ss. ss is included in iproute2, currently
- downloadable at <http://linux-net.osdl.org/index.php/Iproute2>.
+ downloadable at:
+
+ http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2
If unsure, say Y.
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f2b61107df6c..45b89d7bda5a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -880,6 +880,19 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
EXPORT_SYMBOL(inet_ioctl);
+#ifdef CONFIG_COMPAT
+int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ int err = -ENOIOCTLCMD;
+
+ if (sk->sk_prot->compat_ioctl)
+ err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
+
+ return err;
+}
+#endif
+
const struct proto_ops inet_stream_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
@@ -903,6 +916,7 @@ const struct proto_ops inet_stream_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
+ .compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_stream_ops);
@@ -929,6 +943,7 @@ const struct proto_ops inet_dgram_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
+ .compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);
@@ -959,6 +974,7 @@ static const struct proto_ops inet_sockraw_ops = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
+ .compat_ioctl = inet_compat_ioctl,
#endif
};
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec6dce0..86961bec70ab 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
skb->ip_summed = CHECKSUM_NONE;
- ah = (struct ip_auth_hdr *)skb->data;
- iph = ip_hdr(skb);
- ihl = ip_hdrlen(skb);
if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
goto out;
nfrags = err;
+ ah = (struct ip_auth_hdr *)skb->data;
+ iph = ip_hdr(skb);
+ ihl = ip_hdrlen(skb);
+
work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
if (!work_iph)
goto out;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b961dbc..7927589813b5 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,13 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
return 0;
}
- if (__in_dev_get_rcu(dev)) {
- IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
+ if (__in_dev_get_rtnl(dev)) {
+ IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
return 0;
}
return -ENXIO;
}
-/* must be called with rcu_read_lock() */
static int arp_req_set_public(struct net *net, struct arpreq *r,
struct net_device *dev)
{
@@ -1143,6 +1142,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
return err;
}
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+ struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+ int err = -ENXIO;
+
+ if (neigh) {
+ if (neigh->nud_state & ~NUD_NOARP)
+ err = neigh_update(neigh, NULL, NUD_FAILED,
+ NEIGH_UPDATE_F_OVERRIDE|
+ NEIGH_UPDATE_F_ADMIN);
+ neigh_release(neigh);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
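
A hedged example of how a caller might use the newly exported helper (the function and variable names are invented for illustration):

#include <linux/netdevice.h>
#include <net/arp.h>

static void example_peer_lost(struct net_device *dev, __be32 peer_ip)
{
	/* marks any existing neighbour entry NUD_FAILED; -ENXIO if none */
	if (arp_invalidate(dev, peer_ip))
		netdev_dbg(dev, "no ARP entry to invalidate\n");
}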
static int arp_req_delete_public(struct net *net, struct arpreq *r,
struct net_device *dev)
{
@@ -1163,7 +1179,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
{
int err;
__be32 ip;
- struct neighbour *neigh;
if (r->arp_flags & ATF_PUBL)
return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1196,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
if (!dev)
return -EINVAL;
}
- err = -ENXIO;
- neigh = neigh_lookup(&arp_tbl, &ip, dev);
- if (neigh) {
- if (neigh->nud_state & ~NUD_NOARP)
- err = neigh_update(neigh, NULL, NUD_FAILED,
- NEIGH_UPDATE_F_OVERRIDE|
- NEIGH_UPDATE_F_ADMIN);
- neigh_release(neigh);
- }
- return err;
+ return arp_invalidate(dev, ip);
}
/*
@@ -1226,10 +1232,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!(r.arp_flags & ATF_NETMASK))
((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
htonl(0xFFFFFFFFUL);
- rcu_read_lock();
+ rtnl_lock();
if (r.arp_dev[0]) {
err = -ENODEV;
- dev = dev_get_by_name_rcu(net, r.arp_dev);
+ dev = __dev_get_by_name(net, r.arp_dev);
if (dev == NULL)
goto out;
@@ -1256,7 +1262,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
break;
}
out:
- rcu_read_unlock();
+ rtnl_unlock();
if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
err = -EFAULT;
return err;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index d3a1112b9d9c..1d2cdd43a878 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -159,13 +159,19 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
struct flowi fl = {
.fl4_dst = addr,
- .flags = FLOWI_FLAG_MATCH_ANY_IIF
};
struct fib_result res = { 0 };
struct net_device *dev = NULL;
+ struct fib_table *local_table;
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ res.r = NULL;
+#endif
rcu_read_lock();
- if (fib_lookup(net, &fl, &res)) {
+ local_table = fib_get_table(net, RT_TABLE_LOCAL);
+ if (!local_table ||
+ fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
rcu_read_unlock();
return NULL;
}
@@ -987,7 +993,11 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
rt_cache_flush(dev_net(dev), 0);
break;
case NETDEV_UNREGISTER_BATCH:
- rt_cache_flush_batch();
+ /* The batch unregister is only called on the first
+ * device in the list of devices being unregistered.
+ * Therefore we should not pass dev_net(dev) in here.
+ */
+ rt_cache_flush_batch(NULL);
break;
}
return NOTIFY_DONE;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e318153f14..97e5fb765265 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
- sk2->sk_state == TCP_LISTEN) {
+ ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@ again:
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
- if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+ if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+ !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d9bc85751c74..a96e65674ac3 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -475,7 +475,7 @@ static int cleanup_once(unsigned long ttl)
struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
{
struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
- struct inet_peer_base *base = family_to_base(AF_INET);
+ struct inet_peer_base *base = family_to_base(daddr->family);
struct inet_peer *p;
/* Look up for the address quickly, lockless.
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e6215bdd96c0..a1151b8adf3c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -45,6 +45,7 @@
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
+#include <net/inet_ecn.h>
/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
* code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
@@ -70,11 +71,28 @@ struct ipq {
__be32 daddr;
__be16 id;
u8 protocol;
+ u8 ecn; /* RFC3168 support */
int iif;
unsigned int rid;
struct inet_peer *peer;
};
+#define IPFRAG_ECN_CLEAR 0x01 /* one frag had INET_ECN_NOT_ECT */
+#define IPFRAG_ECN_SET_CE 0x04 /* one frag had INET_ECN_CE */
+
+static inline u8 ip4_frag_ecn(u8 tos)
+{
+ tos = (tos & INET_ECN_MASK) + 1;
+ /*
+ * After the last operation we have (in binary):
+ * INET_ECN_NOT_ECT => 001
+ * INET_ECN_ECT_1 => 010
+ * INET_ECN_ECT_0 => 011
+ * INET_ECN_CE => 100
+ */
+ return (tos & 2) ? 0 : tos;
+}
+
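
The binary trick in ip4_frag_ecn() can be checked in isolation; a small user-space sketch using the standard ECN codepoint values (NOT_ECT=0, ECT(1)=1, ECT(0)=2, CE=3, mask 0x3) and the flag values defined above:

#include <stdio.h>

#define INET_ECN_MASK 3

static unsigned char ip4_frag_ecn(unsigned char tos)
{
	tos = (tos & INET_ECN_MASK) + 1;
	return (tos & 2) ? 0 : tos;	/* 0 for ECT(0)/ECT(1), else a flag */
}

int main(void)
{
	/* expected: NOT_ECT -> 0x01 (CLEAR), ECT(1)/ECT(0) -> 0, CE -> 0x04 (SET_CE) */
	for (unsigned char tos = 0; tos < 4; tos++)
		printf("codepoint %u -> 0x%02x\n", tos, ip4_frag_ecn(tos));
	return 0;
}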
static struct inet_frags ip4_frags;
int ip_frag_nqueues(struct net *net)
@@ -137,6 +155,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, void *a)
qp->protocol = arg->iph->protocol;
qp->id = arg->iph->id;
+ qp->ecn = ip4_frag_ecn(arg->iph->tos);
qp->saddr = arg->iph->saddr;
qp->daddr = arg->iph->daddr;
qp->user = arg->user;
@@ -316,6 +335,7 @@ static int ip_frag_reinit(struct ipq *qp)
qp->q.fragments = NULL;
qp->q.fragments_tail = NULL;
qp->iif = 0;
+ qp->ecn = 0;
return 0;
}
@@ -328,6 +348,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
int flags, offset;
int ihl, end;
int err = -ENOENT;
+ u8 ecn;
if (qp->q.last_in & INET_FRAG_COMPLETE)
goto err;
@@ -339,6 +360,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
goto err;
}
+ ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
offset = ntohs(ip_hdr(skb)->frag_off);
flags = offset & ~IP_OFFSET;
offset &= IP_OFFSET;
@@ -472,6 +494,7 @@ found:
}
qp->q.stamp = skb->tstamp;
qp->q.meat += skb->len;
+ qp->ecn |= ecn;
atomic_add(skb->truesize, &qp->q.net->mem);
if (offset == 0)
qp->q.last_in |= INET_FRAG_FIRST_IN;
@@ -583,6 +606,17 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
iph = ip_hdr(head);
iph->frag_off = 0;
iph->tot_len = htons(len);
+ /* RFC3168 5.3 Fragmentation support
+ * If one fragment had INET_ECN_NOT_ECT,
+ * reassembled frame also has INET_ECN_NOT_ECT
+ * Elif one fragment had INET_ECN_CE
+ * reassembled frame also has INET_ECN_CE
+ */
+ if (qp->ecn & IPFRAG_ECN_CLEAR)
+ iph->tos &= ~INET_ECN_MASK;
+ else if (qp->ecn & IPFRAG_ECN_SET_CE)
+ iph->tos |= INET_ECN_CE;
+
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
qp->q.fragments = NULL;
qp->q.fragments_tail = NULL;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3f3a9afd73e0..8b65a12654e7 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -60,6 +60,7 @@
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
@@ -1434,6 +1435,81 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
}
}
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req {
+ struct in_addr src;
+ struct in_addr grp;
+ compat_ulong_t pktcnt;
+ compat_ulong_t bytecnt;
+ compat_ulong_t wrong_if;
+};
+
+struct compat_sioc_vif_req {
+ vifi_t vifi; /* Which iface */
+ compat_ulong_t icount;
+ compat_ulong_t ocount;
+ compat_ulong_t ibytes;
+ compat_ulong_t obytes;
+};
+
+int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+ struct compat_sioc_sg_req sr;
+ struct compat_sioc_vif_req vr;
+ struct vif_device *vif;
+ struct mfc_cache *c;
+ struct net *net = sock_net(sk);
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+ if (mrt == NULL)
+ return -ENOENT;
+
+ switch (cmd) {
+ case SIOCGETVIFCNT:
+ if (copy_from_user(&vr, arg, sizeof(vr)))
+ return -EFAULT;
+ if (vr.vifi >= mrt->maxvif)
+ return -EINVAL;
+ read_lock(&mrt_lock);
+ vif = &mrt->vif_table[vr.vifi];
+ if (VIF_EXISTS(mrt, vr.vifi)) {
+ vr.icount = vif->pkt_in;
+ vr.ocount = vif->pkt_out;
+ vr.ibytes = vif->bytes_in;
+ vr.obytes = vif->bytes_out;
+ read_unlock(&mrt_lock);
+
+ if (copy_to_user(arg, &vr, sizeof(vr)))
+ return -EFAULT;
+ return 0;
+ }
+ read_unlock(&mrt_lock);
+ return -EADDRNOTAVAIL;
+ case SIOCGETSGCNT:
+ if (copy_from_user(&sr, arg, sizeof(sr)))
+ return -EFAULT;
+
+ rcu_read_lock();
+ c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
+ if (c) {
+ sr.pktcnt = c->mfc_un.res.pkt;
+ sr.bytecnt = c->mfc_un.res.bytes;
+ sr.wrong_if = c->mfc_un.res.wrong_if;
+ rcu_read_unlock();
+
+ if (copy_to_user(arg, &sr, sizeof(sr)))
+ return -EFAULT;
+ return 0;
+ }
+ rcu_read_unlock();
+ return -EADDRNOTAVAIL;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#endif
+
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3fac340a28d5..e855fffaed95 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
struct arpt_entry *iter;
unsigned int cpu;
unsigned int i;
- unsigned int curcpu = get_cpu();
-
- /* Instead of clearing (by a previous call to memset())
- * the counters and using adds, we set the counters
- * with data used by 'current' CPU
- *
- * Bottom half has to be disabled to prevent deadlock
- * if new softirq were to run and call ipt_do_table
- */
- local_bh_disable();
- i = 0;
- xt_entry_foreach(iter, t->entries[curcpu], t->size) {
- SET_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
- ++i;
- }
- local_bh_enable();
- /* Processing counters from other cpus, we can let bottom half enabled,
- * (preemption is disabled)
- */
for_each_possible_cpu(cpu) {
- if (cpu == curcpu)
- continue;
+ seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
i = 0;
- local_bh_disable();
- xt_info_wrlock(cpu);
xt_entry_foreach(iter, t->entries[cpu], t->size) {
- ADD_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
+ u64 bcnt, pcnt;
+ unsigned int start;
+
+ do {
+ start = read_seqbegin(lock);
+ bcnt = iter->counters.bcnt;
+ pcnt = iter->counters.pcnt;
+ } while (read_seqretry(lock, start));
+
+ ADD_COUNTER(counters[i], bcnt, pcnt);
++i;
}
- xt_info_wrunlock(cpu);
- local_bh_enable();
}
- put_cpu();
}
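
The reader side now uses the lockless seqcount retry idiom instead of taking each per-cpu write lock; a generic sketch of that pattern, with an illustrative structure rather than the xtables one (writers take write_seqlock() around counter updates):

#include <linux/seqlock.h>
#include <linux/types.h>

struct ex_counter {
	seqlock_t lock;
	u64 bytes, packets;
};

static void ex_read_counter(struct ex_counter *c, u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = read_seqbegin(&c->lock);
		*bytes = c->bytes;
		*packets = c->packets;
	} while (read_seqretry(&c->lock, start));	/* retry if a writer raced */
}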
static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
* about).
*/
countersize = sizeof(struct xt_counters) * private->number;
- counters = vmalloc(countersize);
+ counters = vzalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
struct arpt_entry *iter;
ret = 0;
- counters = vmalloc(num_counters * sizeof(struct xt_counters));
+ counters = vzalloc(num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto out;
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index b8ddcc480ed9..a5e52a9f0a12 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -60,12 +60,12 @@ static int checkentry(const struct xt_tgchk_param *par)
if (mangle->flags & ~ARPT_MANGLE_MASK ||
!(mangle->flags & ARPT_MANGLE_MASK))
- return false;
+ return -EINVAL;
if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT &&
mangle->target != XT_CONTINUE)
- return false;
- return true;
+ return -EINVAL;
+ return 0;
}
static struct xt_target arpt_mangle_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a846d633b3b6..652efea013dc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
struct ipt_entry *iter;
unsigned int cpu;
unsigned int i;
- unsigned int curcpu = get_cpu();
-
- /* Instead of clearing (by a previous call to memset())
- * the counters and using adds, we set the counters
- * with data used by 'current' CPU.
- *
- * Bottom half has to be disabled to prevent deadlock
- * if new softirq were to run and call ipt_do_table
- */
- local_bh_disable();
- i = 0;
- xt_entry_foreach(iter, t->entries[curcpu], t->size) {
- SET_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
- ++i;
- }
- local_bh_enable();
- /* Processing counters from other cpus, we can let bottom half enabled,
- * (preemption is disabled)
- */
for_each_possible_cpu(cpu) {
- if (cpu == curcpu)
- continue;
+ seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
i = 0;
- local_bh_disable();
- xt_info_wrlock(cpu);
xt_entry_foreach(iter, t->entries[cpu], t->size) {
- ADD_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
+ u64 bcnt, pcnt;
+ unsigned int start;
+
+ do {
+ start = read_seqbegin(lock);
+ bcnt = iter->counters.bcnt;
+ pcnt = iter->counters.pcnt;
+ } while (read_seqretry(lock, start));
+
+ ADD_COUNTER(counters[i], bcnt, pcnt);
++i; /* macro does multi eval of i */
}
- xt_info_wrunlock(cpu);
- local_bh_enable();
}
- put_cpu();
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
(other than comefrom, which userspace doesn't care
about). */
countersize = sizeof(struct xt_counters) * private->number;
- counters = vmalloc(countersize);
+ counters = vzalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct ipt_entry *iter;
ret = 0;
- counters = vmalloc(num_counters * sizeof(struct xt_counters));
+ counters = vzalloc(num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto out;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 37f8adb68c79..63f60fc5d26a 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -97,7 +97,7 @@ static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
if (ret)
- return ret;
+ return 0;
ret = seq_printf(s, "secctx=%s ", secctx);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a3d5ab786e81..6390ba299b3d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -76,6 +76,7 @@
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
static struct raw_hashinfo raw_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
@@ -838,6 +839,23 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
}
}
+#ifdef CONFIG_COMPAT
+static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case SIOCOUTQ:
+ case SIOCINQ:
+ return -ENOIOCTLCMD;
+ default:
+#ifdef CONFIG_IP_MROUTE
+ return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+ return -ENOIOCTLCMD;
+#endif
+ }
+}
+#endif
+
struct proto raw_prot = {
.name = "RAW",
.owner = THIS_MODULE,
@@ -860,6 +878,7 @@ struct proto raw_prot = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_raw_setsockopt,
.compat_getsockopt = compat_raw_getsockopt,
+ .compat_ioctl = compat_raw_ioctl,
#endif
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ae520963540f..788a3e74834e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -717,13 +717,15 @@ static inline int rt_is_expired(struct rtable *rth)
* Can be called by a softirq or a process.
* In the later case, we want to be reschedule if necessary
*/
-static void rt_do_flush(int process_context)
+static void rt_do_flush(struct net *net, int process_context)
{
unsigned int i;
struct rtable *rth, *next;
- struct rtable * tail;
for (i = 0; i <= rt_hash_mask; i++) {
+ struct rtable __rcu **pprev;
+ struct rtable *list;
+
if (process_context && need_resched())
cond_resched();
rth = rcu_dereference_raw(rt_hash_table[i].chain);
@@ -731,50 +733,32 @@ static void rt_do_flush(int process_context)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
-#ifdef CONFIG_NET_NS
- {
- struct rtable __rcu **prev;
- struct rtable *p;
- rth = rcu_dereference_protected(rt_hash_table[i].chain,
+ list = NULL;
+ pprev = &rt_hash_table[i].chain;
+ rth = rcu_dereference_protected(*pprev,
lockdep_is_held(rt_hash_lock_addr(i)));
- /* defer releasing the head of the list after spin_unlock */
- for (tail = rth; tail;
- tail = rcu_dereference_protected(tail->dst.rt_next,
- lockdep_is_held(rt_hash_lock_addr(i))))
- if (!rt_is_expired(tail))
- break;
- if (rth != tail)
- rt_hash_table[i].chain = tail;
-
- /* call rt_free on entries after the tail requiring flush */
- prev = &rt_hash_table[i].chain;
- for (p = rcu_dereference_protected(*prev,
+ while (rth) {
+ next = rcu_dereference_protected(rth->dst.rt_next,
lockdep_is_held(rt_hash_lock_addr(i)));
- p != NULL;
- p = next) {
- next = rcu_dereference_protected(p->dst.rt_next,
- lockdep_is_held(rt_hash_lock_addr(i)));
- if (!rt_is_expired(p)) {
- prev = &p->dst.rt_next;
+
+ if (!net ||
+ net_eq(dev_net(rth->dst.dev), net)) {
+ rcu_assign_pointer(*pprev, next);
+ rcu_assign_pointer(rth->dst.rt_next, list);
+ list = rth;
} else {
- *prev = next;
- rt_free(p);
+ pprev = &rth->dst.rt_next;
}
+ rth = next;
}
- }
-#else
- rth = rcu_dereference_protected(rt_hash_table[i].chain,
- lockdep_is_held(rt_hash_lock_addr(i)));
- rcu_assign_pointer(rt_hash_table[i].chain, NULL);
- tail = NULL;
-#endif
+
spin_unlock_bh(rt_hash_lock_addr(i));
- for (; rth != tail; rth = next) {
- next = rcu_dereference_protected(rth->dst.rt_next, 1);
- rt_free(rth);
+ for (; list; list = next) {
+ next = rcu_dereference_protected(list->dst.rt_next, 1);
+ rt_free(list);
}
}
}
@@ -922,13 +906,13 @@ void rt_cache_flush(struct net *net, int delay)
{
rt_cache_invalidate(net);
if (delay >= 0)
- rt_do_flush(!in_softirq());
+ rt_do_flush(net, !in_softirq());
}
/* Flush previous cache invalidated entries from the cache */
-void rt_cache_flush_batch(void)
+void rt_cache_flush_batch(struct net *net)
{
- rt_do_flush(!in_softirq());
+ rt_do_flush(net, !in_softirq());
}
static void rt_emergency_hash_rebuild(struct net *net)
@@ -2578,9 +2562,10 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
goto out;
/* RACE: Check return value of inet_select_addr instead. */
- if (rcu_dereference(dev_out->ip_ptr) == NULL)
- goto out; /* Wrong error code */
-
+ if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
+ err = -ENETUNREACH;
+ goto out;
+ }
if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
ipv4_is_lbcast(oldflp->fl4_dst)) {
if (!fl.fl4_src)
@@ -2641,8 +2626,12 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
}
if (res.type == RTN_LOCAL) {
- if (!fl.fl4_src)
- fl.fl4_src = fl.fl4_dst;
+ if (!fl.fl4_src) {
+ if (res.fi->fib_prefsrc)
+ fl.fl4_src = res.fi->fib_prefsrc;
+ else
+ fl.fl4_src = fl.fl4_dst;
+ }
dev_out = net->loopback_dev;
fl.oif = dev_out->ifindex;
res.fi = NULL;
@@ -2718,6 +2707,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
return NULL;
}
+static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+{
+ return 0;
+}
+
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
@@ -2727,6 +2721,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
.protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
.check = ipv4_blackhole_dst_check,
+ .default_mtu = ipv4_blackhole_default_mtu,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
};
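
The rewritten rt_do_flush() above walks each hash chain under the per-bucket lock, unlinks the entries that belong to the target namespace onto a private list, and frees them only after the lock is dropped. A standalone sketch of that collect-under-lock, free-after-unlock pattern follows; the types, the netns_id field, and the plain mutex standing in for the per-bucket spinlock are all illustrative.

/* Collect matching entries under the lock, free them outside it. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
    struct entry *next;
    int netns_id;           /* stand-in for dev_net(rth->dst.dev) */
};

static struct entry *chain;
static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;

static void flush_netns(int netns_id)
{
    struct entry **pprev, *e, *next, *doomed = NULL;

    pthread_mutex_lock(&chain_lock);
    pprev = &chain;
    for (e = chain; e; e = next) {
        next = e->next;
        if (netns_id < 0 || e->netns_id == netns_id) {
            *pprev = next;      /* unlink from the live chain */
            e->next = doomed;
            doomed = e;         /* park on the private list */
        } else {
            pprev = &e->next;
        }
    }
    pthread_mutex_unlock(&chain_lock);

    /* free outside the lock, like rt_free() after spin_unlock_bh() */
    for (e = doomed; e; e = next) {
        next = e->next;
        free(e);
    }
}

int main(void)
{
    for (int i = 0; i < 6; i++) {
        struct entry *e = calloc(1, sizeof(*e));
        e->netns_id = i % 2;
        e->next = chain;
        chain = e;
    }
    flush_netns(1);

    int left = 0;
    for (struct entry *e = chain; e; e = e->next)
        left++;
    printf("entries left: %d\n", left);
    return 0;
}

Passing a negative netns_id flushes everything, which mirrors the !net case in the kernel code.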
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 824e8c8a17ad..eb7f82ebf4a3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -916,25 +916,20 @@ static void tcp_init_metrics(struct sock *sk)
tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
}
tcp_set_rto(sk);
- if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
- goto reset;
-
-cwnd:
- tp->snd_cwnd = tcp_init_cwnd(tp, dst);
- tp->snd_cwnd_stamp = tcp_time_stamp;
- return;
-
+ if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) {
reset:
- /* Play conservative. If timestamps are not
- * supported, TCP will fail to recalculate correct
- * rtt, if initial rto is too small. FORGET ALL AND RESET!
- */
- if (!tp->rx_opt.saw_tstamp && tp->srtt) {
- tp->srtt = 0;
- tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
- inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+ /* Play conservative. If timestamps are not
+ * supported, TCP will fail to recalculate correct
+ * rtt, if initial rto is too small. FORGET ALL AND RESET!
+ */
+ if (!tp->rx_opt.saw_tstamp && tp->srtt) {
+ tp->srtt = 0;
+ tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
+ inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+ }
}
- goto cwnd;
+ tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+ tp->snd_cwnd_stamp = tcp_time_stamp;
}
static void tcp_update_reordering(struct sock *sk, const int metric,
@@ -4404,7 +4399,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
- eaten = (chunk == skb->len && !th->fin);
+ eaten = (chunk == skb->len);
tcp_rcv_space_adjust(sk);
}
local_bh_disable();
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f4011027543d..02f583b3744a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1994,13 +1994,12 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
}
req = req->dl_next;
}
- st->offset = 0;
if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
break;
get_req:
req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
}
- sk = sk_next(st->syn_wait_sk);
+ sk = sk_nulls_next(st->syn_wait_sk);
st->state = TCP_SEQ_STATE_LISTENING;
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
} else {
@@ -2009,7 +2008,7 @@ get_req:
if (reqsk_queue_len(&icsk->icsk_accept_queue))
goto start_req;
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
- sk = sk_next(sk);
+ sk = sk_nulls_next(sk);
}
get_sk:
sk_nulls_for_each_from(sk, node) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2d390669d406..406f320336e6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -228,10 +228,15 @@ void tcp_select_initial_window(int __space, __u32 mss,
}
}
- /* Set initial window to value enough for senders, following RFC5681. */
+ /* Set initial window to a value enough for senders starting with
+ * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
+ * a limit on the initial window when mss is larger than 1460.
+ */
if (mss > (1 << *rcv_wscale)) {
- int init_cwnd = rfc3390_bytes_to_packets(mss);
-
+ int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
+ if (mss > 1460)
+ init_cwnd =
+ max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
/* when initializing use the value from init_rcv_wnd
* rather than the default from above
*/
@@ -1345,7 +1350,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
return 0;
}
-/* Intialize TSO state of a skb.
+/* Initialize TSO state of a skb.
* This must be invoked the first time we consider transmitting
* SKB onto the wire.
*/
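
The tcp_select_initial_window() change above sizes the advertised window for an initial congestion window of TCP_DEFAULT_INIT_RCVWND segments, but scales that back once the MSS exceeds 1460 bytes so the window stays bounded in bytes, never dropping below two segments. A small worked example of the clamp; TCP_DEFAULT_INIT_RCVWND is assumed here to be 10 segments, which is the value this patch series uses, and init_cwnd_for() is just a local helper for the arithmetic.

#include <stdio.h>

#define TCP_DEFAULT_INIT_RCVWND 10      /* assumed value, see lead-in */

static unsigned int init_cwnd_for(unsigned int mss)
{
    unsigned int init_cwnd = TCP_DEFAULT_INIT_RCVWND;

    if (mss > 1460) {
        init_cwnd = (1460 * TCP_DEFAULT_INIT_RCVWND) / mss;
        if (init_cwnd < 2)
            init_cwnd = 2;      /* never advertise less than 2 segments */
    }
    return init_cwnd;
}

int main(void)
{
    unsigned int mss[] = { 536, 1460, 4096, 9000 };

    for (int i = 0; i < 4; i++)
        printf("mss=%u -> init_cwnd=%u segments (%u bytes)\n",
               mss[i], init_cwnd_for(mss[i]),
               mss[i] * init_cwnd_for(mss[i]));
    return 0;
}

For example, a 9000-byte MSS yields 14600 / 9000 = 1, which the max clamps to 2 segments, i.e. an 18000-byte initial window rather than 90000 bytes.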
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5b189c97c2fc..fd6782e3a038 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -420,9 +420,6 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
dev->type == ARPHRD_TUNNEL6 ||
dev->type == ARPHRD_SIT ||
dev->type == ARPHRD_NONE) {
- printk(KERN_INFO
- "%s: Disabled Privacy Extensions\n",
- dev->name);
ndev->cnf.use_tempaddr = -1;
} else {
in6_dev_hold(ndev);
@@ -2664,14 +2661,12 @@ static int addrconf_ifdown(struct net_device *dev, int how)
struct net *net = dev_net(dev);
struct inet6_dev *idev;
struct inet6_ifaddr *ifa;
- LIST_HEAD(keep_list);
- int state;
+ int state, i;
ASSERT_RTNL();
- /* Flush routes if device is being removed or it is not loopback */
- if (how || !(dev->flags & IFF_LOOPBACK))
- rt6_ifdown(net, dev);
+ rt6_ifdown(net, dev);
+ neigh_ifdown(&nd_tbl, dev);
idev = __in6_dev_get(dev);
if (idev == NULL)
@@ -2692,6 +2687,23 @@ static int addrconf_ifdown(struct net_device *dev, int how)
}
+ /* Step 2: clear hash table */
+ for (i = 0; i < IN6_ADDR_HSIZE; i++) {
+ struct hlist_head *h = &inet6_addr_lst[i];
+ struct hlist_node *n;
+
+ spin_lock_bh(&addrconf_hash_lock);
+ restart:
+ hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
+ if (ifa->idev == idev) {
+ hlist_del_init_rcu(&ifa->addr_lst);
+ addrconf_del_timer(ifa);
+ goto restart;
+ }
+ }
+ spin_unlock_bh(&addrconf_hash_lock);
+ }
+
write_lock_bh(&idev->lock);
/* Step 2: clear flags for stateless addrconf */
@@ -2725,52 +2737,23 @@ static int addrconf_ifdown(struct net_device *dev, int how)
struct inet6_ifaddr, if_list);
addrconf_del_timer(ifa);
- /* If just doing link down, and address is permanent
- and not link-local, then retain it. */
- if (!how &&
- (ifa->flags&IFA_F_PERMANENT) &&
- !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
- list_move_tail(&ifa->if_list, &keep_list);
-
- /* If not doing DAD on this address, just keep it. */
- if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
- idev->cnf.accept_dad <= 0 ||
- (ifa->flags & IFA_F_NODAD))
- continue;
+ list_del(&ifa->if_list);
- /* If it was tentative already, no need to notify */
- if (ifa->flags & IFA_F_TENTATIVE)
- continue;
+ write_unlock_bh(&idev->lock);
- /* Flag it for later restoration when link comes up */
- ifa->flags |= IFA_F_TENTATIVE;
- ifa->state = INET6_IFADDR_STATE_DAD;
- } else {
- list_del(&ifa->if_list);
-
- /* clear hash table */
- spin_lock_bh(&addrconf_hash_lock);
- hlist_del_init_rcu(&ifa->addr_lst);
- spin_unlock_bh(&addrconf_hash_lock);
-
- write_unlock_bh(&idev->lock);
- spin_lock_bh(&ifa->state_lock);
- state = ifa->state;
- ifa->state = INET6_IFADDR_STATE_DEAD;
- spin_unlock_bh(&ifa->state_lock);
-
- if (state != INET6_IFADDR_STATE_DEAD) {
- __ipv6_ifa_notify(RTM_DELADDR, ifa);
- atomic_notifier_call_chain(&inet6addr_chain,
- NETDEV_DOWN, ifa);
- }
+ spin_lock_bh(&ifa->state_lock);
+ state = ifa->state;
+ ifa->state = INET6_IFADDR_STATE_DEAD;
+ spin_unlock_bh(&ifa->state_lock);
- in6_ifa_put(ifa);
- write_lock_bh(&idev->lock);
+ if (state != INET6_IFADDR_STATE_DEAD) {
+ __ipv6_ifa_notify(RTM_DELADDR, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
}
- }
+ in6_ifa_put(ifa);
- list_splice(&keep_list, &idev->addr_list);
+ write_lock_bh(&idev->lock);
+ }
write_unlock_bh(&idev->lock);
@@ -4159,8 +4142,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
addrconf_leave_solict(ifp->idev, &ifp->addr);
dst_hold(&ifp->rt->dst);
- if (ifp->state == INET6_IFADDR_STATE_DEAD &&
- ip6_del_rt(ifp->rt))
+ if (ip6_del_rt(ifp->rt))
dst_free(&ifp->rt->dst);
break;
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 059a3de647db..978e80e2c4a8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -300,7 +300,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
- /* Reproduce AF_INET checks to make the bindings consitant */
+ /* Reproduce AF_INET checks to make the bindings consistent */
v4addr = addr->sin6_addr.s6_addr32[3];
chk_addr_ret = inet_addr_type(net, v4addr);
if (!sysctl_ip_nonlocal_bind &&
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4ef26ce..1aba54ae53c4 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
if (!pskb_may_pull(skb, ah_hlen))
goto out;
- ip6h = ipv6_hdr(skb);
-
- skb_push(skb, hdr_len);
if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
goto out;
nfrags = err;
+ ah = (struct ip_auth_hdr *)skb->data;
+ ip6h = ipv6_hdr(skb);
+
+ skb_push(skb, hdr_len);
+
work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
if (!work_iph)
goto out;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d1815a..d144e629d2b4 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
(!sk->sk_reuse || !sk2->sk_reuse ||
- sk2->sk_state == TCP_LISTEN) &&
+ ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}
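
The bind-conflict change above now treats a socket in TCP_CLOSE like a listener by turning the state number into a single bit with (1 << sk2->sk_state) and masking it against TCPF_LISTEN | TCPF_CLOSE. A short illustration of that bit test; the numeric state values are assumed to match the kernel's include/net/tcp_states.h.

#include <stdio.h>

enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_LISTEN = 10 };   /* assumed values */

#define TCPF_CLOSE   (1 << TCP_CLOSE)
#define TCPF_LISTEN  (1 << TCP_LISTEN)

int main(void)
{
    int states[] = { TCP_ESTABLISHED, TCP_CLOSE, TCP_LISTEN };

    for (int i = 0; i < 3; i++)
        printf("state %2d -> conflict check applies: %s\n", states[i],
               ((1 << states[i]) & (TCPF_LISTEN | TCPF_CLOSE)) ?
               "yes" : "no");
    return 0;
}

The bitmask form lets a single comparison cover any set of states, which is why the patch can add TCP_CLOSE without a second equality test.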
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 99157b4cd56e..5f8d242be3f3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -56,7 +56,7 @@
#include <net/checksum.h>
#include <linux/mroute6.h>
-static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
int __ip6_local_out(struct sk_buff *skb)
{
@@ -145,14 +145,6 @@ static int ip6_finish_output2(struct sk_buff *skb)
return -EINVAL;
}
-static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
-{
- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
-
- return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
-}
-
static int ip6_finish_output(struct sk_buff *skb)
{
if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
@@ -409,6 +401,9 @@ int ip6_forward(struct sk_buff *skb)
goto drop;
}
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
skb_forward_csum(skb);
/*
@@ -601,7 +596,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
return offset;
}
-static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct sk_buff *frag;
struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9fab274019c0..0e1d53bcf1e0 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -34,6 +34,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
@@ -1804,6 +1805,80 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
}
}
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req6 {
+ struct sockaddr_in6 src;
+ struct sockaddr_in6 grp;
+ compat_ulong_t pktcnt;
+ compat_ulong_t bytecnt;
+ compat_ulong_t wrong_if;
+};
+
+struct compat_sioc_mif_req6 {
+ mifi_t mifi;
+ compat_ulong_t icount;
+ compat_ulong_t ocount;
+ compat_ulong_t ibytes;
+ compat_ulong_t obytes;
+};
+
+int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+ struct compat_sioc_sg_req6 sr;
+ struct compat_sioc_mif_req6 vr;
+ struct mif_device *vif;
+ struct mfc6_cache *c;
+ struct net *net = sock_net(sk);
+ struct mr6_table *mrt;
+
+ mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+ if (mrt == NULL)
+ return -ENOENT;
+
+ switch (cmd) {
+ case SIOCGETMIFCNT_IN6:
+ if (copy_from_user(&vr, arg, sizeof(vr)))
+ return -EFAULT;
+ if (vr.mifi >= mrt->maxvif)
+ return -EINVAL;
+ read_lock(&mrt_lock);
+ vif = &mrt->vif6_table[vr.mifi];
+ if (MIF_EXISTS(mrt, vr.mifi)) {
+ vr.icount = vif->pkt_in;
+ vr.ocount = vif->pkt_out;
+ vr.ibytes = vif->bytes_in;
+ vr.obytes = vif->bytes_out;
+ read_unlock(&mrt_lock);
+
+ if (copy_to_user(arg, &vr, sizeof(vr)))
+ return -EFAULT;
+ return 0;
+ }
+ read_unlock(&mrt_lock);
+ return -EADDRNOTAVAIL;
+ case SIOCGETSGCNT_IN6:
+ if (copy_from_user(&sr, arg, sizeof(sr)))
+ return -EFAULT;
+
+ read_lock(&mrt_lock);
+ c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+ if (c) {
+ sr.pktcnt = c->mfc_un.res.pkt;
+ sr.bytecnt = c->mfc_un.res.bytes;
+ sr.wrong_if = c->mfc_un.res.wrong_if;
+ read_unlock(&mrt_lock);
+
+ if (copy_to_user(arg, &sr, sizeof(sr)))
+ return -EFAULT;
+ return 0;
+ }
+ read_unlock(&mrt_lock);
+ return -EADDRNOTAVAIL;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#endif
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 455582384ece..7d227c644f72 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
struct ip6t_entry *iter;
unsigned int cpu;
unsigned int i;
- unsigned int curcpu = get_cpu();
-
- /* Instead of clearing (by a previous call to memset())
- * the counters and using adds, we set the counters
- * with data used by 'current' CPU
- *
- * Bottom half has to be disabled to prevent deadlock
- * if new softirq were to run and call ipt_do_table
- */
- local_bh_disable();
- i = 0;
- xt_entry_foreach(iter, t->entries[curcpu], t->size) {
- SET_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
- ++i;
- }
- local_bh_enable();
- /* Processing counters from other cpus, we can let bottom half enabled,
- * (preemption is disabled)
- */
for_each_possible_cpu(cpu) {
- if (cpu == curcpu)
- continue;
+ seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
i = 0;
- local_bh_disable();
- xt_info_wrlock(cpu);
xt_entry_foreach(iter, t->entries[cpu], t->size) {
- ADD_COUNTER(counters[i], iter->counters.bcnt,
- iter->counters.pcnt);
+ u64 bcnt, pcnt;
+ unsigned int start;
+
+ do {
+ start = read_seqbegin(lock);
+ bcnt = iter->counters.bcnt;
+ pcnt = iter->counters.pcnt;
+ } while (read_seqretry(lock, start));
+
+ ADD_COUNTER(counters[i], bcnt, pcnt);
++i;
}
- xt_info_wrunlock(cpu);
- local_bh_enable();
}
- put_cpu();
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
(other than comefrom, which userspace doesn't care
about). */
countersize = sizeof(struct xt_counters) * private->number;
- counters = vmalloc(countersize);
+ counters = vzalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct ip6t_entry *iter;
ret = 0;
- counters = vmalloc(num_counters * sizeof(struct xt_counters));
+ counters = vzalloc(num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto out;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 99abfb53bab9..97c5b21b9674 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -19,13 +19,15 @@
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+#endif
+#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
@@ -33,8 +35,10 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
{
u16 zone = NF_CT_DEFAULT_ZONE;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
if (skb->nfct)
zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge &&
@@ -56,9 +60,11 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
{
struct sk_buff *reasm;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/* Previously seen (loopback)? */
if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
return NF_ACCEPT;
+#endif
reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
/* queued */
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 86c39526ba5e..c5b0915d106b 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -31,6 +31,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
+#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
@@ -1157,6 +1158,23 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
}
}
+#ifdef CONFIG_COMPAT
+static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case SIOCOUTQ:
+ case SIOCINQ:
+ return -ENOIOCTLCMD;
+ default:
+#ifdef CONFIG_IPV6_MROUTE
+ return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+ return -ENOIOCTLCMD;
+#endif
+ }
+}
+#endif
+
static void rawv6_close(struct sock *sk, long timeout)
{
if (inet_sk(sk)->inet_num == IPPROTO_RAW)
@@ -1215,6 +1233,7 @@ struct proto rawv6_prot = {
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_rawv6_setsockopt,
.compat_getsockopt = compat_rawv6_getsockopt,
+ .compat_ioctl = compat_rawv6_ioctl,
#endif
};
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 373bd0416f69..1c29f95695de 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -72,8 +72,6 @@
#define RT6_TRACE(x...) do { ; } while (0)
#endif
-#define CLONE_OFFLINK_ROUTE 0
-
static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
@@ -115,6 +113,11 @@ static struct dst_ops ip6_dst_ops_template = {
.local_out = __ip6_local_out,
};
+static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+{
+ return 0;
+}
+
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
@@ -124,6 +127,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.protocol = cpu_to_be16(ETH_P_IPV6),
.destroy = ip6_dst_destroy,
.check = ip6_dst_check,
+ .default_mtu = ip6_blackhole_default_mtu,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
};
@@ -196,7 +200,6 @@ static void ip6_dst_destroy(struct dst_entry *dst)
in6_dev_put(idev);
}
if (peer) {
- BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
rt->rt6i_peer = NULL;
inet_putpeer(peer);
}
@@ -206,9 +209,6 @@ void rt6_bind_peer(struct rt6_info *rt, int create)
{
struct inet_peer *peer;
- if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
- return;
-
peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
inet_putpeer(peer);
@@ -738,13 +738,8 @@ restart:
if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
- else {
-#if CLONE_OFFLINK_ROUTE
+ else
nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
-#else
- goto out2;
-#endif
- }
dst_release(&rt->dst);
rt = nrt ? : net->ipv6.ip6_null_entry;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index fa1d8f4e0051..7cb65ef79f9c 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -15,6 +15,8 @@
#include <net/addrconf.h>
#include <net/inet_frag.h>
+static struct ctl_table empty[1];
+
static ctl_table ipv6_table_template[] = {
{
.procname = "route",
@@ -35,6 +37,12 @@ static ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "neigh",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = empty,
+ },
{ }
};
@@ -152,7 +160,6 @@ static struct ctl_table_header *ip6_base;
int ipv6_static_sysctl_register(void)
{
- static struct ctl_table empty[1];
ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
if (ip6_base == NULL)
return -ENOMEM;
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 6434bd5ce088..8e688b3de9ab 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -17,6 +17,7 @@
#include <linux/netfilter_ipv6.h>
#include <net/dst.h>
#include <net/ipv6.h>
+#include <net/ip6_route.h>
#include <net/xfrm.h>
int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
@@ -88,8 +89,21 @@ static int xfrm6_output_finish(struct sk_buff *skb)
return xfrm_output(skb);
}
+static int __xfrm6_output(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x = dst->xfrm;
+
+ if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
+ ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+ dst_allfrag(skb_dst(skb)))) {
+ return ip6_fragment(skb, xfrm6_output_finish);
+ }
+ return xfrm6_output_finish(skb);
+}
+
int xfrm6_output(struct sk_buff *skb)
{
return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
- skb_dst(skb)->dev, xfrm6_output_finish);
+ skb_dst(skb)->dev, __xfrm6_output);
}
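
The new __xfrm6_output() only hands the packet to ip6_fragment() when the state is in tunnel mode and the packet either exceeds the path MTU without being GSO, or the route requires fragmenting everything; otherwise it goes straight to xfrm6_output_finish(). A condensed sketch of that decision, with the skb/dst helpers reduced to plain parameters (the names here are illustrative stand-ins):

#include <stdbool.h>
#include <stdio.h>

static bool must_fragment(bool tunnel_mode, unsigned int len,
                          unsigned int dst_mtu, bool is_gso, bool allfrag)
{
    /* mirrors the condition in __xfrm6_output() above */
    return tunnel_mode && ((len > dst_mtu && !is_gso) || allfrag);
}

int main(void)
{
    /* 1500-byte tunnel-mode packet over a 1280-byte path MTU */
    printf("fragment: %d\n", must_fragment(true, 1500, 1280, false, false));
    /* transport mode never takes the ip6_fragment() path here */
    printf("fragment: %d\n", must_fragment(false, 1500, 1280, false, false));
    return 0;
}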
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7e74023ea6e4..da87428681cc 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -98,6 +98,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
if (!xdst->u.rt6.rt6i_idev)
return -ENODEV;
+ xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
+ if (rt->rt6i_peer)
+ atomic_inc(&rt->rt6i_peer->refcnt);
+
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
@@ -216,6 +220,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt6.rt6i_idev))
in6_dev_put(xdst->u.rt6.rt6i_idev);
+ if (likely(xdst->u.rt6.rt6i_peer))
+ inet_putpeer(xdst->u.rt6.rt6i_peer);
xfrm_dst_destroy(xdst);
}
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 7f097989cde2..c9890e25cd4c 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -45,7 +45,6 @@
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/smp_lock.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/slab.h>
@@ -2281,6 +2280,16 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case IRLMP_ENUMDEVICES:
+
+ /* Offset to first device entry */
+ offset = sizeof(struct irda_device_list) -
+ sizeof(struct irda_device_info);
+
+ if (len < offset) {
+ err = -EINVAL;
+ goto out;
+ }
+
/* Ask lmp for the current discovery log */
discoveries = irlmp_get_discoveries(&list.len, self->mask.word,
self->nslots);
@@ -2291,15 +2300,9 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
}
/* Write total list length back to client */
- if (copy_to_user(optval, &list,
- sizeof(struct irda_device_list) -
- sizeof(struct irda_device_info)))
+ if (copy_to_user(optval, &list, offset))
err = -EFAULT;
- /* Offset to first device entry */
- offset = sizeof(struct irda_device_list) -
- sizeof(struct irda_device_info);
-
/* Copy the list itself - watch for overflow */
if (list.len > 2048) {
err = -EINVAL;
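
The IRLMP_ENUMDEVICES change above computes the header offset first and rejects the request when the caller's buffer cannot even hold the list header, before anything is copied back to userspace. A minimal sketch of that validate-before-copy ordering; the structure layout and the memcpy() standing in for copy_to_user() are illustrative, not the real IrDA definitions.

#include <string.h>
#include <stdio.h>
#include <errno.h>

struct list_header { unsigned int len; };       /* stand-in for the list head */

static int enum_devices(void *userbuf, size_t userlen,
                        const struct list_header *hdr)
{
    size_t offset = sizeof(struct list_header);

    if (userlen < offset)       /* reject before touching userbuf */
        return -EINVAL;

    memcpy(userbuf, hdr, offset);   /* copy_to_user() stand-in */
    return 0;
}

int main(void)
{
    struct list_header hdr = { .len = 3 };
    char buf[64];

    printf("short buffer: %d\n", (int)enum_devices(buf, 2, &hdr));
    printf("ok buffer:    %d\n", (int)enum_devices(buf, sizeof(buf), &hdr));
    return 0;
}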
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 7fa86373de41..7c567b8aa89a 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -15,7 +15,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include "irnet_ppp.h" /* Private header */
/* Please put other headers in irnet.h - Thanks */
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index f7db676de77d..1ee5dab3cfae 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -36,6 +36,7 @@
#define KMSG_COMPONENT "iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
@@ -1804,6 +1805,7 @@ static void iucv_external_interrupt(unsigned int ext_int_code,
struct iucv_irq_data *p;
struct iucv_irq_list *work;
+ kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++;
p = iucv_irq_data[smp_processor_id()];
if (p->ippathid >= iucv_max_pathid) {
WARN_ON(p->ippathid >= iucv_max_pathid);
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 798d9b9462e2..c766056d0488 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -20,7 +20,7 @@ config MAC80211_HAS_RC
def_bool n
config MAC80211_RC_PID
- bool "PID controller based rate control algorithm" if EMBEDDED
+ bool "PID controller based rate control algorithm" if EXPERT
select MAC80211_HAS_RC
---help---
This option enables a TX rate control algorithm for
@@ -28,14 +28,14 @@ config MAC80211_RC_PID
rate.
config MAC80211_RC_MINSTREL
- bool "Minstrel" if EMBEDDED
+ bool "Minstrel" if EXPERT
select MAC80211_HAS_RC
default y
---help---
This option enables the 'minstrel' TX rate control algorithm
config MAC80211_RC_MINSTREL_HT
- bool "Minstrel 802.11n support" if EMBEDDED
+ bool "Minstrel 802.11n support" if EXPERT
depends on MAC80211_RC_MINSTREL
default y
---help---
@@ -93,7 +93,7 @@ config MAC80211_MESH
config MAC80211_LEDS
bool "Enable LED triggers"
depends on MAC80211
- select NEW_LEDS
+ depends on LEDS_CLASS
select LEDS_TRIGGERS
---help---
This option enables a few LED triggers for different
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index f138b195d657..227ca82eef72 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -185,8 +185,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
struct ieee80211_mgmt *mgmt,
size_t len)
{
- struct ieee80211_hw *hw = &local->hw;
- struct ieee80211_conf *conf = &hw->conf;
struct tid_ampdu_rx *tid_agg_rx;
u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
u8 dialog_token;
@@ -231,13 +229,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
goto end_no_lock;
}
/* determine default buffer size */
- if (buf_size == 0) {
- struct ieee80211_supported_band *sband;
-
- sband = local->hw.wiphy->bands[conf->channel->band];
- buf_size = IEEE80211_MIN_AMPDU_BUF;
- buf_size = buf_size << sband->ht_cap.ampdu_factor;
- }
+ if (buf_size == 0)
+ buf_size = IEEE80211_MAX_AMPDU_BUF;
/* examine state machine */
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index d4679b265ba8..9cc472c6a6a5 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -342,10 +342,11 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
tid_tx->dialog_token, start_seq_num,
- 0x40, 5000);
+ 0x40, tid_tx->timeout);
}
-int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
+int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
+ u16 timeout)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -420,6 +421,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
skb_queue_head_init(&tid_tx->pending);
__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+ tid_tx->timeout = timeout;
+
/* Tx timer */
tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c30b8b72eedb..4bc8a9250cfd 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -295,11 +295,12 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
static int ieee80211_config_default_key(struct wiphy *wiphy,
struct net_device *dev,
- u8 key_idx)
+ u8 key_idx, bool uni,
+ bool multi)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- ieee80211_set_default_key(sdata, key_idx);
+ ieee80211_set_default_key(sdata, key_idx, uni, multi);
return 0;
}
@@ -983,7 +984,7 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-static int ieee80211_get_mesh_params(struct wiphy *wiphy,
+static int ieee80211_get_mesh_config(struct wiphy *wiphy,
struct net_device *dev,
struct mesh_config *conf)
{
@@ -999,7 +1000,37 @@ static inline bool _chg_mesh_attr(enum nl80211_meshconf_params parm, u32 mask)
return (mask >> (parm-1)) & 0x1;
}
-static int ieee80211_update_mesh_params(struct wiphy *wiphy,
+static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
+ const struct mesh_setup *setup)
+{
+ u8 *new_ie;
+ const u8 *old_ie;
+
+ /* first allocate the new vendor information element */
+ new_ie = NULL;
+ old_ie = ifmsh->vendor_ie;
+
+ ifmsh->vendor_ie_len = setup->vendor_ie_len;
+ if (setup->vendor_ie_len) {
+ new_ie = kmemdup(setup->vendor_ie, setup->vendor_ie_len,
+ GFP_KERNEL);
+ if (!new_ie)
+ return -ENOMEM;
+ }
+
+ /* now copy the rest of the setup parameters */
+ ifmsh->mesh_id_len = setup->mesh_id_len;
+ memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len);
+ ifmsh->mesh_pp_id = setup->path_sel_proto;
+ ifmsh->mesh_pm_id = setup->path_metric;
+ ifmsh->vendor_ie = new_ie;
+
+ kfree(old_ie);
+
+ return 0;
+}
+
+static int ieee80211_update_mesh_config(struct wiphy *wiphy,
struct net_device *dev, u32 mask,
const struct mesh_config *nconf)
{
@@ -1058,11 +1089,12 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ int err;
- memcpy(&sdata->u.mesh.mshcfg, conf, sizeof(struct mesh_config));
- ifmsh->mesh_id_len = setup->mesh_id_len;
- memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len);
-
+ memcpy(&ifmsh->mshcfg, conf, sizeof(struct mesh_config));
+ err = copy_mesh_setup(ifmsh, setup);
+ if (err)
+ return err;
ieee80211_start_mesh(sdata);
return 0;
@@ -1561,6 +1593,37 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return 0;
}
+static int ieee80211_remain_on_channel_hw(struct ieee80211_local *local,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type chantype,
+ unsigned int duration, u64 *cookie)
+{
+ int ret;
+ u32 random_cookie;
+
+ lockdep_assert_held(&local->mtx);
+
+ if (local->hw_roc_cookie)
+ return -EBUSY;
+ /* must be nonzero */
+ random_cookie = random32() | 1;
+
+ *cookie = random_cookie;
+ local->hw_roc_dev = dev;
+ local->hw_roc_cookie = random_cookie;
+ local->hw_roc_channel = chan;
+ local->hw_roc_channel_type = chantype;
+ local->hw_roc_duration = duration;
+ ret = drv_remain_on_channel(local, chan, chantype, duration);
+ if (ret) {
+ local->hw_roc_channel = NULL;
+ local->hw_roc_cookie = 0;
+ }
+
+ return ret;
+}
+
static int ieee80211_remain_on_channel(struct wiphy *wiphy,
struct net_device *dev,
struct ieee80211_channel *chan,
@@ -1569,16 +1632,63 @@ static int ieee80211_remain_on_channel(struct wiphy *wiphy,
u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+
+ if (local->ops->remain_on_channel) {
+ int ret;
+
+ mutex_lock(&local->mtx);
+ ret = ieee80211_remain_on_channel_hw(local, dev,
+ chan, channel_type,
+ duration, cookie);
+ local->hw_roc_for_tx = false;
+ mutex_unlock(&local->mtx);
+
+ return ret;
+ }
return ieee80211_wk_remain_on_channel(sdata, chan, channel_type,
duration, cookie);
}
+static int ieee80211_cancel_remain_on_channel_hw(struct ieee80211_local *local,
+ u64 cookie)
+{
+ int ret;
+
+ lockdep_assert_held(&local->mtx);
+
+ if (local->hw_roc_cookie != cookie)
+ return -ENOENT;
+
+ ret = drv_cancel_remain_on_channel(local);
+ if (ret)
+ return ret;
+
+ local->hw_roc_cookie = 0;
+ local->hw_roc_channel = NULL;
+
+ ieee80211_recalc_idle(local);
+
+ return 0;
+}
+
static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
struct net_device *dev,
u64 cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+
+ if (local->ops->cancel_remain_on_channel) {
+ int ret;
+
+ mutex_lock(&local->mtx);
+ ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
+ mutex_unlock(&local->mtx);
+
+ return ret;
+ }
return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
}
@@ -1630,6 +1740,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
channel_type != local->_oper_channel_type))
is_offchan = true;
+ if (chan == local->hw_roc_channel) {
+ /* TODO: check channel type? */
+ is_offchan = false;
+ flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+ }
+
if (is_offchan && !offchan)
return -EBUSY;
@@ -1638,6 +1754,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_MESH_POINT:
if (!ieee80211_is_action(mgmt->frame_control) ||
mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
break;
@@ -1667,6 +1784,49 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
*cookie = (unsigned long) skb;
+ if (is_offchan && local->ops->remain_on_channel) {
+ unsigned int duration;
+ int ret;
+
+ mutex_lock(&local->mtx);
+ /*
+ * If the duration is zero, then the driver
+ * wouldn't actually do anything. Set it to
+ * 100 for now.
+ *
+ * TODO: cancel the off-channel operation
+ * when we get the SKB's TX status and
+ * the wait time was zero before.
+ */
+ duration = 100;
+ if (wait)
+ duration = wait;
+ ret = ieee80211_remain_on_channel_hw(local, dev, chan,
+ channel_type,
+ duration, cookie);
+ if (ret) {
+ kfree_skb(skb);
+ mutex_unlock(&local->mtx);
+ return ret;
+ }
+
+ local->hw_roc_for_tx = true;
+ local->hw_roc_duration = wait;
+
+ /*
+ * queue up frame for transmission after
+ * ieee80211_ready_on_channel call
+ */
+
+ /* modify cookie to prevent API mismatches */
+ *cookie ^= 2;
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+ local->hw_roc_skb = skb;
+ mutex_unlock(&local->mtx);
+
+ return 0;
+ }
+
/*
* Can transmit right away if the channel was the
* right one and there's no wait involved... If a
@@ -1707,6 +1867,21 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
int ret = -ENOENT;
mutex_lock(&local->mtx);
+
+ if (local->ops->cancel_remain_on_channel) {
+ cookie ^= 2;
+ ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
+
+ if (ret == 0) {
+ kfree_skb(local->hw_roc_skb);
+ local->hw_roc_skb = NULL;
+ }
+
+ mutex_unlock(&local->mtx);
+
+ return ret;
+ }
+
list_for_each_entry(wk, &local->work_list, list) {
if (wk->sdata != sdata)
continue;
@@ -1786,8 +1961,8 @@ struct cfg80211_ops mac80211_config_ops = {
.change_mpath = ieee80211_change_mpath,
.get_mpath = ieee80211_get_mpath,
.dump_mpath = ieee80211_dump_mpath,
- .update_mesh_params = ieee80211_update_mesh_params,
- .get_mesh_params = ieee80211_get_mesh_params,
+ .update_mesh_config = ieee80211_update_mesh_config,
+ .get_mesh_config = ieee80211_get_mesh_config,
.join_mesh = ieee80211_join_mesh,
.leave_mesh = ieee80211_leave_mesh,
#endif
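
The hardware remain-on-channel path added above hands back a random nonzero cookie so a later cancel can be matched against the single outstanding request, and the mgmt-TX variant flips bit 1 of that cookie so the two user-facing APIs cannot cancel each other's operations. A small sketch of that single-slot bookkeeping, with the driver call stubbed out and all names illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static uint64_t hw_roc_cookie;  /* zero means "no request outstanding" */

static int start_roc(uint64_t *cookie)
{
    if (hw_roc_cookie)
        return -EBUSY;          /* only one request at a time */

    hw_roc_cookie = (uint64_t)(rand() | 1);  /* must be nonzero */
    *cookie = hw_roc_cookie;
    return 0;
}

static int cancel_roc(uint64_t cookie)
{
    if (hw_roc_cookie != cookie)
        return -ENOENT;
    hw_roc_cookie = 0;
    return 0;
}

int main(void)
{
    uint64_t c1, c2;

    printf("start:  %d\n", start_roc(&c1));
    printf("second: %d (expect -EBUSY)\n", start_roc(&c2));
    /* mgmt-TX flips bit 1, so cancelling with c1 ^ 2 must fail */
    printf("wrong:  %d (expect -ENOENT)\n", cancel_roc(c1 ^ 2));
    printf("cancel: %d\n", cancel_roc(c1));
    return 0;
}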
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 5822a6ce7671..f7ef3477c24a 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -274,7 +274,8 @@ void ieee80211_debugfs_key_remove(struct ieee80211_key *key)
debugfs_remove_recursive(key->debugfs.dir);
key->debugfs.dir = NULL;
}
-void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
+
+void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
{
char buf[50];
struct ieee80211_key *key;
@@ -282,25 +283,29 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
if (!sdata->debugfs.dir)
return;
- /* this is running under the key lock */
+ lockdep_assert_held(&sdata->local->key_mtx);
- key = sdata->default_key;
- if (key) {
+ if (sdata->default_unicast_key) {
+ key = sdata->default_unicast_key;
sprintf(buf, "../keys/%d", key->debugfs.cnt);
- sdata->debugfs.default_key =
- debugfs_create_symlink("default_key",
+ sdata->debugfs.default_unicast_key =
+ debugfs_create_symlink("default_unicast_key",
sdata->debugfs.dir, buf);
- } else
- ieee80211_debugfs_key_remove_default(sdata);
-}
-
-void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata)
-{
- if (!sdata)
- return;
+ } else {
+ debugfs_remove(sdata->debugfs.default_unicast_key);
+ sdata->debugfs.default_unicast_key = NULL;
+ }
- debugfs_remove(sdata->debugfs.default_key);
- sdata->debugfs.default_key = NULL;
+ if (sdata->default_multicast_key) {
+ key = sdata->default_multicast_key;
+ sprintf(buf, "../keys/%d", key->debugfs.cnt);
+ sdata->debugfs.default_multicast_key =
+ debugfs_create_symlink("default_multicast_key",
+ sdata->debugfs.dir, buf);
+ } else {
+ debugfs_remove(sdata->debugfs.default_multicast_key);
+ sdata->debugfs.default_multicast_key = NULL;
+ }
}
void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/debugfs_key.h b/net/mac80211/debugfs_key.h
index 54717b4e1371..32adc77e9c77 100644
--- a/net/mac80211/debugfs_key.h
+++ b/net/mac80211/debugfs_key.h
@@ -4,8 +4,7 @@
#ifdef CONFIG_MAC80211_DEBUGFS
void ieee80211_debugfs_key_add(struct ieee80211_key *key);
void ieee80211_debugfs_key_remove(struct ieee80211_key *key);
-void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata);
-void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata);
+void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata);
void ieee80211_debugfs_key_add_mgmt_default(
struct ieee80211_sub_if_data *sdata);
void ieee80211_debugfs_key_remove_mgmt_default(
@@ -17,10 +16,7 @@ static inline void ieee80211_debugfs_key_add(struct ieee80211_key *key)
{}
static inline void ieee80211_debugfs_key_remove(struct ieee80211_key *key)
{}
-static inline void ieee80211_debugfs_key_add_default(
- struct ieee80211_sub_if_data *sdata)
-{}
-static inline void ieee80211_debugfs_key_remove_default(
+static inline void ieee80211_debugfs_key_update_default(
struct ieee80211_sub_if_data *sdata)
{}
static inline void ieee80211_debugfs_key_add_mgmt_default(
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 8bb5af85f469..c04a1396cf8d 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -189,7 +189,7 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu
if (tx) {
if (start)
- ret = ieee80211_start_tx_ba_session(&sta->sta, tid);
+ ret = ieee80211_start_tx_ba_session(&sta->sta, tid, 5000);
else
ret = ieee80211_stop_tx_ba_session(&sta->sta, tid);
} else {
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 4244554d218a..98d589960a49 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -367,7 +367,7 @@ static inline void drv_reset_tsf(struct ieee80211_local *local)
static inline int drv_tx_last_beacon(struct ieee80211_local *local)
{
- int ret = 1;
+ int ret = 0; /* default unsupported op for less congestion */
might_sleep();
@@ -465,4 +465,34 @@ static inline int drv_get_antenna(struct ieee80211_local *local,
return ret;
}
+static inline int drv_remain_on_channel(struct ieee80211_local *local,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type chantype,
+ unsigned int duration)
+{
+ int ret;
+
+ might_sleep();
+
+ trace_drv_remain_on_channel(local, chan, chantype, duration);
+ ret = local->ops->remain_on_channel(&local->hw, chan, chantype,
+ duration);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
+static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local)
+{
+ int ret;
+
+ might_sleep();
+
+ trace_drv_cancel_remain_on_channel(local);
+ ret = local->ops->cancel_remain_on_channel(&local->hw);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index c2772f23ac9c..49c84218b2f4 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -933,6 +933,50 @@ TRACE_EVENT(drv_get_antenna,
)
);
+TRACE_EVENT(drv_remain_on_channel,
+ TP_PROTO(struct ieee80211_local *local, struct ieee80211_channel *chan,
+ enum nl80211_channel_type chantype, unsigned int duration),
+
+ TP_ARGS(local, chan, chantype, duration),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, center_freq)
+ __field(int, channel_type)
+ __field(unsigned int, duration)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->center_freq = chan->center_freq;
+ __entry->channel_type = chantype;
+ __entry->duration = duration;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " freq:%dMHz duration:%dms",
+ LOCAL_PR_ARG, __entry->center_freq, __entry->duration
+ )
+);
+
+TRACE_EVENT(drv_cancel_remain_on_channel,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT, LOCAL_PR_ARG
+ )
+);
+
/*
* Tracing for API calls that drivers call.
*/
@@ -1170,6 +1214,42 @@ TRACE_EVENT(api_chswitch_done,
)
);
+TRACE_EVENT(api_ready_on_channel,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT, LOCAL_PR_ARG
+ )
+);
+
+TRACE_EVENT(api_remain_on_channel_expired,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT, LOCAL_PR_ARG
+ )
+);
+
/*
* Tracing for internal functions
* (which may also be called in response to driver calls)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 72499fe5fc36..c47d7c0e48a4 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/etherdevice.h>
+#include <linux/leds.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
@@ -167,6 +168,7 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
* @IEEE80211_RX_FRAGMENTED: fragmented frame
* @IEEE80211_RX_AMSDU: a-MSDU packet
* @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
+ * @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering
*
* These are per-frame flags that are attached to a frame in the
* @rx_flags field of &struct ieee80211_rx_status.
@@ -177,6 +179,7 @@ enum ieee80211_packet_rx_flags {
IEEE80211_RX_FRAGMENTED = BIT(2),
IEEE80211_RX_AMSDU = BIT(3),
IEEE80211_RX_MALFORMED_ACTION_FRM = BIT(4),
+ IEEE80211_RX_DEFERRED_RELEASE = BIT(5),
};
/**
@@ -484,6 +487,8 @@ struct ieee80211_if_mesh {
struct mesh_config mshcfg;
u32 mesh_seqnum;
bool accepting_plinks;
+ const u8 *vendor_ie;
+ u8 vendor_ie_len;
};
#ifdef CONFIG_MAC80211_MESH
@@ -557,7 +562,7 @@ struct ieee80211_sub_if_data {
unsigned int fragment_next;
struct ieee80211_key *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
- struct ieee80211_key *default_key;
+ struct ieee80211_key *default_unicast_key, *default_multicast_key;
struct ieee80211_key *default_mgmt_key;
u16 sequence_number;
@@ -585,9 +590,7 @@ struct ieee80211_sub_if_data {
struct ieee80211_if_vlan vlan;
struct ieee80211_if_managed mgd;
struct ieee80211_if_ibss ibss;
-#ifdef CONFIG_MAC80211_MESH
struct ieee80211_if_mesh mesh;
-#endif
u32 mntr_flags;
} u;
@@ -595,7 +598,8 @@ struct ieee80211_sub_if_data {
struct {
struct dentry *dir;
struct dentry *subdir_stations;
- struct dentry *default_key;
+ struct dentry *default_unicast_key;
+ struct dentry *default_multicast_key;
struct dentry *default_mgmt_key;
} debugfs;
#endif
@@ -629,6 +633,20 @@ enum queue_stop_reason {
IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
};
+#ifdef CONFIG_MAC80211_LEDS
+struct tpt_led_trigger {
+ struct led_trigger trig;
+ char name[32];
+ const struct ieee80211_tpt_blink *blink_table;
+ unsigned int blink_table_len;
+ struct timer_list timer;
+ unsigned long prev_traffic;
+ unsigned long tx_bytes, rx_bytes;
+ unsigned int active, want;
+ bool running;
+};
+#endif
+
/**
* mac80211 scan flags - currently active scan mode
*
@@ -758,6 +776,15 @@ struct ieee80211_local {
struct sk_buff_head skb_queue;
struct sk_buff_head skb_queue_unreliable;
+ /*
+ * Internal FIFO queue which is shared between multiple rx path
+ * stages. Its main task is to provide a serialization mechanism,
+ * so all rx handlers can enjoy having exclusive access to their
+ * private data structures.
+ */
+ struct sk_buff_head rx_skb_queue;
+ bool running_rx_handler; /* protected by rx_skb_queue.lock */
+
/* Station data */
/*
* The mutex only protects the list and counter,
@@ -837,6 +864,7 @@ struct ieee80211_local {
#ifdef CONFIG_MAC80211_LEDS
int tx_led_counter, rx_led_counter;
struct led_trigger *tx_led, *rx_led, *assoc_led, *radio_led;
+ struct tpt_led_trigger *tpt_led_trigger;
char tx_led_name[32], rx_led_name[32],
assoc_led_name[32], radio_led_name[32];
#endif
@@ -923,6 +951,15 @@ struct ieee80211_local {
} debugfs;
#endif
+ struct ieee80211_channel *hw_roc_channel;
+ struct net_device *hw_roc_dev;
+ struct sk_buff *hw_roc_skb;
+ struct work_struct hw_roc_start, hw_roc_done;
+ enum nl80211_channel_type hw_roc_channel_type;
+ unsigned int hw_roc_duration;
+ u32 hw_roc_cookie;
+ bool hw_roc_for_tx;
+
/* dummy netdev for use w/ NAPI */
struct net_device napi_dev;
@@ -1114,6 +1151,7 @@ void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
void ieee80211_offchannel_return(struct ieee80211_local *local,
bool enable_beaconing);
+void ieee80211_hw_roc_setup(struct ieee80211_local *local);
/* interface handling */
int ieee80211_iface_init(void);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index f0f11bb794af..8acba456744e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -220,6 +220,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
/* we're brought up, everything changes */
hw_reconf_flags = ~0;
ieee80211_led_radio(local, true);
+ ieee80211_mod_tpt_led_trig(local,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
}
/*
@@ -1262,8 +1264,9 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
int count = 0;
- bool working = false, scanning = false;
+ bool working = false, scanning = false, hw_roc = false;
struct ieee80211_work *wk;
+ unsigned int led_trig_start = 0, led_trig_stop = 0;
#ifdef CONFIG_PROVE_LOCKING
WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
@@ -1305,6 +1308,9 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
local->scan_sdata->vif.bss_conf.idle = false;
}
+ if (local->hw_roc_channel)
+ hw_roc = true;
+
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->old_idle == sdata->vif.bss_conf.idle)
continue;
@@ -1313,6 +1319,20 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
}
+ if (working || scanning || hw_roc)
+ led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
+ else
+ led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
+
+ if (count)
+ led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
+ else
+ led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
+
+ ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
+
+ if (hw_roc)
+ return ieee80211_idle_off(local, "hw remain-on-channel");
if (working)
return ieee80211_idle_off(local, "working");
if (scanning)
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 72df1ca7299b..8c02469b7176 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -30,19 +30,20 @@
* keys and per-station keys. Since each station belongs to an interface,
* each station key also belongs to that interface.
*
- * Hardware acceleration is done on a best-effort basis, for each key
- * that is eligible the hardware is asked to enable that key but if
- * it cannot do that they key is simply kept for software encryption.
- * There is currently no way of knowing this except by looking into
- * debugfs.
+ * Hardware acceleration is done on a best-effort basis for algorithms
+ * that are implemented in software, for each key the hardware is asked
+ * to enable that key for offloading but if it cannot do that the key is
+ * simply kept for software encryption (unless it is for an algorithm
+ * that isn't implemented in software).
+ * There is currently no way of knowing whether a key is handled in SW
+ * or HW except by looking into debugfs.
*
- * All key operations are protected internally.
- *
- * Within mac80211, key references are, just as STA structure references,
- * protected by RCU. Note, however, that some things are unprotected,
- * namely the key->sta dereferences within the hardware acceleration
- * functions. This means that sta_info_destroy() must remove the key
- * which waits for an RCU grace period.
+ * All key management is internally protected by a mutex. Within all
+ * other parts of mac80211, key references are, just as STA structure
+ * references, protected by RCU. Note, however, that some things are
+ * unprotected, namely the key->sta dereferences within the hardware
+ * acceleration functions. This means that sta_info_destroy() must
+ * remove the key which waits for an RCU grace period.
*/
static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -178,7 +179,7 @@ void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
EXPORT_SYMBOL_GPL(ieee80211_key_removed);
static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
- int idx)
+ int idx, bool uni, bool multi)
{
struct ieee80211_key *key = NULL;
@@ -187,18 +188,19 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
key = sdata->keys[idx];
- rcu_assign_pointer(sdata->default_key, key);
+ if (uni)
+ rcu_assign_pointer(sdata->default_unicast_key, key);
+ if (multi)
+ rcu_assign_pointer(sdata->default_multicast_key, key);
- if (key) {
- ieee80211_debugfs_key_remove_default(key->sdata);
- ieee80211_debugfs_key_add_default(key->sdata);
- }
+ ieee80211_debugfs_key_update_default(sdata);
}
-void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx)
+void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx,
+ bool uni, bool multi)
{
mutex_lock(&sdata->local->key_mtx);
- __ieee80211_set_default_key(sdata, idx);
+ __ieee80211_set_default_key(sdata, idx, uni, multi);
mutex_unlock(&sdata->local->key_mtx);
}
@@ -215,10 +217,7 @@ __ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx)
rcu_assign_pointer(sdata->default_mgmt_key, key);
- if (key) {
- ieee80211_debugfs_key_remove_mgmt_default(key->sdata);
- ieee80211_debugfs_key_add_mgmt_default(key->sdata);
- }
+ ieee80211_debugfs_key_update_default(sdata);
}
void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
@@ -236,7 +235,8 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
struct ieee80211_key *old,
struct ieee80211_key *new)
{
- int idx, defkey, defmgmtkey;
+ int idx;
+ bool defunikey, defmultikey, defmgmtkey;
if (new)
list_add(&new->list, &sdata->key_list);
@@ -257,29 +257,31 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
else
idx = new->conf.keyidx;
- defkey = old && sdata->default_key == old;
+ defunikey = old && sdata->default_unicast_key == old;
+ defmultikey = old && sdata->default_multicast_key == old;
defmgmtkey = old && sdata->default_mgmt_key == old;
- if (defkey && !new)
- __ieee80211_set_default_key(sdata, -1);
+ if (defunikey && !new)
+ __ieee80211_set_default_key(sdata, -1, true, false);
+ if (defmultikey && !new)
+ __ieee80211_set_default_key(sdata, -1, false, true);
if (defmgmtkey && !new)
__ieee80211_set_default_mgmt_key(sdata, -1);
rcu_assign_pointer(sdata->keys[idx], new);
- if (defkey && new)
- __ieee80211_set_default_key(sdata, new->conf.keyidx);
+ if (defunikey && new)
+ __ieee80211_set_default_key(sdata, new->conf.keyidx,
+ true, false);
+ if (defmultikey && new)
+ __ieee80211_set_default_key(sdata, new->conf.keyidx,
+ false, true);
if (defmgmtkey && new)
__ieee80211_set_default_mgmt_key(sdata,
new->conf.keyidx);
}
- if (old) {
- /*
- * We'll use an empty list to indicate that the key
- * has already been removed.
- */
- list_del_init(&old->list);
- }
+ if (old)
+ list_del(&old->list);
}
struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
@@ -373,6 +375,12 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
if (!key)
return;
+ /*
+ * Synchronize so the TX path can no longer be using
+ * this key before we free/remove it.
+ */
+ synchronize_rcu();
+
if (key->local)
ieee80211_key_disable_hw_accel(key);
@@ -414,8 +422,8 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct sta_info *ap;
/*
- * We're getting a sta pointer in,
- * so must be under RCU read lock.
+ * We're getting a sta pointer in, so must be under
+ * appropriate locking for sta_info_get().
*/
/* same here, the AP could be using QoS */
@@ -509,11 +517,12 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
mutex_lock(&sdata->local->key_mtx);
- ieee80211_debugfs_key_remove_default(sdata);
ieee80211_debugfs_key_remove_mgmt_default(sdata);
list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
__ieee80211_key_free(key);
+ ieee80211_debugfs_key_update_default(sdata);
+
mutex_unlock(&sdata->local->key_mtx);
}
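
With the single default_key split into default_unicast_key and default_multicast_key above, the transmit path can pick one or the other depending on whether the destination is a group address. A minimal illustration of that selection; the structures, the helper, and the selection function are simplified stand-ins rather than mac80211 code.

#include <stdbool.h>
#include <stdio.h>

struct key { int idx; };

struct iface_keys {
    struct key *default_unicast_key;
    struct key *default_multicast_key;
};

/* an Ethernet address is multicast if the low bit of the first octet is set */
static bool is_multicast_ether_addr(const unsigned char *addr)
{
    return addr[0] & 0x01;
}

static struct key *select_default_key(const struct iface_keys *keys,
                                      const unsigned char *da)
{
    return is_multicast_ether_addr(da) ? keys->default_multicast_key :
                                         keys->default_unicast_key;
}

int main(void)
{
    struct key uni = { .idx = 0 }, multi = { .idx = 1 };
    struct iface_keys keys = { &uni, &multi };
    unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    unsigned char sta[6]   = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

    printf("to station:   key idx %d\n", select_default_key(&keys, sta)->idx);
    printf("to broadcast: key idx %d\n", select_default_key(&keys, bcast)->idx);
    return 0;
}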
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 0db1c0f5f697..8106aa1b7466 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -138,7 +138,8 @@ int __must_check ieee80211_key_link(struct ieee80211_key *key,
struct sta_info *sta);
void ieee80211_key_free(struct ieee80211_local *local,
struct ieee80211_key *key);
-void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx);
+void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx,
+ bool uni, bool multi);
void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
int idx);
void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 063aad944246..14590332c81c 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -54,12 +54,22 @@ void ieee80211_led_radio(struct ieee80211_local *local, bool enabled)
led_trigger_event(local->radio_led, LED_OFF);
}
+void ieee80211_led_names(struct ieee80211_local *local)
+{
+ snprintf(local->rx_led_name, sizeof(local->rx_led_name),
+ "%srx", wiphy_name(local->hw.wiphy));
+ snprintf(local->tx_led_name, sizeof(local->tx_led_name),
+ "%stx", wiphy_name(local->hw.wiphy));
+ snprintf(local->assoc_led_name, sizeof(local->assoc_led_name),
+ "%sassoc", wiphy_name(local->hw.wiphy));
+ snprintf(local->radio_led_name, sizeof(local->radio_led_name),
+ "%sradio", wiphy_name(local->hw.wiphy));
+}
+
void ieee80211_led_init(struct ieee80211_local *local)
{
local->rx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
if (local->rx_led) {
- snprintf(local->rx_led_name, sizeof(local->rx_led_name),
- "%srx", wiphy_name(local->hw.wiphy));
local->rx_led->name = local->rx_led_name;
if (led_trigger_register(local->rx_led)) {
kfree(local->rx_led);
@@ -69,8 +79,6 @@ void ieee80211_led_init(struct ieee80211_local *local)
local->tx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
if (local->tx_led) {
- snprintf(local->tx_led_name, sizeof(local->tx_led_name),
- "%stx", wiphy_name(local->hw.wiphy));
local->tx_led->name = local->tx_led_name;
if (led_trigger_register(local->tx_led)) {
kfree(local->tx_led);
@@ -80,8 +88,6 @@ void ieee80211_led_init(struct ieee80211_local *local)
local->assoc_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
if (local->assoc_led) {
- snprintf(local->assoc_led_name, sizeof(local->assoc_led_name),
- "%sassoc", wiphy_name(local->hw.wiphy));
local->assoc_led->name = local->assoc_led_name;
if (led_trigger_register(local->assoc_led)) {
kfree(local->assoc_led);
@@ -91,14 +97,19 @@ void ieee80211_led_init(struct ieee80211_local *local)
local->radio_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
if (local->radio_led) {
- snprintf(local->radio_led_name, sizeof(local->radio_led_name),
- "%sradio", wiphy_name(local->hw.wiphy));
local->radio_led->name = local->radio_led_name;
if (led_trigger_register(local->radio_led)) {
kfree(local->radio_led);
local->radio_led = NULL;
}
}
+
+ if (local->tpt_led_trigger) {
+ if (led_trigger_register(&local->tpt_led_trigger->trig)) {
+ kfree(local->tpt_led_trigger);
+ local->tpt_led_trigger = NULL;
+ }
+ }
}
void ieee80211_led_exit(struct ieee80211_local *local)
@@ -119,15 +130,18 @@ void ieee80211_led_exit(struct ieee80211_local *local)
led_trigger_unregister(local->rx_led);
kfree(local->rx_led);
}
+
+ if (local->tpt_led_trigger) {
+ led_trigger_unregister(&local->tpt_led_trigger->trig);
+ kfree(local->tpt_led_trigger);
+ }
}
char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
- if (local->radio_led)
- return local->radio_led_name;
- return NULL;
+ return local->radio_led_name;
}
EXPORT_SYMBOL(__ieee80211_get_radio_led_name);
@@ -135,9 +149,7 @@ char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
- if (local->assoc_led)
- return local->assoc_led_name;
- return NULL;
+ return local->assoc_led_name;
}
EXPORT_SYMBOL(__ieee80211_get_assoc_led_name);
@@ -145,9 +157,7 @@ char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
- if (local->tx_led)
- return local->tx_led_name;
- return NULL;
+ return local->tx_led_name;
}
EXPORT_SYMBOL(__ieee80211_get_tx_led_name);
@@ -155,8 +165,144 @@ char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
- if (local->rx_led)
- return local->rx_led_name;
- return NULL;
+ return local->rx_led_name;
}
EXPORT_SYMBOL(__ieee80211_get_rx_led_name);
+
+static unsigned long tpt_trig_traffic(struct ieee80211_local *local,
+ struct tpt_led_trigger *tpt_trig)
+{
+ unsigned long traffic, delta;
+
+ traffic = tpt_trig->tx_bytes + tpt_trig->rx_bytes;
+
+ delta = traffic - tpt_trig->prev_traffic;
+ tpt_trig->prev_traffic = traffic;
+ return DIV_ROUND_UP(delta, 1024 / 8);
+}
+
+static void tpt_trig_timer(unsigned long data)
+{
+ struct ieee80211_local *local = (void *)data;
+ struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger;
+ struct led_classdev *led_cdev;
+ unsigned long on, off, tpt;
+ int i;
+
+ if (!tpt_trig->running)
+ return;
+
+ mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ));
+
+ tpt = tpt_trig_traffic(local, tpt_trig);
+
+ /* default to just solid on */
+ on = 1;
+ off = 0;
+
+ for (i = tpt_trig->blink_table_len - 1; i >= 0; i--) {
+ if (tpt_trig->blink_table[i].throughput < 0 ||
+ tpt > tpt_trig->blink_table[i].throughput) {
+ off = tpt_trig->blink_table[i].blink_time / 2;
+ on = tpt_trig->blink_table[i].blink_time - off;
+ break;
+ }
+ }
+
+ read_lock(&tpt_trig->trig.leddev_list_lock);
+ list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
+ led_blink_set(led_cdev, &on, &off);
+ read_unlock(&tpt_trig->trig.leddev_list_lock);
+}
+
+char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+ unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct tpt_led_trigger *tpt_trig;
+
+ if (WARN_ON(local->tpt_led_trigger))
+ return NULL;
+
+ tpt_trig = kzalloc(sizeof(struct tpt_led_trigger), GFP_KERNEL);
+ if (!tpt_trig)
+ return NULL;
+
+ snprintf(tpt_trig->name, sizeof(tpt_trig->name),
+ "%stpt", wiphy_name(local->hw.wiphy));
+
+ tpt_trig->trig.name = tpt_trig->name;
+
+ tpt_trig->blink_table = blink_table;
+ tpt_trig->blink_table_len = blink_table_len;
+ tpt_trig->want = flags;
+
+ setup_timer(&tpt_trig->timer, tpt_trig_timer, (unsigned long)local);
+
+ local->tpt_led_trigger = tpt_trig;
+
+ return tpt_trig->name;
+}
+EXPORT_SYMBOL(__ieee80211_create_tpt_led_trigger);
+
+static void ieee80211_start_tpt_led_trig(struct ieee80211_local *local)
+{
+ struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger;
+
+ if (tpt_trig->running)
+ return;
+
+ /* reset traffic */
+ tpt_trig_traffic(local, tpt_trig);
+ tpt_trig->running = true;
+
+ tpt_trig_timer((unsigned long)local);
+ mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ));
+}
+
+static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
+{
+ struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger;
+ struct led_classdev *led_cdev;
+
+ if (!tpt_trig->running)
+ return;
+
+ tpt_trig->running = false;
+ del_timer_sync(&tpt_trig->timer);
+
+ read_lock(&tpt_trig->trig.leddev_list_lock);
+ list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
+ led_brightness_set(led_cdev, LED_OFF);
+ read_unlock(&tpt_trig->trig.leddev_list_lock);
+}
+
+void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
+ unsigned int types_on, unsigned int types_off)
+{
+ struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger;
+ bool allowed;
+
+ WARN_ON(types_on & types_off);
+
+ if (!tpt_trig)
+ return;
+
+ tpt_trig->active &= ~types_off;
+ tpt_trig->active |= types_on;
+
+ /*
+ * Regardless of wanted state, we shouldn't blink when
+ * the radio is disabled -- this can happen due to some
+ * code ordering issues with __ieee80211_recalc_idle()
+ * being called before the radio is started.
+ */
+ allowed = tpt_trig->active & IEEE80211_TPT_LEDTRIG_FL_RADIO;
+
+ if (!allowed || !(tpt_trig->active & tpt_trig->want))
+ ieee80211_stop_tpt_led_trig(local);
+ else
+ ieee80211_start_tpt_led_trig(local);
+}
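For reference, a driver wires the new throughput trigger up before ieee80211_register_hw(), since ieee80211_led_init() (called during registration) is what actually registers it. A minimal sketch of the driver side, assuming the ieee80211_create_tpt_led_trigger() wrapper exported through mac80211.h; the blink-table values and the driver function name are purely illustrative:

#include <linux/kernel.h>
#include <net/mac80211.h>

/* illustrative thresholds: throughput in Kbit/s, blink_time in ms */
static const struct ieee80211_tpt_blink my_tpt_blink[] = {
	{ .throughput = 0,          .blink_time = 334 },
	{ .throughput = 5 * 1024,   .blink_time = 190 },
	{ .throughput = 50 * 1024,  .blink_time = 90  },
	{ .throughput = 300 * 1024, .blink_time = 50  },
};

static void my_driver_setup_leds(struct ieee80211_hw *hw)
{
	char *name;

	/* must run before ieee80211_register_hw() */
	name = ieee80211_create_tpt_led_trigger(hw,
					IEEE80211_TPT_LEDTRIG_FL_RADIO,
					my_tpt_blink,
					ARRAY_SIZE(my_tpt_blink));
	if (name)
		pr_info("throughput LED trigger: %s\n", name);
}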
diff --git a/net/mac80211/led.h b/net/mac80211/led.h
index 77b1e1ba6039..e0275d9befa8 100644
--- a/net/mac80211/led.h
+++ b/net/mac80211/led.h
@@ -12,14 +12,17 @@
#include "ieee80211_i.h"
#ifdef CONFIG_MAC80211_LEDS
-extern void ieee80211_led_rx(struct ieee80211_local *local);
-extern void ieee80211_led_tx(struct ieee80211_local *local, int q);
-extern void ieee80211_led_assoc(struct ieee80211_local *local,
- bool associated);
-extern void ieee80211_led_radio(struct ieee80211_local *local,
- bool enabled);
-extern void ieee80211_led_init(struct ieee80211_local *local);
-extern void ieee80211_led_exit(struct ieee80211_local *local);
+void ieee80211_led_rx(struct ieee80211_local *local);
+void ieee80211_led_tx(struct ieee80211_local *local, int q);
+void ieee80211_led_assoc(struct ieee80211_local *local,
+ bool associated);
+void ieee80211_led_radio(struct ieee80211_local *local,
+ bool enabled);
+void ieee80211_led_names(struct ieee80211_local *local);
+void ieee80211_led_init(struct ieee80211_local *local);
+void ieee80211_led_exit(struct ieee80211_local *local);
+void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
+ unsigned int types_on, unsigned int types_off);
#else
static inline void ieee80211_led_rx(struct ieee80211_local *local)
{
@@ -35,10 +38,36 @@ static inline void ieee80211_led_radio(struct ieee80211_local *local,
bool enabled)
{
}
+static inline void ieee80211_led_names(struct ieee80211_local *local)
+{
+}
static inline void ieee80211_led_init(struct ieee80211_local *local)
{
}
static inline void ieee80211_led_exit(struct ieee80211_local *local)
{
}
+static inline void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
+ unsigned int types_on,
+ unsigned int types_off)
+{
+}
+#endif
+
+static inline void
+ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, __le16 fc, int bytes)
+{
+#ifdef CONFIG_MAC80211_LEDS
+ if (local->tpt_led_trigger && ieee80211_is_data(fc))
+ local->tpt_led_trigger->tx_bytes += bytes;
+#endif
+}
+
+static inline void
+ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, __le16 fc, int bytes)
+{
+#ifdef CONFIG_MAC80211_LEDS
+ if (local->tpt_led_trigger && ieee80211_is_data(fc))
+ local->tpt_led_trigger->rx_bytes += bytes;
#endif
+}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 973fee9f7d69..a46ff06d7cb8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -39,6 +39,8 @@ module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
"Disable 40MHz support in the 2.4GHz band");
+static struct lock_class_key ieee80211_rx_skb_queue_class;
+
void ieee80211_configure_filter(struct ieee80211_local *local)
{
u64 mc;
@@ -484,6 +486,10 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
BIT(IEEE80211_STYPE_DEAUTH >> 4) |
BIT(IEEE80211_STYPE_ACTION >> 4),
},
+ [NL80211_IFTYPE_MESH_POINT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4),
+ },
};
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
@@ -517,10 +523,15 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes;
+ wiphy->privid = mac80211_wiphy_privid;
+
wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_4ADDR_AP |
- WIPHY_FLAG_4ADDR_STATION;
- wiphy->privid = mac80211_wiphy_privid;
+ WIPHY_FLAG_4ADDR_STATION |
+ WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS;
+
+ if (!ops->set_key)
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
@@ -560,6 +571,16 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
spin_lock_init(&local->filter_lock);
spin_lock_init(&local->queue_stop_reason_lock);
+ /*
+ * The rx_skb_queue is only accessed from tasklets,
+ * but other SKB queues are used from within IRQ
+ * context. Therefore, this one needs a different
+ * locking class so our direct, non-irq-safe use of
+ * the queue's lock doesn't throw lockdep warnings.
+ */
+ skb_queue_head_init_class(&local->rx_skb_queue,
+ &ieee80211_rx_skb_queue_class);
+
INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
ieee80211_work_init(local);
@@ -596,6 +617,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
/* init dummy netdev for use w/ NAPI */
init_dummy_netdev(&local->napi_dev);
+ ieee80211_led_names(local);
+
+ ieee80211_hw_roc_setup(local);
+
return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -740,6 +765,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
}
}
+ if (!local->ops->remain_on_channel)
+ local->hw.wiphy->max_remain_on_channel_duration = 5000;
+
result = wiphy_register(local->hw.wiphy);
if (result < 0)
goto fail_wiphy_register;
@@ -901,6 +929,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
+ skb_queue_purge(&local->rx_skb_queue);
destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 63e1188d5062..ca3af4685b0a 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -124,15 +124,6 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
ieee80211_mesh_housekeeping_timer((unsigned long) sdata);
}
-void mesh_ids_set_default(struct ieee80211_if_mesh *sta)
-{
- sta->mesh_pp_id = 0; /* HWMP */
- sta->mesh_pm_id = 0; /* Airtime */
- sta->mesh_cc_id = 0; /* Disabled */
- sta->mesh_sp_id = 0; /* Neighbor Offset */
- sta->mesh_auth_id = 0; /* Disabled */
-}
-
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
{
int i;
@@ -287,6 +278,13 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
*pos++ |= sdata->u.mesh.accepting_plinks ?
MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
*pos++ = 0x00;
+
+ if (sdata->u.mesh.vendor_ie) {
+ int len = sdata->u.mesh.vendor_ie_len;
+ const u8 *data = sdata->u.mesh.vendor_ie;
+ if (skb_tailroom(skb) > len)
+ memcpy(skb_put(skb, len), data, len);
+ }
}
u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
@@ -412,39 +410,33 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
* ieee80211_new_mesh_header - create a new mesh header
* @meshhdr: uninitialized mesh header
* @sdata: mesh interface to be used
- * @addr4: addr4 of the mesh frame (1st in ae header)
- * may be NULL
- * @addr5: addr5 of the mesh frame (1st or 2nd in ae header)
- * may be NULL unless addr6 is present
- * @addr6: addr6 of the mesh frame (2nd or 3rd in ae header)
- * may be NULL unless addr5 is present
+ * @addr4or5: 1st address in the ae header, which may correspond to address 4
+ * (if addr6 is NULL) or address 5 (if addr6 is present). It may
+ * be NULL.
+ * @addr6: 2nd address in the ae header, which corresponds to addr6 of the
+ * mesh frame
*
* Return the header length.
*/
int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
- struct ieee80211_sub_if_data *sdata, char *addr4,
- char *addr5, char *addr6)
+ struct ieee80211_sub_if_data *sdata, char *addr4or5,
+ char *addr6)
{
int aelen = 0;
+ BUG_ON(!addr4or5 && addr6);
memset(meshhdr, 0, sizeof(*meshhdr));
meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
sdata->u.mesh.mesh_seqnum++;
- if (addr4) {
+ if (addr4or5 && !addr6) {
meshhdr->flags |= MESH_FLAGS_AE_A4;
aelen += ETH_ALEN;
- memcpy(meshhdr->eaddr1, addr4, ETH_ALEN);
- }
- if (addr5 && addr6) {
+ memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN);
+ } else if (addr4or5 && addr6) {
meshhdr->flags |= MESH_FLAGS_AE_A5_A6;
aelen += 2 * ETH_ALEN;
- if (!addr4) {
- memcpy(meshhdr->eaddr1, addr5, ETH_ALEN);
- memcpy(meshhdr->eaddr2, addr6, ETH_ALEN);
- } else {
- memcpy(meshhdr->eaddr2, addr5, ETH_ALEN);
- memcpy(meshhdr->eaddr3, addr6, ETH_ALEN);
- }
+ memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN);
+ memcpy(meshhdr->eaddr2, addr6, ETH_ALEN);
}
return 6 + aelen;
}
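With the new signature there are exactly three valid calling forms, which the tx path further down uses; passing addr6 without addr4or5 now trips the BUG_ON(). A rough sketch of how a caller picks among them (the helper and its parameter names are illustrative, the three ieee80211_new_mesh_header() calls mirror the tx hunks below):

/* sketch: the three valid AE header forms */
static int build_mesh_hdr(struct ieee80211s_hdr *hdr,
			  struct ieee80211_sub_if_data *sdata,
			  char *da, char *sa, bool local_sa, bool proxied)
{
	if (local_sa)		/* frame originated here: no AE field */
		return ieee80211_new_mesh_header(hdr, sdata, NULL, NULL);
	if (!proxied)		/* AE = A4, carries the outer SA */
		return ieee80211_new_mesh_header(hdr, sdata, sa, NULL);
	/* AE = A5/A6, carries the proxied DA and SA */
	return ieee80211_new_mesh_header(hdr, sdata, da, sa);
}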
@@ -518,6 +510,9 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
atomic_inc(&local->iff_allmultis);
ieee80211_configure_filter(local);
+ ifmsh->mesh_cc_id = 0; /* Disabled */
+ ifmsh->mesh_sp_id = 0; /* Neighbor Offset */
+ ifmsh->mesh_auth_id = 0; /* Disabled */
set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
ieee80211_mesh_root_setup(ifmsh);
ieee80211_queue_work(&local->hw, &sdata->work);
@@ -688,7 +683,6 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
/* Allocate all mesh structures when creating the first mesh interface. */
if (!mesh_allocated)
ieee80211s_init();
- mesh_ids_set_default(ifmsh);
setup_timer(&ifmsh->mesh_path_timer,
ieee80211_mesh_path_timer,
(unsigned long) sdata);
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 039d7fa0af74..b99e230fe31c 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -164,17 +164,6 @@ struct mesh_rmc {
};
-/*
- * MESH_CFG_COMP_LEN Includes:
- * - Active path selection protocol ID.
- * - Active path selection metric ID.
- * - Congestion control mode identifier.
- * - Channel precedence.
- * Does not include mesh capabilities, which may vary across nodes in the same
- * mesh
- */
-#define MESH_CFG_CMP_LEN (IEEE80211_MESH_CONFIG_LEN - 2)
-
#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */
#define MESH_PATH_EXPIRE (600 * HZ)
@@ -198,8 +187,8 @@ struct mesh_rmc {
int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
const u8 *da, const u8 *sa);
int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
- struct ieee80211_sub_if_data *sdata, char *addr4,
- char *addr5, char *addr6);
+ struct ieee80211_sub_if_data *sdata, char *addr4or5,
+ char *addr6);
int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
struct ieee80211_sub_if_data *sdata);
bool mesh_matches_local(struct ieee802_11_elems *ie,
@@ -295,6 +284,11 @@ static inline void mesh_path_activate(struct mesh_path *mpath)
mpath->flags |= MESH_PATH_ACTIVE | MESH_PATH_RESOLVED;
}
+static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
+{
+ return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
+}
+
#define for_each_mesh_entry(x, p, node, i) \
for (i = 0; i <= x->hash_mask; i++) \
hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
@@ -315,6 +309,8 @@ static inline void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
{}
static inline void mesh_plink_quiesce(struct sta_info *sta) {}
static inline void mesh_plink_restart(struct sta_info *sta) {}
+static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
+{ return false; }
#endif
#endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 1c91f0f3c307..44b53931ba5e 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -160,7 +160,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid,
__le16 reason) {
struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+ struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
+ sdata->u.mesh.vendor_ie_len);
struct ieee80211_mgmt *mgmt;
bool include_plid = false;
static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A };
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 4b564091e51d..b4e52676f3fb 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -14,6 +14,7 @@
*/
#include <net/mac80211.h>
#include "ieee80211_i.h"
+#include "driver-trace.h"
/*
* inform AP that we will go to sleep so that it will buffer the frames
@@ -190,3 +191,87 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
}
mutex_unlock(&local->iflist_mtx);
}
+
+static void ieee80211_hw_roc_start(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, hw_roc_start);
+ struct ieee80211_sub_if_data *sdata;
+
+ mutex_lock(&local->mtx);
+
+ if (!local->hw_roc_channel) {
+ mutex_unlock(&local->mtx);
+ return;
+ }
+
+ ieee80211_recalc_idle(local);
+
+ if (local->hw_roc_skb) {
+ sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev);
+ ieee80211_tx_skb(sdata, local->hw_roc_skb);
+ local->hw_roc_skb = NULL;
+ } else {
+ cfg80211_ready_on_channel(local->hw_roc_dev,
+ local->hw_roc_cookie,
+ local->hw_roc_channel,
+ local->hw_roc_channel_type,
+ local->hw_roc_duration,
+ GFP_KERNEL);
+ }
+
+ mutex_unlock(&local->mtx);
+}
+
+void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ trace_api_ready_on_channel(local);
+
+ ieee80211_queue_work(hw, &local->hw_roc_start);
+}
+EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
+
+static void ieee80211_hw_roc_done(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, hw_roc_done);
+
+ mutex_lock(&local->mtx);
+
+ if (!local->hw_roc_channel) {
+ mutex_unlock(&local->mtx);
+ return;
+ }
+
+ if (!local->hw_roc_for_tx)
+ cfg80211_remain_on_channel_expired(local->hw_roc_dev,
+ local->hw_roc_cookie,
+ local->hw_roc_channel,
+ local->hw_roc_channel_type,
+ GFP_KERNEL);
+
+ local->hw_roc_channel = NULL;
+ local->hw_roc_cookie = 0;
+
+ ieee80211_recalc_idle(local);
+
+ mutex_unlock(&local->mtx);
+}
+
+void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ trace_api_remain_on_channel_expired(local);
+
+ ieee80211_queue_work(hw, &local->hw_roc_done);
+}
+EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
+
+void ieee80211_hw_roc_setup(struct ieee80211_local *local)
+{
+ INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
+ INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
+}
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 4ad7a362fcc1..165a4518bb48 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -374,7 +374,7 @@ minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, stru
if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
return;
- ieee80211_start_tx_ba_session(pubsta, tid);
+ ieee80211_start_tx_ba_session(pubsta, tid, 5000);
}
static void
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2fe8f5f86499..a6701ed87f0d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -533,10 +533,11 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
struct tid_ampdu_rx *tid_agg_rx,
- int index,
- struct sk_buff_head *frames)
+ int index)
{
+ struct ieee80211_local *local = hw_to_local(hw);
struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
+ struct ieee80211_rx_status *status;
lockdep_assert_held(&tid_agg_rx->reorder_lock);
@@ -546,7 +547,9 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
/* release the frame from the reorder ring buffer */
tid_agg_rx->stored_mpdu_num--;
tid_agg_rx->reorder_buf[index] = NULL;
- __skb_queue_tail(frames, skb);
+ status = IEEE80211_SKB_RXCB(skb);
+ status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
+ skb_queue_tail(&local->rx_skb_queue, skb);
no_frame:
tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -554,8 +557,7 @@ no_frame:
static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
struct tid_ampdu_rx *tid_agg_rx,
- u16 head_seq_num,
- struct sk_buff_head *frames)
+ u16 head_seq_num)
{
int index;
@@ -564,7 +566,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
tid_agg_rx->buf_size;
- ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+ ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
}
}
@@ -580,8 +582,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
- struct tid_ampdu_rx *tid_agg_rx,
- struct sk_buff_head *frames)
+ struct tid_ampdu_rx *tid_agg_rx)
{
int index, j;
@@ -612,8 +613,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
wiphy_debug(hw->wiphy,
"release an RX reorder frame due to timeout on earlier frames\n");
#endif
- ieee80211_release_reorder_frame(hw, tid_agg_rx,
- j, frames);
+ ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
/*
* Increment the head seq# also for the skipped slots.
@@ -623,31 +623,11 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
skipped = 0;
}
} else while (tid_agg_rx->reorder_buf[index]) {
- ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+ ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
tid_agg_rx->buf_size;
}
- /*
- * Disable the reorder release timer for now.
- *
- * The current implementation lacks a proper locking scheme
- * which would protect vital statistic and debug counters
- * from being updated by two different but concurrent BHs.
- *
- * More information about the topic is available from:
- * - thread: http://marc.info/?t=128635927000001
- *
- * What was wrong:
- * => http://marc.info/?l=linux-wireless&m=128636170811964
- * "Basically the thing is that until your patch, the data
- * in the struct didn't actually need locking because it
- * was accessed by the RX path only which is not concurrent."
- *
- * List of what needs to be fixed:
- * => http://marc.info/?l=linux-wireless&m=128656352920957
- *
-
if (tid_agg_rx->stored_mpdu_num) {
j = index = seq_sub(tid_agg_rx->head_seq_num,
tid_agg_rx->ssn) % tid_agg_rx->buf_size;
@@ -666,10 +646,6 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
} else {
del_timer(&tid_agg_rx->reorder_timer);
}
- */
-
-set_release_timer:
- return;
}
/*
@@ -679,8 +655,7 @@ set_release_timer:
*/
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
struct tid_ampdu_rx *tid_agg_rx,
- struct sk_buff *skb,
- struct sk_buff_head *frames)
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -707,8 +682,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
/* release stored frames up to new head to stack */
- ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
- frames);
+ ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
}
/* Now the new frame is always in the range of the reordering buffer */
@@ -736,7 +710,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
tid_agg_rx->reorder_buf[index] = skb;
tid_agg_rx->reorder_time[index] = jiffies;
tid_agg_rx->stored_mpdu_num++;
- ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
+ ieee80211_sta_reorder_release(hw, tid_agg_rx);
out:
spin_unlock(&tid_agg_rx->reorder_lock);
@@ -747,8 +721,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
* Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
* true if the MPDU was buffered, false if it should be processed.
*/
-static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
- struct sk_buff_head *frames)
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
{
struct sk_buff *skb = rx->skb;
struct ieee80211_local *local = rx->local;
@@ -803,11 +776,11 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
* sure that we cannot get to it any more before doing
* anything with it.
*/
- if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
+ if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
return;
dont_reorder:
- __skb_queue_tail(frames, skb);
+ skb_queue_tail(&local->rx_skb_queue, skb);
}
static ieee80211_rx_result debug_noinline
@@ -955,12 +928,31 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
* have been expected.
*/
struct ieee80211_key *key = NULL;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ int i;
+
if (ieee80211_is_mgmt(fc) &&
is_multicast_ether_addr(hdr->addr1) &&
(key = rcu_dereference(rx->sdata->default_mgmt_key)))
rx->key = key;
- else if ((key = rcu_dereference(rx->sdata->default_key)))
- rx->key = key;
+ else {
+ if (rx->sta) {
+ for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ key = rcu_dereference(rx->sta->gtk[i]);
+ if (key)
+ break;
+ }
+ }
+ if (!key) {
+ for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ key = rcu_dereference(sdata->keys[i]);
+ if (key)
+ break;
+ }
+ }
+ if (key)
+ rx->key = key;
+ }
return RX_CONTINUE;
} else {
u8 keyid;
@@ -1170,6 +1162,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
* exchange sequence.
*/
if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
@@ -1521,12 +1514,30 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
if (unlikely(!ieee80211_has_protected(fc) &&
ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
- rx->key))
+ rx->key)) {
+ if (ieee80211_is_deauth(fc))
+ cfg80211_send_unprot_deauth(rx->sdata->dev,
+ rx->skb->data,
+ rx->skb->len);
+ else if (ieee80211_is_disassoc(fc))
+ cfg80211_send_unprot_disassoc(rx->sdata->dev,
+ rx->skb->data,
+ rx->skb->len);
return -EACCES;
+ }
/* BIP does not use Protected field, so need to check MMIE */
if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
- ieee80211_get_mmie_keyidx(rx->skb) < 0))
+ ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
+ if (ieee80211_is_deauth(fc))
+ cfg80211_send_unprot_deauth(rx->sdata->dev,
+ rx->skb->data,
+ rx->skb->len);
+ else if (ieee80211_is_disassoc(fc))
+ cfg80211_send_unprot_disassoc(rx->sdata->dev,
+ rx->skb->data,
+ rx->skb->len);
return -EACCES;
+ }
/*
* When using MFP, Action frames are not allowed prior to
* having configured keys.
@@ -1797,6 +1808,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!fwd_skb && net_ratelimit())
printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
sdata->name);
+ if (!fwd_skb)
+ goto out;
fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
@@ -1834,6 +1847,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
}
}
+ out:
if (is_multicast_ether_addr(hdr->addr1) ||
sdata->dev->flags & IFF_PROMISC)
return RX_CONTINUE;
@@ -1890,7 +1904,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
}
static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
{
struct ieee80211_local *local = rx->local;
struct ieee80211_hw *hw = &local->hw;
@@ -1930,8 +1944,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
spin_lock(&tid_agg_rx->reorder_lock);
/* release stored frames up to start of BAR */
- ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
- frames);
+ ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
spin_unlock(&tid_agg_rx->reorder_lock);
kfree_skb(skb);
@@ -2124,10 +2137,13 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
}
break;
case WLAN_CATEGORY_MESH_PLINK:
- case WLAN_CATEGORY_MESH_PATH_SEL:
if (!ieee80211_vif_is_mesh(&sdata->vif))
break;
goto queue;
+ case WLAN_CATEGORY_MESH_PATH_SEL:
+ if (!mesh_path_sel_is_hwmp(sdata))
+ break;
+ goto queue;
}
return RX_CONTINUE;
@@ -2445,8 +2461,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
}
}
-static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
- struct sk_buff_head *frames)
+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
{
ieee80211_rx_result res = RX_DROP_MONITOR;
struct sk_buff *skb;
@@ -2458,7 +2473,15 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
goto rxh_next; \
} while (0);
- while ((skb = __skb_dequeue(frames))) {
+ spin_lock(&rx->local->rx_skb_queue.lock);
+ if (rx->local->running_rx_handler)
+ goto unlock;
+
+ rx->local->running_rx_handler = true;
+
+ while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
+ spin_unlock(&rx->local->rx_skb_queue.lock);
+
/*
* all the other fields are valid across frames
* that belong to an aMPDU since they are on the
@@ -2481,12 +2504,7 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
CALL_RXH(ieee80211_rx_h_mesh_fwding);
#endif
CALL_RXH(ieee80211_rx_h_data)
-
- /* special treatment -- needs the queue */
- res = ieee80211_rx_h_ctrl(rx, frames);
- if (res != RX_CONTINUE)
- goto rxh_next;
-
+ CALL_RXH(ieee80211_rx_h_ctrl);
CALL_RXH(ieee80211_rx_h_mgmt_check)
CALL_RXH(ieee80211_rx_h_action)
CALL_RXH(ieee80211_rx_h_userspace_mgmt)
@@ -2495,18 +2513,20 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
rxh_next:
ieee80211_rx_handlers_result(rx, res);
-
+ spin_lock(&rx->local->rx_skb_queue.lock);
#undef CALL_RXH
}
+
+ rx->local->running_rx_handler = false;
+
+ unlock:
+ spin_unlock(&rx->local->rx_skb_queue.lock);
}
static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
{
- struct sk_buff_head reorder_release;
ieee80211_rx_result res = RX_DROP_MONITOR;
- __skb_queue_head_init(&reorder_release);
-
#define CALL_RXH(rxh) \
do { \
res = rxh(rx); \
@@ -2517,9 +2537,9 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
CALL_RXH(ieee80211_rx_h_passive_scan)
CALL_RXH(ieee80211_rx_h_check)
- ieee80211_rx_reorder_ampdu(rx, &reorder_release);
+ ieee80211_rx_reorder_ampdu(rx);
- ieee80211_rx_handlers(rx, &reorder_release);
+ ieee80211_rx_handlers(rx);
return;
rxh_next:
@@ -2534,7 +2554,6 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
*/
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
{
- struct sk_buff_head frames;
struct ieee80211_rx_data rx = {
.sta = sta,
.sdata = sta->sdata,
@@ -2547,13 +2566,11 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
if (!tid_agg_rx)
return;
- __skb_queue_head_init(&frames);
-
spin_lock(&tid_agg_rx->reorder_lock);
- ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
+ ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
spin_unlock(&tid_agg_rx->reorder_lock);
- ieee80211_rx_handlers(&rx, &frames);
+ ieee80211_rx_handlers(&rx);
}
/* main receive path */
@@ -2888,6 +2905,9 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
+ ieee80211_tpt_led_trig_rx(local,
+ ((struct ieee80211_hdr *)skb->data)->frame_control,
+ skb->len);
__ieee80211_rx_handle_packet(hw, skb);
rcu_read_unlock();
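The running_rx_handler/rx_skb_queue combination above is a small "single drainer" construct: any context may append to the queue, but only one context at a time runs the handlers, and frames queued while it is draining are picked up by the same loop. Stripped of the mac80211 specifics, the pattern looks roughly like this (generic sketch, illustrative names):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void drain_rx_queue(struct sk_buff_head *q, bool *running,
			   void (*handle)(struct sk_buff *skb))
{
	struct sk_buff *skb;

	spin_lock(&q->lock);
	if (*running)			/* someone else is already draining */
		goto unlock;
	*running = true;

	while ((skb = __skb_dequeue(q))) {
		spin_unlock(&q->lock);
		handle(skb);		/* run the handlers without the lock */
		spin_lock(&q->lock);
	}

	*running = false;
unlock:
	spin_unlock(&q->lock);
}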
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index fdca52cf88de..bbdd2a86a94b 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -78,6 +78,7 @@ enum ieee80211_sta_info_flags {
* @addba_resp_timer: timer for peer's response to addba request
* @pending: pending frames queue -- use sta's spinlock to protect
* @dialog_token: dialog token for aggregation session
+ * @timeout: session timeout value to be filled in ADDBA requests
* @state: session state (see above)
* @stop_initiator: initiator of a session stop
* @tx_stop: TX DelBA frame when stopping
@@ -96,6 +97,7 @@ struct tid_ampdu_tx {
struct timer_list addba_resp_timer;
struct sk_buff_head pending;
unsigned long state;
+ u16 timeout;
u8 dialog_token;
u8 stop_initiator;
bool tx_stop;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 0ee56bb0ea7e..b64b42bc774b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -539,7 +539,11 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
ieee80211_is_robust_mgmt_frame(hdr) &&
(key = rcu_dereference(tx->sdata->default_mgmt_key)))
tx->key = key;
- else if ((key = rcu_dereference(tx->sdata->default_key)))
+ else if (is_multicast_ether_addr(hdr->addr1) &&
+ (key = rcu_dereference(tx->sdata->default_multicast_key)))
+ tx->key = key;
+ else if (!is_multicast_ether_addr(hdr->addr1) &&
+ (key = rcu_dereference(tx->sdata->default_unicast_key)))
tx->key = key;
else if (tx->sdata->drop_unencrypted &&
(tx->skb->protocol != tx->sdata->control_port_protocol) &&
@@ -1293,6 +1297,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
while (skb) {
int q = skb_get_queue_mapping(skb);
+ __le16 fc;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
ret = IEEE80211_TX_OK;
@@ -1335,6 +1340,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
else
info->control.sta = NULL;
+ fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
ret = drv_tx(local, skb);
if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
dev_kfree_skb(skb);
@@ -1345,6 +1351,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
return IEEE80211_TX_AGAIN;
}
+ ieee80211_tpt_led_trig_tx(local, fc, len);
*skbp = skb = next;
ieee80211_led_tx(local, 1);
fragm = true;
@@ -1542,8 +1549,10 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
if (skb_header_cloned(skb))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
- else
+ else if (head_need || tail_need)
I802_DEBUG_INC(local->tx_expand_skb_head);
+ else
+ return 0;
if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
wiphy_debug(local->hw.wiphy,
@@ -1735,12 +1744,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_info *info;
int ret = NETDEV_TX_BUSY, head_need;
u16 ethertype, hdrlen, meshhdrlen = 0;
__le16 fc;
struct ieee80211_hdr hdr;
struct ieee80211s_hdr mesh_hdr __maybe_unused;
+ struct mesh_path *mppath = NULL;
const u8 *encaps_data;
int encaps_len, skip_header_bytes;
int nh_pos, h_pos;
@@ -1801,16 +1811,23 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
ret = NETDEV_TX_OK;
goto fail;
}
+ if (!is_multicast_ether_addr(skb->data))
+ mppath = mpp_path_lookup(skb->data, sdata);
+ /*
+ * Do not use address extension if it is a packet from
+ * the same interface and the destination is not being
+ * proxied by any other mesh point.
+ */
if (compare_ether_addr(sdata->vif.addr,
- skb->data + ETH_ALEN) == 0) {
+ skb->data + ETH_ALEN) == 0 &&
+ (!mppath || !compare_ether_addr(mppath->mpp, skb->data))) {
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
skb->data, skb->data + ETH_ALEN);
meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
- sdata, NULL, NULL, NULL);
+ sdata, NULL, NULL);
} else {
/* packet from other interface */
- struct mesh_path *mppath;
int is_mesh_mcast = 1;
const u8 *mesh_da;
@@ -1821,8 +1838,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
else {
static const u8 bcast[ETH_ALEN] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
- mppath = mpp_path_lookup(skb->data, sdata);
if (mppath) {
/* RA TA mDA mSA AE:DA SA */
mesh_da = mppath->mpp;
@@ -1840,13 +1855,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
ieee80211_new_mesh_header(&mesh_hdr,
sdata,
skb->data + ETH_ALEN,
- NULL,
NULL);
else
meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr,
sdata,
- NULL,
skb->data,
skb->data + ETH_ALEN);
@@ -1930,7 +1943,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
*/
if (skb_shared(skb)) {
tmp_skb = skb;
- skb = skb_copy(skb, GFP_ATOMIC);
+ skb = skb_clone(skb, GFP_ATOMIC);
kfree_skb(tmp_skb);
if (!skb) {
@@ -2026,6 +2039,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
skb_set_network_header(skb, nh_pos);
skb_set_transport_header(skb, h_pos);
+ info = IEEE80211_SKB_CB(skb);
memset(info, 0, sizeof(*info));
dev->trans_start = jiffies;
@@ -2216,6 +2230,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
sdata = vif_to_sdata(vif);
+ if (!ieee80211_sdata_running(sdata))
+ goto out;
+
if (tim_offset)
*tim_offset = 0;
if (tim_length)
@@ -2286,7 +2303,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
u8 *pos;
/* headroom, head length, tail length and maximum TIM length */
- skb = dev_alloc_skb(local->tx_headroom + 400);
+ skb = dev_alloc_skb(local->tx_headroom + 400 +
+ sdata->u.mesh.vendor_ie_len);
if (!skb)
goto out;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index e497476174ce..cf68700abffa 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1116,6 +1116,7 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
void ieee80211_stop_device(struct ieee80211_local *local)
{
ieee80211_led_radio(local, false);
+ ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);
cancel_work_sync(&local->reconfig_filter);
@@ -1150,6 +1151,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
ieee80211_led_radio(local, true);
+ ieee80211_mod_tpt_led_trig(local,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
}
/* add interfaces */
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 58e75bbc1f91..28bc084dbfb9 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -59,26 +59,22 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta = NULL;
- u32 sta_flags = 0;
const u8 *ra = NULL;
bool qos = false;
if (local->hw.queues < 4 || skb->len < 6) {
skb->priority = 0; /* required for correct WPA/11i MIC */
- return min_t(u16, local->hw.queues - 1,
- ieee802_1d_to_ac[skb->priority]);
+ return min_t(u16, local->hw.queues - 1, IEEE80211_AC_BE);
}
rcu_read_lock();
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
- rcu_read_lock();
sta = rcu_dereference(sdata->u.vlan.sta);
- if (sta)
- sta_flags = get_sta_flags(sta);
- rcu_read_unlock();
- if (sta)
+ if (sta) {
+ qos = get_sta_flags(sta) & WLAN_STA_WME;
break;
+ }
case NL80211_IFTYPE_AP:
ra = skb->data;
break;
@@ -107,17 +103,13 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
if (!sta && ra && !is_multicast_ether_addr(ra)) {
sta = sta_info_get(sdata, ra);
if (sta)
- sta_flags = get_sta_flags(sta);
+ qos = get_sta_flags(sta) & WLAN_STA_WME;
}
-
- if (sta_flags & WLAN_STA_WME)
- qos = true;
-
rcu_read_unlock();
if (!qos) {
skb->priority = 0; /* required for correct WPA/11i MIC */
- return ieee802_1d_to_ac[skb->priority];
+ return IEEE80211_AC_BE;
}
/* use the data classifier to determine what 802.1d tag the
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c6f293639220..22f7ad5101ab 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3430,7 +3430,7 @@ void ip_vs_control_cleanup(void)
{
EnterFunction(2);
ip_vs_trash_cleanup();
- cancel_rearming_delayed_work(&defense_work);
+ cancel_delayed_work_sync(&defense_work);
cancel_work_sync(&defense_work.work);
ip_vs_kill_estimator(&ip_vs_stats);
unregister_sysctl_table(sysctl_header);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 27a5ea6b6a0f..e61511929c66 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
-static unsigned int nf_conntrack_hash_rnd __read_mostly;
+unsigned int nf_conntrack_hash_rnd __read_mostly;
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
{
@@ -596,6 +596,21 @@ static noinline int early_drop(struct net *net, unsigned int hash)
return dropped;
}
+void init_nf_conntrack_hash_rnd(void)
+{
+ unsigned int rand;
+
+ /*
+ * Why not initialize nf_conntrack_hash_rnd in an init() function?
+ * Because there isn't enough entropy while the system is initializing,
+ * so we initialize it as late as possible.
+ */
+ do {
+ get_random_bytes(&rand, sizeof(rand));
+ } while (!rand);
+ cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
+}
+
static struct nf_conn *
__nf_conntrack_alloc(struct net *net, u16 zone,
const struct nf_conntrack_tuple *orig,
@@ -605,18 +620,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
struct nf_conn *ct;
if (unlikely(!nf_conntrack_hash_rnd)) {
- unsigned int rand;
-
- /*
- * Why not initialize nf_conntrack_rnd in a "init()" function ?
- * Because there isn't enough entropy when system initializing,
- * and we initialize it as late as possible.
- */
- do {
- get_random_bytes(&rand, sizeof(rand));
- } while (!rand);
- cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
-
+ init_nf_conntrack_hash_rnd();
/* recompute the hash as nf_conntrack_hash_rnd is initialized */
hash = hash_conntrack_raw(orig, zone);
}
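init_nf_conntrack_hash_rnd() is now shared with the expectation hashing below; the cmpxchg() makes the lazy seeding race-free, since whichever CPU stores a nonzero value first wins and every other CPU keeps using that value. The same one-time-init idiom in isolation (generic sketch, illustrative names):

#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/types.h>

static unsigned int my_hash_rnd __read_mostly;	/* 0 == not seeded yet */

static void init_my_hash_rnd(void)
{
	unsigned int rand;

	do {
		get_random_bytes(&rand, sizeof(rand));
	} while (!rand);		/* reserve 0 as "unseeded" */

	/* only the first nonzero value ever stored wins */
	cmpxchg(&my_hash_rnd, 0, rand);
}

static u32 my_hash(u32 x)
{
	if (unlikely(!my_hash_rnd))
		init_my_hash_rnd();

	/* compute only after the seed is known to be set */
	return jhash_1word(x, my_hash_rnd);
}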
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5702de35e2bb..63a1b915a7e4 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -63,6 +63,9 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
* this does not harm and it happens very rarely. */
unsigned long missed = e->missed;
+ if (!((events | missed) & e->ctmask))
+ goto out_unlock;
+
ret = notify->fcn(events | missed, &item);
if (unlikely(ret < 0 || missed)) {
spin_lock_bh(&ct->lock);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 46e8966912b1..a20fb0bd1efe 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -32,9 +32,7 @@
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
-static unsigned int nf_ct_expect_hash_rnd __read_mostly;
unsigned int nf_ct_expect_max __read_mostly;
-static int nf_ct_expect_hash_rnd_initted __read_mostly;
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
@@ -77,15 +75,13 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
{
unsigned int hash;
- if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
- get_random_bytes(&nf_ct_expect_hash_rnd,
- sizeof(nf_ct_expect_hash_rnd));
- nf_ct_expect_hash_rnd_initted = 1;
+ if (unlikely(!nf_conntrack_hash_rnd)) {
+ init_nf_conntrack_hash_rnd();
}
hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
(((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
- (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
+ (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
return ((u64)hash * nf_ct_expect_hsize) >> 32;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index b729ace1dcc1..eead9db6f899 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -254,7 +254,7 @@ ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
if (ret)
- return ret;
+ return 0;
ret = -1;
nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
@@ -453,16 +453,22 @@ ctnetlink_counters_size(const struct nf_conn *ct)
;
}
-#ifdef CONFIG_NF_CONNTRACK_SECMARK
-static int ctnetlink_nlmsg_secctx_size(const struct nf_conn *ct)
+static inline int
+ctnetlink_secctx_size(const struct nf_conn *ct)
{
- int len;
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ int len, ret;
- security_secid_to_secctx(ct->secmark, NULL, &len);
+ ret = security_secid_to_secctx(ct->secmark, NULL, &len);
+ if (ret)
+ return 0;
- return sizeof(char) * len;
-}
+ return nla_total_size(0) /* CTA_SECCTX */
+ + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
+#else
+ return 0;
#endif
+}
static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
@@ -479,10 +485,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
+ nla_total_size(0) /* CTA_PROTOINFO */
+ nla_total_size(0) /* CTA_HELP */
+ nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
-#ifdef CONFIG_NF_CONNTRACK_SECMARK
- + nla_total_size(0) /* CTA_SECCTX */
- + nla_total_size(ctnetlink_nlmsg_secctx_size(ct)) /* CTA_SECCTX_NAME */
-#endif
+ + ctnetlink_secctx_size(ct)
#ifdef CONFIG_NF_NAT_NEEDED
+ 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
+ 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
@@ -642,30 +645,29 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
- rcu_read_lock();
+ spin_lock_bh(&nf_conntrack_lock);
last = (struct nf_conn *)cb->args[1];
for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
- hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+ hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
hnnode) {
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
continue;
ct = nf_ct_tuplehash_to_ctrack(h);
- if (!atomic_inc_not_zero(&ct->ct_general.use))
- continue;
/* Dump entries of a given L3 protocol number.
* If it is not specified, ie. l3proto == 0,
* then dump everything. */
if (l3proto && nf_ct_l3num(ct) != l3proto)
- goto releasect;
+ continue;
if (cb->args[1]) {
if (ct != last)
- goto releasect;
+ continue;
cb->args[1] = 0;
}
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
IPCTNL_MSG_CT_NEW, ct) < 0) {
+ nf_conntrack_get(&ct->ct_general);
cb->args[1] = (unsigned long)ct;
goto out;
}
@@ -678,8 +680,6 @@ restart:
if (acct)
memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
}
-releasect:
- nf_ct_put(ct);
}
if (cb->args[1]) {
cb->args[1] = 0;
@@ -687,7 +687,7 @@ releasect:
}
}
out:
- rcu_read_unlock();
+ spin_unlock_bh(&nf_conntrack_lock);
if (last)
nf_ct_put(last);
@@ -973,7 +973,8 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
free:
kfree_skb(skb2);
out:
- return err;
+ /* this avoids a loop in nfnetlink. */
+ return err == -EAGAIN ? -ENOBUFS : err;
}
#ifdef CONFIG_NF_NAT_NEEDED
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 0fb65705b44b..b4d7f0f24b27 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -118,7 +118,7 @@ static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
if (ret)
- return ret;
+ return 0;
ret = seq_printf(s, "secctx=%s ", secctx);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 80463507420e..c94237631077 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@ static int __init xt_init(void)
for_each_possible_cpu(i) {
struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
- spin_lock_init(&lock->lock);
+
+ seqlock_init(&lock->lock);
lock->readers = 0;
}
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 88f7c3511c72..73c33a42f87f 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -53,15 +53,13 @@ iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
}
static inline int
-iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b)
+iprange_ipv6_lt(const struct in6_addr *a, const struct in6_addr *b)
{
unsigned int i;
- int r;
for (i = 0; i < 4; ++i) {
- r = ntohl(a->s6_addr32[i]) - ntohl(b->s6_addr32[i]);
- if (r != 0)
- return r;
+ if (a->s6_addr32[i] != b->s6_addr32[i])
+ return ntohl(a->s6_addr32[i]) < ntohl(b->s6_addr32[i]);
}
return 0;
@@ -75,15 +73,15 @@ iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
bool m;
if (info->flags & IPRANGE_SRC) {
- m = iprange_ipv6_sub(&iph->saddr, &info->src_min.in6) < 0;
- m |= iprange_ipv6_sub(&iph->saddr, &info->src_max.in6) > 0;
+ m = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
+ m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
m ^= !!(info->flags & IPRANGE_SRC_INV);
if (m)
return false;
}
if (info->flags & IPRANGE_DST) {
- m = iprange_ipv6_sub(&iph->daddr, &info->dst_min.in6) < 0;
- m |= iprange_ipv6_sub(&iph->daddr, &info->dst_max.in6) > 0;
+ m = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
+ m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
m ^= !!(info->flags & IPRANGE_DST_INV);
if (m)
return false;
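The old iprange_ipv6_sub() returned the per-word difference of two unsigned 32-bit values cast to int, which wraps when the words differ by 2^31 or more and can then report the wrong ordering; iprange_ipv6_lt() compares word by word instead and only answers "a < b". A worked example of the failure mode (values illustrative):

/*
 * first 32-bit words, host order:
 *   a = 0x00000001, b = 0xC0000000        (so a < b)
 *
 * old: r = (int)(0x00000001 - 0xC0000000) = (int)0x40000001 > 0
 *      => reports "a > b", wrong because the unsigned difference wrapped
 * new: the words differ and 0x00000001 < 0xC0000000
 *      => reports a < b, correct
 */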
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index fd95beb72f5d..1072b2c19d31 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -37,7 +37,7 @@
/* Transport protocol registration */
static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
-static struct phonet_protocol *phonet_proto_get(int protocol)
+static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
{
struct phonet_protocol *pp;
@@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = {
static DEFINE_MUTEX(proto_tab_lock);
-int __init_or_module phonet_proto_register(int protocol,
+int __init_or_module phonet_proto_register(unsigned int protocol,
struct phonet_protocol *pp)
{
int err = 0;
@@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol,
}
EXPORT_SYMBOL(phonet_proto_register);
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
{
mutex_lock(&proto_tab_lock);
BUG_ON(proto_tab[protocol] != pp);
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index eaf765876458..7fce6dfd2180 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -18,7 +18,7 @@ config RFKILL_LEDS
default y
config RFKILL_INPUT
- bool "RF switch input support" if EMBEDDED
+ bool "RF switch input support" if EXPERT
depends on RFKILL
depends on INPUT = y || RFKILL = INPUT
- default y if !EMBEDDED
+ default y if !EXPERT
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 0b9bb2085ce4..74c064c0dfdd 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -808,7 +808,7 @@ static int __init af_rxrpc_init(void)
goto error_call_jar;
}
- rxrpc_workqueue = create_workqueue("krxrpcd");
+ rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
if (!rxrpc_workqueue) {
printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
goto error_work_queue;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a36270a994d7..f04d4a484d53 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -24,7 +24,7 @@ menuconfig NET_SCHED
To administer these schedulers, you'll need the user-level utilities
from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
That package also contains some documentation; for more, check out
- <http://linux-net.osdl.org/index.php/Iproute2>.
+ <http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>.
This Quality of Service (QoS) support will enable you to use
Differentiated Services (diffserv) and Resource Reservation Protocol
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce9b63a..83ddfc07e45d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,
spin_lock(&p->tcf_lock);
p->tcf_tm.lastuse = jiffies;
- p->tcf_bstats.bytes += qdisc_pkt_len(skb);
- p->tcf_bstats.packets++;
+ bstats_update(&p->tcf_bstats, skb);
action = p->tcf_action;
update_flags = p->update_flags;
spin_unlock(&p->tcf_lock);
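This and the following act_*/sch_* hunks all switch to the two small helpers added to include/net/sch_generic.h in the same series, folding the byte/packet accounting into one call (and, in several of the qdiscs, moving the qdisc-level update from enqueue to dequeue time). Their definitions are essentially as follows (paraphrased, not part of this diff):

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets++;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}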
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef9632255..c2a7c20e81c1 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
spin_lock(&ipt->tcf_lock);
ipt->tcf_tm.lastuse = jiffies;
- ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
- ipt->tcf_bstats.packets++;
+ bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev
worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be92827..d765067e99db 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
spin_lock(&m->tcf_lock);
m->tcf_tm.lastuse = jiffies;
- m->tcf_bstats.bytes += qdisc_pkt_len(skb);
- m->tcf_bstats.packets++;
+ bstats_update(&m->tcf_bstats, skb);
dev = m->tcfm_dev;
if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb837e600..178a4bd7b7cb 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
egress = p->flags & TCA_NAT_FLAG_EGRESS;
action = p->tcf_action;
- p->tcf_bstats.bytes += qdisc_pkt_len(skb);
- p->tcf_bstats.packets++;
+ bstats_update(&p->tcf_bstats, skb);
spin_unlock(&p->tcf_lock);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9640db..445bef716f77 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
bad:
p->tcf_qstats.overlimits++;
done:
- p->tcf_bstats.bytes += qdisc_pkt_len(skb);
- p->tcf_bstats.packets++;
+ bstats_update(&p->tcf_bstats, skb);
spin_unlock(&p->tcf_lock);
return p->tcf_action;
}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf7439b478..e2f08b1e2e58 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
spin_lock(&police->tcf_lock);
- police->tcf_bstats.bytes += qdisc_pkt_len(skb);
- police->tcf_bstats.packets++;
+ bstats_update(&police->tcf_bstats, skb);
if (police->tcfp_ewma_rate &&
police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3ee775..7287cff7af3e 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
spin_lock(&d->tcf_lock);
d->tcf_tm.lastuse = jiffies;
- d->tcf_bstats.bytes += qdisc_pkt_len(skb);
- d->tcf_bstats.packets++;
+ bstats_update(&d->tcf_bstats, skb);
/* print policy string followed by _ then packet count
* Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4eb8855..836f5fee9e58 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
spin_lock(&d->tcf_lock);
d->tcf_tm.lastuse = jiffies;
- d->tcf_bstats.bytes += qdisc_pkt_len(skb);
- d->tcf_bstats.packets++;
+ bstats_update(&d->tcf_bstats, skb);
if (d->flags & SKBEDIT_F_PRIORITY)
skb->priority = d->priority;
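The act_csum, act_ipt, act_mirred, act_nat, act_pedit, act_police, act_simple and act_skbedit hunks above all collapse the same open-coded byte/packet accounting into a single helper. Below is a minimal userspace sketch of what bstats_update() is assumed to encapsulate, reconstructed from the lines being removed (the sch_htb hunk further down shows the GSO-segment handling the helper also absorbs); qdisc_bstats_update(sch, skb) is assumed to be the same update applied to sch->bstats. The struct and field names here are simplified stand-ins, not the kernel's.

#include <stdbool.h>

struct basic_stats {
	unsigned long long	bytes;
	unsigned long		packets;
};

struct pkt {				/* stand-in for struct sk_buff       */
	unsigned int	len;		/* qdisc_pkt_len(skb)                */
	bool		is_gso;		/* skb_is_gso(skb)                   */
	unsigned short	gso_segs;	/* skb_shinfo(skb)->gso_segs         */
};

static inline void bstats_update(struct basic_stats *bstats,
				 const struct pkt *skb)
{
	/* bytes always advance by the qdisc-visible packet length */
	bstats->bytes += skb->len;
	/* a GSO super-packet counts once per segment, as sch_htb did by hand */
	bstats->packets += skb->is_gso ? skb->gso_segs : 1;
}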
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 282540778aa8..943d733409d0 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@ drop: __maybe_unused
}
return ret;
}
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
- flow->bstats.bytes += qdisc_pkt_len(skb);
- flow->bstats.packets++;
+ qdisc_bstats_update(sch, skb);
+ bstats_update(&flow->bstats, skb);
/*
* Okay, this may seem weird. We pretend we've dropped the packet if
* it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb7631590865..5f63ec58942c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- sch->bstats.packets++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
@@ -650,8 +648,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- sch->bstats.packets++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
@@ -973,6 +969,7 @@ cbq_dequeue(struct Qdisc *sch)
skb = cbq_dequeue_1(sch);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
return skb;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b5313f8cf..6b7fe4a84f13 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
- unsigned int len;
int err;
cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
}
- len = qdisc_pkt_len(skb);
err = qdisc_enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -377,10 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->deficit = cl->quantum;
}
- cl->bstats.packets++;
- cl->bstats.bytes += len;
- sch->bstats.packets++;
- sch->bstats.bytes += len;
+ bstats_update(&cl->bstats, skb);
sch->q.qlen++;
return err;
@@ -407,6 +402,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
skb = qdisc_dequeue_peeked(cl->qdisc);
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d62bb5c..0f7bf3fdfea5 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
}
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -284,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
if (skb == NULL)
return NULL;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
index = skb->tc_index & (p->indices - 1);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 4dfecb0cba37..d468b479aa93 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -46,19 +46,14 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct sk_buff *skb_head;
struct fifo_sched_data *q = qdisc_priv(sch);
if (likely(skb_queue_len(&sch->q) < q->limit))
return qdisc_enqueue_tail(skb, sch);
/* queue full, remove one skb to fulfill the limit */
- skb_head = qdisc_dequeue_head(sch);
- sch->bstats.bytes -= qdisc_pkt_len(skb_head);
- sch->bstats.packets--;
+ __qdisc_queue_drop_head(sch, &sch->q);
sch->qstats.drops++;
- kfree_skb(skb_head);
-
qdisc_enqueue_tail(skb, sch);
return NET_XMIT_CN;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b7bb36..14a799de1c35 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (cl->qdisc->q.qlen == 1)
set_active(cl, qdisc_pkt_len(skb));
- cl->bstats.packets++;
- cl->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
+ bstats_update(&cl->bstats, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -1668,6 +1665,7 @@ hfsc_dequeue(struct Qdisc *sch)
}
sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d6c52d..fc12fe6f5597 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,11 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
return ret;
} else {
- cl->bstats.packets +=
- skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
- cl->bstats.bytes += qdisc_pkt_len(skb);
+ bstats_update(&cl->bstats, skb);
htb_activate(q, cl);
}
sch->q.qlen++;
- sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
- sch->bstats.bytes += qdisc_pkt_len(skb);
return NET_XMIT_SUCCESS;
}
@@ -648,12 +644,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
htb_add_to_wait_tree(q, cl, diff);
}
- /* update byte stats except for leaves which are already updated */
- if (cl->level) {
- cl->bstats.bytes += bytes;
- cl->bstats.packets += skb_is_gso(skb)?
- skb_shinfo(skb)->gso_segs:1;
- }
+ /* update basic stats except for leaves which are already updated */
+ if (cl->level)
+ bstats_update(&cl->bstats, skb);
+
cl = cl->parent;
}
}
@@ -847,7 +841,7 @@ next:
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
psched_time_t next_event;
@@ -856,6 +850,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
if (skb != NULL) {
+ok:
+ qdisc_bstats_update(sch, skb);
sch->flags &= ~TCQ_F_THROTTLED;
sch->q.qlen--;
return skb;
@@ -889,11 +885,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
int prio = ffz(m);
m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level);
- if (likely(skb != NULL)) {
- sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
- goto fin;
- }
+ if (likely(skb != NULL))
+ goto ok;
}
}
sch->qstats.overlimits++;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a68445..bce1665239b8 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
result = tc_classify(skb, p->filter_list, &res);
- sch->bstats.packets++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
switch (result) {
case TC_ACT_SHOT:
result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690deab5d0..436a2e75b322 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -113,6 +111,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
qdisc = q->queues[q->curband];
skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c083a78..6a3006b38dc5 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (likely(ret == NET_XMIT_SUCCESS)) {
sch->q.qlen++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
} else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
}
@@ -290,6 +288,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
skb->tstamp.tv64 = 0;
#endif
pr_debug("netem_dequeue: return skb=%p\n", skb);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -477,8 +476,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
__skb_queue_after(list, skb, nskb);
sch->qstats.backlog += qdisc_pkt_len(nskb);
- sch->bstats.bytes += qdisc_pkt_len(nskb);
- sch->bstats.packets++;
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bce33ce..fbd710d619bf 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -117,6 +115,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 8d42bb3ba540..9f98dbd32d4c 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,6 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
@@ -115,11 +113,13 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
struct Qdisc *child = q->qdisc;
skb = child->dequeue(child);
- if (skb)
+ if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- else if (!red_is_idling(&q->parms))
- red_start_of_idle_period(&q->parms);
-
+ } else {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ }
return skb;
}
@@ -239,6 +239,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
.Scell_log = q->parms.Scell_log,
};
+ sch->qstats.backlog = q->qdisc->qstats.backlog;
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3cf478d012dd..edea8cefec6c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -67,27 +67,47 @@
IMPLEMENTATION:
This implementation limits maximal queue length to 128;
- maximal mtu to 2^15-1; number of hash buckets to 1024.
+ max mtu to 2^18-1; max 128 flows, number of hash buckets to 1024.
The only goal of this restrictions was that all data
- fit into one 4K page :-). Struct sfq_sched_data is
- organized in anti-cache manner: all the data for a bucket
- are scattered over different locations. This is not good,
- but it allowed me to put it into 4K.
+ fit into one 4K page on 32bit arches.
It is easy to increase these values, but not in flight. */
-#define SFQ_DEPTH 128
+#define SFQ_DEPTH 128 /* max number of packets per flow */
+#define SFQ_SLOTS 128 /* max number of flows */
+#define SFQ_EMPTY_SLOT 255
#define SFQ_HASH_DIVISOR 1024
+/* We use 16 bits to store allot, and want to handle packets up to 64K
+ * Scale allot by 8 (1<<3) so that no overflow occurs.
+ */
+#define SFQ_ALLOT_SHIFT 3
+#define SFQ_ALLOT_SIZE(X) DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
-/* This type should contain at least SFQ_DEPTH*2 values */
+/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;
+/*
+ * We dont use pointers to save space.
+ * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
+ * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
+ * are 'pointers' to dep[] array
+ */
struct sfq_head
{
sfq_index next;
sfq_index prev;
};
+struct sfq_slot {
+ struct sk_buff *skblist_next;
+ struct sk_buff *skblist_prev;
+ sfq_index qlen; /* number of skbs in skblist */
+ sfq_index next; /* next slot in sfq chain */
+ struct sfq_head dep; /* anchor in dep[] chains */
+ unsigned short hash; /* hash value (index in ht[]) */
+ short allot; /* credit for this slot */
+};
+
struct sfq_sched_data
{
/* Parameters */
@@ -99,17 +119,24 @@ struct sfq_sched_data
struct tcf_proto *filter_list;
struct timer_list perturb_timer;
u32 perturbation;
- sfq_index tail; /* Index of current slot in round */
- sfq_index max_depth; /* Maximal depth */
-
+ sfq_index cur_depth; /* depth of longest slot */
+ unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
+ struct sfq_slot *tail; /* current slot in round */
sfq_index ht[SFQ_HASH_DIVISOR]; /* Hash table */
- sfq_index next[SFQ_DEPTH]; /* Active slots link */
- short allot[SFQ_DEPTH]; /* Current allotment per slot */
- unsigned short hash[SFQ_DEPTH]; /* Hash value indexed by slots */
- struct sk_buff_head qs[SFQ_DEPTH]; /* Slot queue */
- struct sfq_head dep[SFQ_DEPTH*2]; /* Linked list of slots, indexed by depth */
+ struct sfq_slot slots[SFQ_SLOTS];
+ struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */
};
+/*
+ * sfq_head are either in a sfq_slot or in dep[] array
+ */
+static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
+{
+ if (val < SFQ_SLOTS)
+ return &q->slots[val].dep;
+ return &q->dep[val - SFQ_SLOTS];
+}
+
static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
@@ -200,30 +227,41 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
return 0;
}
+/*
+ * x : slot number [0 .. SFQ_SLOTS - 1]
+ */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
sfq_index p, n;
- int d = q->qs[x].qlen + SFQ_DEPTH;
+ int qlen = q->slots[x].qlen;
+
+ p = qlen + SFQ_SLOTS;
+ n = q->dep[qlen].next;
- p = d;
- n = q->dep[d].next;
- q->dep[x].next = n;
- q->dep[x].prev = p;
- q->dep[p].next = q->dep[n].prev = x;
+ q->slots[x].dep.next = n;
+ q->slots[x].dep.prev = p;
+
+ q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
+ sfq_dep_head(q, n)->prev = x;
}
+#define sfq_unlink(q, x, n, p) \
+ n = q->slots[x].dep.next; \
+ p = q->slots[x].dep.prev; \
+ sfq_dep_head(q, p)->next = n; \
+ sfq_dep_head(q, n)->prev = p
+
+
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
sfq_index p, n;
+ int d;
- n = q->dep[x].next;
- p = q->dep[x].prev;
- q->dep[p].next = n;
- q->dep[n].prev = p;
-
- if (n == p && q->max_depth == q->qs[x].qlen + 1)
- q->max_depth--;
+ sfq_unlink(q, x, n, p);
+ d = q->slots[x].qlen--;
+ if (n == p && q->cur_depth == d)
+ q->cur_depth--;
sfq_link(q, x);
}
@@ -232,34 +270,74 @@ static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
sfq_index p, n;
int d;
- n = q->dep[x].next;
- p = q->dep[x].prev;
- q->dep[p].next = n;
- q->dep[n].prev = p;
- d = q->qs[x].qlen;
- if (q->max_depth < d)
- q->max_depth = d;
+ sfq_unlink(q, x, n, p);
+ d = ++q->slots[x].qlen;
+ if (q->cur_depth < d)
+ q->cur_depth = d;
sfq_link(q, x);
}
+/* helper functions : might be changed when/if skb use a standard list_head */
+
+/* remove one skb from tail of slot queue */
+static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
+{
+ struct sk_buff *skb = slot->skblist_prev;
+
+ slot->skblist_prev = skb->prev;
+ skb->prev->next = (struct sk_buff *)slot;
+ skb->next = skb->prev = NULL;
+ return skb;
+}
+
+/* remove one skb from head of slot queue */
+static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
+{
+ struct sk_buff *skb = slot->skblist_next;
+
+ slot->skblist_next = skb->next;
+ skb->next->prev = (struct sk_buff *)slot;
+ skb->next = skb->prev = NULL;
+ return skb;
+}
+
+static inline void slot_queue_init(struct sfq_slot *slot)
+{
+ slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
+}
+
+/* add skb to slot queue (tail add) */
+static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
+{
+ skb->prev = slot->skblist_prev;
+ skb->next = (struct sk_buff *)slot;
+ slot->skblist_prev->next = skb;
+ slot->skblist_prev = skb;
+}
+
+#define slot_queue_walk(slot, skb) \
+ for (skb = slot->skblist_next; \
+ skb != (struct sk_buff *)slot; \
+ skb = skb->next)
+
static unsigned int sfq_drop(struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
- sfq_index d = q->max_depth;
+ sfq_index x, d = q->cur_depth;
struct sk_buff *skb;
unsigned int len;
+ struct sfq_slot *slot;
- /* Queue is full! Find the longest slot and
- drop a packet from it */
-
+ /* Queue is full! Find the longest slot and drop tail packet from it */
if (d > 1) {
- sfq_index x = q->dep[d + SFQ_DEPTH].next;
- skb = q->qs[x].prev;
+ x = q->dep[d].next;
+ slot = &q->slots[x];
+drop:
+ skb = slot_dequeue_tail(slot);
len = qdisc_pkt_len(skb);
- __skb_unlink(skb, &q->qs[x]);
- kfree_skb(skb);
sfq_dec(q, x);
+ kfree_skb(skb);
sch->q.qlen--;
sch->qstats.drops++;
sch->qstats.backlog -= len;
@@ -268,19 +346,11 @@ static unsigned int sfq_drop(struct Qdisc *sch)
if (d == 1) {
/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
- d = q->next[q->tail];
- q->next[q->tail] = q->next[d];
- q->allot[q->next[d]] += q->quantum;
- skb = q->qs[d].prev;
- len = qdisc_pkt_len(skb);
- __skb_unlink(skb, &q->qs[d]);
- kfree_skb(skb);
- sfq_dec(q, d);
- sch->q.qlen--;
- q->ht[q->hash[d]] = SFQ_DEPTH;
- sch->qstats.drops++;
- sch->qstats.backlog -= len;
- return len;
+ x = q->tail->next;
+ slot = &q->slots[x];
+ q->tail->next = slot->next;
+ q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+ goto drop;
}
return 0;
@@ -292,6 +362,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct sfq_sched_data *q = qdisc_priv(sch);
unsigned int hash;
sfq_index x;
+ struct sfq_slot *slot;
int uninitialized_var(ret);
hash = sfq_classify(skb, sch, &ret);
@@ -304,37 +375,35 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
hash--;
x = q->ht[hash];
- if (x == SFQ_DEPTH) {
- q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
- q->hash[x] = hash;
+ slot = &q->slots[x];
+ if (x == SFQ_EMPTY_SLOT) {
+ x = q->dep[0].next; /* get a free slot */
+ q->ht[hash] = x;
+ slot = &q->slots[x];
+ slot->hash = hash;
}
- /* If selected queue has length q->limit, this means that
- * all another queues are empty and that we do simple tail drop,
+ /* If selected queue has length q->limit, do simple tail drop,
* i.e. drop _this_ packet.
*/
- if (q->qs[x].qlen >= q->limit)
+ if (slot->qlen >= q->limit)
return qdisc_drop(skb, sch);
sch->qstats.backlog += qdisc_pkt_len(skb);
- __skb_queue_tail(&q->qs[x], skb);
+ slot_queue_add(slot, skb);
sfq_inc(q, x);
- if (q->qs[x].qlen == 1) { /* The flow is new */
- if (q->tail == SFQ_DEPTH) { /* It is the first flow */
- q->tail = x;
- q->next[x] = x;
- q->allot[x] = q->quantum;
+ if (slot->qlen == 1) { /* The flow is new */
+ if (q->tail == NULL) { /* It is the first flow */
+ slot->next = x;
} else {
- q->next[x] = q->next[q->tail];
- q->next[q->tail] = x;
- q->tail = x;
+ slot->next = q->tail->next;
+ q->tail->next = x;
}
+ q->tail = slot;
+ slot->allot = q->scaled_quantum;
}
- if (++sch->q.qlen <= q->limit) {
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
+ if (++sch->q.qlen <= q->limit)
return NET_XMIT_SUCCESS;
- }
sfq_drop(sch);
return NET_XMIT_CN;
@@ -344,14 +413,12 @@ static struct sk_buff *
sfq_peek(struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
- sfq_index a;
/* No active slots */
- if (q->tail == SFQ_DEPTH)
+ if (q->tail == NULL)
return NULL;
- a = q->next[q->tail];
- return skb_peek(&q->qs[a]);
+ return q->slots[q->tail->next].skblist_next;
}
static struct sk_buff *
@@ -359,34 +426,38 @@ sfq_dequeue(struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- sfq_index a, old_a;
+ sfq_index a, next_a;
+ struct sfq_slot *slot;
/* No active slots */
- if (q->tail == SFQ_DEPTH)
+ if (q->tail == NULL)
return NULL;
- a = old_a = q->next[q->tail];
-
- /* Grab packet */
- skb = __skb_dequeue(&q->qs[a]);
+next_slot:
+ a = q->tail->next;
+ slot = &q->slots[a];
+ if (slot->allot <= 0) {
+ q->tail = slot;
+ slot->allot += q->scaled_quantum;
+ goto next_slot;
+ }
+ skb = slot_dequeue_head(slot);
sfq_dec(q, a);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb);
/* Is the slot empty? */
- if (q->qs[a].qlen == 0) {
- q->ht[q->hash[a]] = SFQ_DEPTH;
- a = q->next[a];
- if (a == old_a) {
- q->tail = SFQ_DEPTH;
+ if (slot->qlen == 0) {
+ q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+ next_a = slot->next;
+ if (a == next_a) {
+ q->tail = NULL; /* no more active slots */
return skb;
}
- q->next[q->tail] = a;
- q->allot[a] += q->quantum;
- } else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
- q->tail = a;
- a = q->next[a];
- q->allot[a] += q->quantum;
+ q->tail->next = next_a;
+ } else {
+ slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
}
return skb;
}
@@ -422,6 +493,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_lock(sch);
q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
+ q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = ctl->perturb_period * HZ;
if (ctl->limit)
q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
@@ -450,19 +522,19 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
init_timer_deferrable(&q->perturb_timer);
for (i = 0; i < SFQ_HASH_DIVISOR; i++)
- q->ht[i] = SFQ_DEPTH;
+ q->ht[i] = SFQ_EMPTY_SLOT;
for (i = 0; i < SFQ_DEPTH; i++) {
- skb_queue_head_init(&q->qs[i]);
- q->dep[i + SFQ_DEPTH].next = i + SFQ_DEPTH;
- q->dep[i + SFQ_DEPTH].prev = i + SFQ_DEPTH;
+ q->dep[i].next = i + SFQ_SLOTS;
+ q->dep[i].prev = i + SFQ_SLOTS;
}
q->limit = SFQ_DEPTH - 1;
- q->max_depth = 0;
- q->tail = SFQ_DEPTH;
+ q->cur_depth = 0;
+ q->tail = NULL;
if (opt == NULL) {
q->quantum = psched_mtu(qdisc_dev(sch));
+ q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = 0;
q->perturbation = net_random();
} else {
@@ -471,8 +543,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
return err;
}
- for (i = 0; i < SFQ_DEPTH; i++)
+ for (i = 0; i < SFQ_SLOTS; i++) {
+ slot_queue_init(&q->slots[i]);
sfq_link(q, i);
+ }
return 0;
}
@@ -547,10 +621,19 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct gnet_dump *d)
{
struct sfq_sched_data *q = qdisc_priv(sch);
- sfq_index idx = q->ht[cl-1];
- struct gnet_stats_queue qs = { .qlen = q->qs[idx].qlen };
- struct tc_sfq_xstats xstats = { .allot = q->allot[idx] };
+ sfq_index idx = q->ht[cl - 1];
+ struct gnet_stats_queue qs = { 0 };
+ struct tc_sfq_xstats xstats = { 0 };
+ struct sk_buff *skb;
+
+ if (idx != SFQ_EMPTY_SLOT) {
+ const struct sfq_slot *slot = &q->slots[idx];
+ xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
+ qs.qlen = slot->qlen;
+ slot_queue_walk(slot, skb)
+ qs.backlog += qdisc_pkt_len(skb);
+ }
if (gnet_stats_copy_queue(d, &qs) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -565,7 +648,7 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
return;
for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
- if (q->ht[i] == SFQ_DEPTH ||
+ if (q->ht[i] == SFQ_EMPTY_SLOT ||
arg->count < arg->skip) {
arg->count++;
continue;
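The new SFQ_ALLOT_SHIFT / SFQ_ALLOT_SIZE() defines near the top of the sch_sfq diff explain why the per-slot allot can stay a 16-bit short: credit is charged in units of 8 bytes (the dequeue path subtracts SFQ_ALLOT_SIZE(qdisc_pkt_len(skb)) and refills with the scaled quantum), so even a 64K GSO packet costs at most 8192 units. A standalone arithmetic check of that claim, reusing the same macro shape as the patch:

#include <stdio.h>

#define SFQ_ALLOT_SHIFT		3
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SFQ_ALLOT_SIZE(x)	DIV_ROUND_UP((x), 1 << SFQ_ALLOT_SHIFT)

int main(void)
{
	unsigned int pkt_len = 65535;	/* worst-case 64K packet */

	/* prints 8192, comfortably inside a signed 16-bit allot */
	printf("scaled charge for %u bytes: %u\n",
	       pkt_len, SFQ_ALLOT_SIZE(pkt_len));
	return 0;
}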
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d64635..e93165820c3f 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
}
sch->q.qlen++;
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
return NET_XMIT_SUCCESS;
}
@@ -188,6 +186,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
q->ptokens = ptoks;
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_bstats_update(sch, skb);
return skb;
}
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a7c94a..d84e7329660f 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -59,6 +59,10 @@ struct teql_master
struct net_device *dev;
struct Qdisc *slaves;
struct list_head master_list;
+ unsigned long tx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_errors;
+ unsigned long tx_dropped;
};
struct teql_sched_data
@@ -83,8 +87,6 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
if (q->q.qlen < dev->tx_queue_len) {
__skb_queue_tail(&q->q, skb);
- sch->bstats.bytes += qdisc_pkt_len(skb);
- sch->bstats.packets++;
return NET_XMIT_SUCCESS;
}
@@ -108,6 +110,8 @@ teql_dequeue(struct Qdisc* sch)
dat->m->slaves = sch;
netif_wake_queue(m);
}
+ } else {
+ qdisc_bstats_update(sch, skb);
}
sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
return skb;
@@ -275,7 +279,6 @@ static inline int teql_resolve(struct sk_buff *skb,
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct teql_master *master = netdev_priv(dev);
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
struct Qdisc *start, *q;
int busy;
int nores;
@@ -315,8 +318,8 @@ restart:
__netif_tx_unlock(slave_txq);
master->slaves = NEXT_SLAVE(q);
netif_wake_queue(dev);
- txq->tx_packets++;
- txq->tx_bytes += length;
+ master->tx_packets++;
+ master->tx_bytes += length;
return NETDEV_TX_OK;
}
__netif_tx_unlock(slave_txq);
@@ -343,10 +346,10 @@ restart:
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
}
- dev->stats.tx_errors++;
+ master->tx_errors++;
drop:
- txq->tx_dropped++;
+ master->tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -399,6 +402,18 @@ static int teql_master_close(struct net_device *dev)
return 0;
}
+static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct teql_master *m = netdev_priv(dev);
+
+ stats->tx_packets = m->tx_packets;
+ stats->tx_bytes = m->tx_bytes;
+ stats->tx_errors = m->tx_errors;
+ stats->tx_dropped = m->tx_dropped;
+ return stats;
+}
+
static int teql_master_mtu(struct net_device *dev, int new_mtu)
{
struct teql_master *m = netdev_priv(dev);
@@ -423,6 +438,7 @@ static const struct net_device_ops teql_netdev_ops = {
.ndo_open = teql_master_open,
.ndo_stop = teql_master_close,
.ndo_start_xmit = teql_master_xmit,
+ .ndo_get_stats64 = teql_master_stats64,
.ndo_change_mtu = teql_master_mtu,
};
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a09b0dd25f50..8e02550ff3e8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3428,7 +3428,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
break;
- case SCTP_DELAYED_ACK:
+ case SCTP_DELAYED_SACK:
retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
break;
case SCTP_PARTIAL_DELIVERY_POINT:
@@ -5333,7 +5333,7 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
optlen);
break;
- case SCTP_DELAYED_ACK:
+ case SCTP_DELAYED_SACK:
retval = sctp_getsockopt_delayed_ack(sk, len, optval,
optlen);
break;
diff --git a/net/socket.c b/net/socket.c
index c1663c0ff3d3..ac2219f90d5d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -262,6 +262,7 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
}
+
static void wq_free_rcu(struct rcu_head *head)
{
struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
@@ -305,20 +306,6 @@ static const struct super_operations sockfs_ops = {
.statfs = simple_statfs,
};
-static struct dentry *sockfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
-}
-
-static struct vfsmount *sock_mnt __read_mostly;
-
-static struct file_system_type sock_fs_type = {
- .name = "sockfs",
- .mount = sockfs_mount,
- .kill_sb = kill_anon_super,
-};
-
/*
* sockfs_dname() is called from d_path().
*/
@@ -332,6 +319,21 @@ static const struct dentry_operations sockfs_dentry_operations = {
.d_dname = sockfs_dname,
};
+static struct dentry *sockfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type, "socket:", &sockfs_ops,
+ &sockfs_dentry_operations, SOCKFS_MAGIC);
+}
+
+static struct vfsmount *sock_mnt __read_mostly;
+
+static struct file_system_type sock_fs_type = {
+ .name = "sockfs",
+ .mount = sockfs_mount,
+ .kill_sb = kill_anon_super,
+};
+
/*
* Obtains the first available file descriptor and sets it up for use.
*
@@ -360,14 +362,13 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
if (unlikely(fd < 0))
return fd;
- path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
+ path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
if (unlikely(!path.dentry)) {
put_unused_fd(fd);
return -ENOMEM;
}
path.mnt = mntget(sock_mnt);
- path.dentry->d_op = &sockfs_dentry_operations;
d_instantiate(path.dentry, SOCK_INODE(sock));
SOCK_INODE(sock)->i_fop = &socket_file_ops;
@@ -2390,6 +2391,8 @@ EXPORT_SYMBOL(sock_unregister);
static int __init sock_init(void)
{
+ int err;
+
/*
* Initialize sock SLAB cache.
*/
@@ -2406,8 +2409,15 @@ static int __init sock_init(void)
*/
init_inodecache();
- register_filesystem(&sock_fs_type);
+
+ err = register_filesystem(&sock_fs_type);
+ if (err)
+ goto out_fs;
sock_mnt = kern_mount(&sock_fs_type);
+ if (IS_ERR(sock_mnt)) {
+ err = PTR_ERR(sock_mnt);
+ goto out_mount;
+ }
/* The real protocol initialization is performed in later initcalls.
*/
@@ -2420,7 +2430,13 @@ static int __init sock_init(void)
skb_timestamping_init();
#endif
- return 0;
+out:
+ return err;
+
+out_mount:
+ unregister_filesystem(&sock_fs_type);
+out_fs:
+ goto out;
}
core_initcall(sock_init); /* early initcall */
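The net/socket.c hunk above stops ignoring register_filesystem() and kern_mount() failures in sock_init() and unwinds in reverse order of setup. A minimal self-contained sketch of that unwind shape, with generic stub functions rather than the socket code itself (the patch additionally routes both failure paths through a shared out: label that returns err):

#include <stdio.h>

/* hypothetical stand-ins for the two setup steps sock_init() now checks */
static int register_fs(void)		{ return 0; }	/* 0 or -errno */
static int mount_fs(void)		{ return 0; }	/* 0 or -errno */
static void unregister_fs(void)		{ }

static int init(void)
{
	int err;

	err = register_fs();
	if (err)
		goto out_fs;

	err = mount_fs();
	if (err)
		goto out_mount;

	return 0;			/* fully set up */

out_mount:
	unregister_fs();		/* undo the step that did succeed */
out_fs:
	return err;
}

int main(void)
{
	printf("init() -> %d\n", init());
	return 0;
}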
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index afe67849269f..67e31276682a 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -563,8 +563,17 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p)
return cred->cr_ops->crvalidate(task, p);
}
+static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *data, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data);
+ encode(rqstp, &xdr, obj);
+}
+
int
-rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
+rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
__be32 *data, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -574,11 +583,22 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
if (cred->cr_ops->crwrap_req)
return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
/* By default, we encode the arguments normally. */
- return encode(rqstp, data, obj);
+ rpcauth_wrap_req_encode(encode, rqstp, data, obj);
+ return 0;
+}
+
+static int
+rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+ __be32 *data, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data);
+ return decode(rqstp, &xdr, obj);
}
int
-rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
+rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
__be32 *data, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -589,7 +609,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
data, obj);
/* By default, we decode the arguments normally. */
- return decode(rqstp, data, obj);
+ return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
}
int
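The auth.c changes above, and the auth_gss.c, clnt.c and rpcb_clnt.c changes that follow, are all one conversion: the old kxdrproc_t encode/decode callbacks that took a raw __be32 pointer become xdr_stream based callbacks, with thin wrappers (rpcauth_wrap_req_encode() and friends) doing the xdr_init_encode()/xdr_init_decode() setup. Judging from how those wrappers invoke the callbacks, the typedefs involved are assumed to look roughly like the sketch below; treat it as a reconstruction, not a quote from the sunrpc headers.

/* stand-ins so this sketch is self-contained outside the kernel tree */
typedef unsigned int __be32;
struct xdr_stream;

/* old style: callback got a bare word pointer and returned a status */
typedef int  (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);

/* assumed new style: the caller prepares the xdr_stream; encode is void
 * because it no longer reports failures in-line, decode still returns
 * a status */
typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
typedef int  (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);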
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3835ce35e224..45dbf1521b9a 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1231,9 +1231,19 @@ out_bad:
return NULL;
}
+static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
+ encode(rqstp, &xdr, obj);
+}
+
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+ kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
{
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
struct xdr_buf integ_buf;
@@ -1249,9 +1259,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
*p++ = htonl(rqstp->rq_seqno);
- status = encode(rqstp, p, obj);
- if (status)
- return status;
+ gss_wrap_req_encode(encode, rqstp, p, obj);
if (xdr_buf_subsegment(snd_buf, &integ_buf,
offset, snd_buf->len - offset))
@@ -1325,7 +1333,8 @@ out:
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+ kxdreproc_t encode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
{
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
u32 offset;
@@ -1342,9 +1351,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
*p++ = htonl(rqstp->rq_seqno);
- status = encode(rqstp, p, obj);
- if (status)
- return status;
+ gss_wrap_req_encode(encode, rqstp, p, obj);
status = alloc_enc_pages(rqstp);
if (status)
@@ -1394,7 +1401,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
static int
gss_wrap_req(struct rpc_task *task,
- kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
+ kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1407,12 +1414,14 @@ gss_wrap_req(struct rpc_task *task,
/* The spec seems a little ambiguous here, but I think that not
* wrapping context destruction requests makes the most sense.
*/
- status = encode(rqstp, p, obj);
+ gss_wrap_req_encode(encode, rqstp, p, obj);
+ status = 0;
goto out;
}
switch (gss_cred->gc_service) {
case RPC_GSS_SVC_NONE:
- status = encode(rqstp, p, obj);
+ gss_wrap_req_encode(encode, rqstp, p, obj);
+ status = 0;
break;
case RPC_GSS_SVC_INTEGRITY:
status = gss_wrap_req_integ(cred, ctx, encode,
@@ -1494,10 +1503,19 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
return 0;
}
+static int
+gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+ __be32 *p, void *obj)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ return decode(rqstp, &xdr, obj);
+}
static int
gss_unwrap_resp(struct rpc_task *task,
- kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
+ kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1528,7 +1546,7 @@ gss_unwrap_resp(struct rpc_task *task,
cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
+ (savedlen - head->iov_len);
out_decode:
- status = decode(rqstp, p, obj);
+ status = gss_unwrap_req_decode(decode, rqstp, p, obj);
out:
gss_put_ctx(ctx);
dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 75ee993ea057..9576f35ab701 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -137,7 +137,7 @@ arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
ms_usage = 13;
break;
default:
- return EINVAL;;
+ return -EINVAL;
}
salt[0] = (ms_usage >> 0) & 0xff;
salt[1] = (ms_usage >> 8) & 0xff;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index dec2a6fc7c12..bcdae78fdfc6 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -67,7 +67,6 @@ static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
#define RSI_HASHBITS 6
#define RSI_HASHMAX (1<<RSI_HASHBITS)
-#define RSI_HASHMASK (RSI_HASHMAX-1)
struct rsi {
struct cache_head h;
@@ -319,7 +318,6 @@ static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
#define RSC_HASHBITS 10
#define RSC_HASHMAX (1<<RSC_HASHBITS)
-#define RSC_HASHMASK (RSC_HASHMAX-1)
#define GSS_SEQ_WIN 128
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 7dcfe0cc3500..1dd1a6890007 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -59,8 +59,8 @@ int bc_send(struct rpc_rqst *req)
ret = task->tk_status;
rpc_put_task(task);
}
- return ret;
dprintk("RPC: bc_send ret= %d\n", ret);
+ return ret;
}
#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index e433e7580e27..72ad836e4fe0 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -37,7 +37,7 @@
#define RPCDBG_FACILITY RPCDBG_CACHE
-static void cache_defer_req(struct cache_req *req, struct cache_head *item);
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h)
@@ -128,6 +128,7 @@ static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
head->expiry_time = expiry;
head->last_refresh = seconds_since_boot();
+ smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
set_bit(CACHE_VALID, &head->flags);
}
@@ -208,11 +209,36 @@ static inline int cache_is_valid(struct cache_detail *detail, struct cache_head
/* entry is valid */
if (test_bit(CACHE_NEGATIVE, &h->flags))
return -ENOENT;
- else
+ else {
+ /*
+ * In combination with write barrier in
+ * sunrpc_cache_update, ensures that anyone
+ * using the cache entry after this sees the
+ * updated contents:
+ */
+ smp_rmb();
return 0;
+ }
}
}
+static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
+{
+ int rv;
+
+ write_lock(&detail->hash_lock);
+ rv = cache_is_valid(detail, h);
+ if (rv != -EAGAIN) {
+ write_unlock(&detail->hash_lock);
+ return rv;
+ }
+ set_bit(CACHE_NEGATIVE, &h->flags);
+ cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
+ write_unlock(&detail->hash_lock);
+ cache_fresh_unlocked(h, detail);
+ return -ENOENT;
+}
+
/*
* This is the generic cache management routine for all
* the authentication caches.
@@ -251,14 +277,8 @@ int cache_check(struct cache_detail *detail,
case -EINVAL:
clear_bit(CACHE_PENDING, &h->flags);
cache_revisit_request(h);
- if (rv == -EAGAIN) {
- set_bit(CACHE_NEGATIVE, &h->flags);
- cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
- cache_fresh_unlocked(h, detail);
- rv = -ENOENT;
- }
+ rv = try_to_negate_entry(detail, h);
break;
-
case -EAGAIN:
clear_bit(CACHE_PENDING, &h->flags);
cache_revisit_request(h);
@@ -268,9 +288,11 @@ int cache_check(struct cache_detail *detail,
}
if (rv == -EAGAIN) {
- cache_defer_req(rqstp, h);
- if (!test_bit(CACHE_PENDING, &h->flags)) {
- /* Request is not deferred */
+ if (!cache_defer_req(rqstp, h)) {
+ /*
+ * Request was not deferred; handle it as best
+ * we can ourselves:
+ */
rv = cache_is_valid(detail, h);
if (rv == -EAGAIN)
rv = -ETIMEDOUT;
@@ -618,18 +640,19 @@ static void cache_limit_defers(void)
discard->revisit(discard, 1);
}
-static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+/* Return true if and only if a deferred request is queued. */
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
struct cache_deferred_req *dreq;
if (req->thread_wait) {
cache_wait_req(req, item);
if (!test_bit(CACHE_PENDING, &item->flags))
- return;
+ return false;
}
dreq = req->defer(req);
if (dreq == NULL)
- return;
+ return false;
setup_deferral(dreq, item, 1);
if (!test_bit(CACHE_PENDING, &item->flags))
/* Bit could have been cleared before we managed to
@@ -638,6 +661,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item)
cache_revisit_request(item);
cache_limit_defers();
+ return true;
}
static void cache_revisit_request(struct cache_head *item)
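The cache.c hunk pairs a new smp_wmb() in cache_fresh_locked() with an smp_rmb() in cache_is_valid(): the entry's contents must be published before CACHE_VALID can be observed set, and a reader that sees the flag must not read stale contents. Below is a C11-atomics analogue of that publish/consume ordering; it uses release/acquire instead of the kernel barrier primitives, and the entry layout is invented purely for illustration.

#include <stdatomic.h>
#include <stdio.h>

struct entry {
	int contents;		/* stands in for the cached item's payload */
	atomic_bool valid;	/* stands in for the CACHE_VALID bit        */
};

/* writer side: contents first, then the flag (release ~ smp_wmb + set_bit) */
static void publish(struct entry *e, int v)
{
	e->contents = v;
	atomic_store_explicit(&e->valid, true, memory_order_release);
}

/* reader side: flag first, then contents (acquire ~ test_bit + smp_rmb) */
static int lookup(struct entry *e, int *out)
{
	if (!atomic_load_explicit(&e->valid, memory_order_acquire))
		return -1;	/* not valid yet: defer, as cache_check() does */
	*out = e->contents;	/* guaranteed to see the value published above */
	return 0;
}

int main(void)
{
	struct entry e = { .contents = 0, .valid = false };
	int v;

	publish(&e, 42);
	if (lookup(&e, &v) == 0)
		printf("cached value: %d\n", v);
	return 0;
}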
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 9dab9573be41..57d344cf2256 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -989,20 +989,26 @@ call_refreshresult(struct rpc_task *task)
dprint_status(task);
task->tk_status = 0;
- task->tk_action = call_allocate;
- if (status >= 0 && rpcauth_uptodatecred(task))
- return;
+ task->tk_action = call_refresh;
switch (status) {
- case -EACCES:
- rpc_exit(task, -EACCES);
- return;
- case -ENOMEM:
- rpc_exit(task, -ENOMEM);
+ case 0:
+ if (rpcauth_uptodatecred(task))
+ task->tk_action = call_allocate;
return;
case -ETIMEDOUT:
rpc_delay(task, 3*HZ);
+ case -EAGAIN:
+ status = -EACCES;
+ if (!task->tk_cred_retry)
+ break;
+ task->tk_cred_retry--;
+ dprintk("RPC: %5u %s: retry refresh creds\n",
+ task->tk_pid, __func__);
+ return;
}
- task->tk_action = call_refresh;
+ dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
+ task->tk_pid, __func__, status);
+ rpc_exit(task, status);
}
/*
@@ -1089,7 +1095,7 @@ static void
rpc_xdr_encode(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- kxdrproc_t encode;
+ kxdreproc_t encode;
__be32 *p;
dprint_status(task);
@@ -1529,7 +1535,7 @@ call_decode(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
- kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
+ kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
__be32 *p;
dprintk("RPC: %5u call_decode (status %d)\n",
@@ -1770,12 +1776,11 @@ out_overflow:
goto out_garbage;
}
-static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
+static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
- return 0;
}
-static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
+static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
return 0;
}
@@ -1824,23 +1829,15 @@ static void rpc_show_task(const struct rpc_clnt *clnt,
const struct rpc_task *task)
{
const char *rpc_waitq = "none";
- char *p, action[KSYM_SYMBOL_LEN];
if (RPC_IS_QUEUED(task))
rpc_waitq = rpc_qname(task->tk_waitqueue);
- /* map tk_action pointer to a function name; then trim off
- * the "+0x0 [sunrpc]" */
- sprint_symbol(action, (unsigned long)task->tk_action);
- p = strchr(action, '+');
- if (p)
- *p = '\0';
-
- printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
+ printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
task->tk_pid, task->tk_flags, task->tk_status,
clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
- action, rpc_waitq);
+ task->tk_action, rpc_waitq);
}
void rpc_show_tasks(void)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 10a17a37ec4e..72bc53683965 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -162,11 +162,19 @@ rpc_alloc_inode(struct super_block *sb)
}
static void
-rpc_destroy_inode(struct inode *inode)
+rpc_i_callback(struct rcu_head *head)
{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+ INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}
+static void
+rpc_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, rpc_i_callback);
+}
+
static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
@@ -430,7 +438,7 @@ void rpc_put_mount(void)
}
EXPORT_SYMBOL_GPL(rpc_put_mount);
-static int rpc_delete_dentry(struct dentry *dentry)
+static int rpc_delete_dentry(const struct dentry *dentry)
{
return 1;
}
@@ -466,7 +474,7 @@ static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
{
struct inode *inode;
- BUG_ON(!d_unhashed(dentry));
+ d_drop(dentry);
inode = rpc_get_inode(dir->i_sb, mode);
if (!inode)
goto out_err;
@@ -583,7 +591,7 @@ static struct dentry *__rpc_lookup_create(struct dentry *parent,
}
}
if (!dentry->d_inode)
- dentry->d_op = &rpc_dentry_operations;
+ d_set_d_op(dentry, &rpc_dentry_operations);
out_err:
return dentry;
}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa6d7ca2c851..c652e4cc9fe9 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -57,10 +57,6 @@ enum {
RPCBPROC_GETSTAT,
};
-#define RPCB_HIGHPROC_2 RPCBPROC_CALLIT
-#define RPCB_HIGHPROC_3 RPCBPROC_TADDR2UADDR
-#define RPCB_HIGHPROC_4 RPCBPROC_GETSTAT
-
/*
* r_owner
*
@@ -693,46 +689,37 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
* XDR functions for rpcbind
*/
-static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p,
- const struct rpcbind_args *rpcb)
+static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct rpcbind_args *rpcb)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
+ __be32 *p;
dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
task->tk_pid, task->tk_msg.rpc_proc->p_name,
rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
- p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz);
- if (unlikely(p == NULL))
- return -EIO;
-
- *p++ = htonl(rpcb->r_prog);
- *p++ = htonl(rpcb->r_vers);
- *p++ = htonl(rpcb->r_prot);
- *p = htonl(rpcb->r_port);
-
- return 0;
+ p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
+ *p++ = cpu_to_be32(rpcb->r_prog);
+ *p++ = cpu_to_be32(rpcb->r_vers);
+ *p++ = cpu_to_be32(rpcb->r_prot);
+ *p = cpu_to_be32(rpcb->r_port);
}
-static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
struct rpcbind_args *rpcb)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
unsigned long port;
-
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+ __be32 *p;
rpcb->r_port = 0;
- p = xdr_inline_decode(&xdr, sizeof(__be32));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
- port = ntohl(*p);
+ port = be32_to_cpup(p);
dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
task->tk_msg.rpc_proc->p_name, port);
if (unlikely(port > USHRT_MAX))
@@ -742,20 +729,18 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
return 0;
}
-static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
unsigned int *boolp)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
-
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+ __be32 *p;
- p = xdr_inline_decode(&xdr, sizeof(__be32));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return -EIO;
*boolp = 0;
- if (*p)
+ if (*p != xdr_zero)
*boolp = 1;
dprintk("RPC: %5u RPCB_%s call %s\n",
@@ -764,73 +749,53 @@ static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
return 0;
}
-static int encode_rpcb_string(struct xdr_stream *xdr, const char *string,
- const u32 maxstrlen)
+static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
+ const u32 maxstrlen)
{
- u32 len;
__be32 *p;
+ u32 len;
- if (unlikely(string == NULL))
- return -EIO;
len = strlen(string);
- if (unlikely(len > maxstrlen))
- return -EIO;
-
- p = xdr_reserve_space(xdr, sizeof(__be32) + len);
- if (unlikely(p == NULL))
- return -EIO;
+ BUG_ON(len > maxstrlen);
+ p = xdr_reserve_space(xdr, 4 + len);
xdr_encode_opaque(p, string, len);
-
- return 0;
}
-static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p,
- const struct rpcbind_args *rpcb)
+static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct rpcbind_args *rpcb)
{
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
+ __be32 *p;
dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
task->tk_pid, task->tk_msg.rpc_proc->p_name,
rpcb->r_prog, rpcb->r_vers,
rpcb->r_netid, rpcb->r_addr);
- xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
- p = xdr_reserve_space(&xdr,
- sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz));
- if (unlikely(p == NULL))
- return -EIO;
- *p++ = htonl(rpcb->r_prog);
- *p = htonl(rpcb->r_vers);
-
- if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN))
- return -EIO;
- if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN))
- return -EIO;
- if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN))
- return -EIO;
+ p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
+ *p++ = cpu_to_be32(rpcb->r_prog);
+ *p = cpu_to_be32(rpcb->r_vers);
- return 0;
+ encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN);
+ encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN);
+ encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN);
}
-static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
struct rpcbind_args *rpcb)
{
struct sockaddr_storage address;
struct sockaddr *sap = (struct sockaddr *)&address;
struct rpc_task *task = req->rq_task;
- struct xdr_stream xdr;
+ __be32 *p;
u32 len;
rpcb->r_port = 0;
- xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
- p = xdr_inline_decode(&xdr, sizeof(__be32));
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_fail;
- len = ntohl(*p);
+ len = be32_to_cpup(p);
/*
* If the returned universal address is a null string,
@@ -845,7 +810,7 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
if (unlikely(len > RPCBIND_MAXUADDRLEN))
goto out_fail;
- p = xdr_inline_decode(&xdr, len);
+ p = xdr_inline_decode(xdr, len);
if (unlikely(p == NULL))
goto out_fail;
dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
@@ -871,8 +836,8 @@ out_fail:
static struct rpc_procinfo rpcb_procedures2[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdrproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -881,8 +846,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdrproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -891,8 +856,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
},
[RPCBPROC_GETPORT] = {
.p_proc = RPCBPROC_GETPORT,
- .p_encode = (kxdrproc_t)rpcb_enc_mapping,
- .p_decode = (kxdrproc_t)rpcb_dec_getport,
+ .p_encode = (kxdreproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrdproc_t)rpcb_dec_getport,
.p_arglen = RPCB_mappingargs_sz,
.p_replen = RPCB_getportres_sz,
.p_statidx = RPCBPROC_GETPORT,
@@ -904,8 +869,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
static struct rpc_procinfo rpcb_procedures3[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -914,8 +879,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -924,8 +889,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
},
[RPCBPROC_GETADDR] = {
.p_proc = RPCBPROC_GETADDR,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_getaddr,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_getaddrres_sz,
.p_statidx = RPCBPROC_GETADDR,
@@ -937,8 +902,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
static struct rpc_procinfo rpcb_procedures4[] = {
[RPCBPROC_SET] = {
.p_proc = RPCBPROC_SET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_SET,
@@ -947,8 +912,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
},
[RPCBPROC_UNSET] = {
.p_proc = RPCBPROC_UNSET,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_set,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_setres_sz,
.p_statidx = RPCBPROC_UNSET,
@@ -957,8 +922,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
},
[RPCBPROC_GETADDR] = {
.p_proc = RPCBPROC_GETADDR,
- .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
- .p_decode = (kxdrproc_t)rpcb_dec_getaddr,
+ .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
.p_arglen = RPCB_getaddrargs_sz,
.p_replen = RPCB_getaddrres_sz,
.p_statidx = RPCBPROC_GETADDR,
@@ -993,19 +958,19 @@ static struct rpcb_info rpcb_next_version6[] = {
static struct rpc_version rpcb_version2 = {
.number = RPCBVERS_2,
- .nrprocs = RPCB_HIGHPROC_2,
+ .nrprocs = ARRAY_SIZE(rpcb_procedures2),
.procs = rpcb_procedures2
};
static struct rpc_version rpcb_version3 = {
.number = RPCBVERS_3,
- .nrprocs = RPCB_HIGHPROC_3,
+ .nrprocs = ARRAY_SIZE(rpcb_procedures3),
.procs = rpcb_procedures3
};
static struct rpc_version rpcb_version4 = {
.number = RPCBVERS_4,
- .nrprocs = RPCB_HIGHPROC_4,
+ .nrprocs = ARRAY_SIZE(rpcb_procedures4),
.procs = rpcb_procedures4
};
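At the top of the rpcb_clnt.c diff the hand-maintained RPCB_HIGHPROC_* constants are deleted, and at the bottom each rpc_version's .nrprocs becomes ARRAY_SIZE() of the matching procedure table, so the count can no longer drift from the table itself. A tiny standalone illustration of the idea; this uses the classic sizeof-based definition, whereas the kernel's ARRAY_SIZE() in linux/kernel.h additionally rejects non-array arguments.

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct procinfo {
	int proc;
	const char *name;
};

static const struct procinfo procedures[] = {
	{ 1, "SET" },
	{ 2, "UNSET" },
	{ 3, "GETPORT" },
};

int main(void)
{
	/* prints 3; adding or removing entries updates the count automatically */
	printf("nrprocs = %zu\n", ARRAY_SIZE(procedures));
	return 0;
}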
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index f71a73107ae9..80df89d957ba 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -115,9 +115,7 @@ EXPORT_SYMBOL_GPL(svc_seq_show);
*/
struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
{
- struct rpc_iostats *new;
- new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
- return new;
+ return kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(rpc_alloc_iostats);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6359c42c4941..08e05a8ce025 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -488,10 +488,6 @@ svc_destroy(struct svc_serv *serv)
if (svc_serv_is_pooled(serv))
svc_pool_map_put();
-#if defined(CONFIG_NFS_V4_1)
- svc_sock_destroy(serv->bc_xprt);
-#endif /* CONFIG_NFS_V4_1 */
-
svc_unregister(serv);
kfree(serv->sv_pools);
kfree(serv);
@@ -1005,6 +1001,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
rqstp->rq_splice_ok = 1;
/* Will be turned off only when NFSv4 Sessions are used */
rqstp->rq_usedeferral = 1;
+ rqstp->rq_dropme = false;
/* Setup reply header */
rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1106,7 +1103,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
/* Encode reply */
- if (*statp == rpc_drop_reply) {
+ if (rqstp->rq_dropme) {
if (procp->pc_release)
procp->pc_release(rqstp, NULL, rqstp->rq_resp);
goto dropit;
@@ -1147,7 +1144,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
dropit:
svc_authorise(rqstp); /* doesn't hurt to call this twice */
dprintk("svc: svc_process dropit\n");
- svc_drop(rqstp);
return 0;
err_short_len:
@@ -1218,7 +1214,6 @@ svc_process(struct svc_rqst *rqstp)
struct kvec *resv = &rqstp->rq_res.head[0];
struct svc_serv *serv = rqstp->rq_server;
u32 dir;
- int error;
/*
* Setup response xdr_buf.
@@ -1246,11 +1241,13 @@ svc_process(struct svc_rqst *rqstp)
return 0;
}
- error = svc_process_common(rqstp, argv, resv);
- if (error <= 0)
- return error;
-
- return svc_send(rqstp);
+ /* Returns 1 for send, 0 for drop */
+ if (svc_process_common(rqstp, argv, resv))
+ return svc_send(rqstp);
+ else {
+ svc_drop(rqstp);
+ return 0;
+ }
}
#if defined(CONFIG_NFS_V4_1)
@@ -1264,10 +1261,9 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
{
struct kvec *argv = &rqstp->rq_arg.head[0];
struct kvec *resv = &rqstp->rq_res.head[0];
- int error;
/* Build the svc_rqst used by the common processing routine */
- rqstp->rq_xprt = serv->bc_xprt;
+ rqstp->rq_xprt = serv->sv_bc_xprt;
rqstp->rq_xid = req->rq_xid;
rqstp->rq_prot = req->rq_xprt->prot;
rqstp->rq_server = serv;
@@ -1292,12 +1288,15 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
svc_getu32(argv); /* XID */
svc_getnl(argv); /* CALLDIR */
- error = svc_process_common(rqstp, argv, resv);
- if (error <= 0)
- return error;
-
- memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
- return bc_send(req);
+ /* Returns 1 for send, 0 for drop */
+ if (svc_process_common(rqstp, argv, resv)) {
+ memcpy(&req->rq_snd_buf, &rqstp->rq_res,
+ sizeof(req->rq_snd_buf));
+ return bc_send(req);
+ } else {
+ /* Nothing to do to drop request */
+ return 0;
+ }
}
EXPORT_SYMBOL(bc_svc_process);
#endif /* CONFIG_NFS_V4_1 */
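With the svc.c changes above, a dropped request is signalled through the new rqstp->rq_dropme flag rather than the old rpc_drop_reply return value, and the send-versus-drop decision moves out into svc_process() and bc_svc_process(). A hedged sketch of what a service procedure that wants its request dropped might look like under the new convention (hypothetical handler and helper, not from this patch):

static __be32 example_proc(struct svc_rqst *rqstp, void *argp, void *resp)
{
	if (!example_ready(argp)) {		/* hypothetical helper */
		rqstp->rq_dropme = true;	/* svc_process_common() will drop it */
		return rpc_success;		/* return value no longer signals the drop */
	}
	/* ... fill in *resp and reply normally ... */
	return rpc_success;
}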
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index c82fe739fbdc..ab86b7927f84 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -5,7 +5,6 @@
*/
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -14,6 +13,7 @@
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -129,6 +129,9 @@ static void svc_xprt_free(struct kref *kref)
if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
svcauth_unix_info_release(xprt);
put_net(xprt->xpt_net);
+ /* See comment on corresponding get in xs_setup_bc_tcp(): */
+ if (xprt->xpt_bc_xprt)
+ xprt_put(xprt->xpt_bc_xprt);
xprt->xpt_ops->xpo_free(xprt);
module_put(owner);
}
@@ -213,6 +216,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
spin_lock(&svc_xprt_class_lock);
list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
struct svc_xprt *newxprt;
+ unsigned short newport;
if (strcmp(xprt_name, xcl->xcl_name))
continue;
@@ -231,8 +235,9 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
spin_lock_bh(&serv->sv_lock);
list_add(&newxprt->xpt_list, &serv->sv_permsocks);
spin_unlock_bh(&serv->sv_lock);
+ newport = svc_xprt_local_port(newxprt);
clear_bit(XPT_BUSY, &newxprt->xpt_flags);
- return svc_xprt_local_port(newxprt);
+ return newport;
}
err:
spin_unlock(&svc_xprt_class_lock);
@@ -302,6 +307,15 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
list_del(&rqstp->rq_list);
}
+static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+{
+ if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
+ return true;
+ if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
+ return xprt->xpt_ops->xpo_has_wspace(xprt);
+ return false;
+}
+
/*
* Queue up a transport with data pending. If there are idle nfsd
* processes, wake 'em up.
@@ -314,8 +328,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
struct svc_rqst *rqstp;
int cpu;
- if (!(xprt->xpt_flags &
- ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
+ if (!svc_xprt_has_something_to_do(xprt))
return;
cpu = get_cpu();
@@ -342,28 +355,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
dprintk("svc: transport %p busy, not enqueued\n", xprt);
goto out_unlock;
}
- BUG_ON(xprt->xpt_pool != NULL);
- xprt->xpt_pool = pool;
-
- /* Handle pending connection */
- if (test_bit(XPT_CONN, &xprt->xpt_flags))
- goto process;
-
- /* Handle close in-progress */
- if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
- goto process;
-
- /* Check if we have space to reply to a request */
- if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
- /* Don't enqueue while not enough space for reply */
- dprintk("svc: no write space, transport %p not enqueued\n",
- xprt);
- xprt->xpt_pool = NULL;
- clear_bit(XPT_BUSY, &xprt->xpt_flags);
- goto out_unlock;
- }
- process:
if (!list_empty(&pool->sp_threads)) {
rqstp = list_entry(pool->sp_threads.next,
struct svc_rqst,
@@ -380,13 +372,11 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
pool->sp_stats.threads_woken++;
- BUG_ON(xprt->xpt_pool != pool);
wake_up(&rqstp->rq_wait);
} else {
dprintk("svc: transport %p put into queue\n", xprt);
list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
pool->sp_stats.sockets_queued++;
- BUG_ON(xprt->xpt_pool != pool);
}
out_unlock:
@@ -425,9 +415,13 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
void svc_xprt_received(struct svc_xprt *xprt)
{
BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
- xprt->xpt_pool = NULL;
+ /* As soon as we clear busy, the xprt could be closed and
+ * 'put', so we need a reference to call svc_xprt_enqueue with:
+ */
+ svc_xprt_get(xprt);
clear_bit(XPT_BUSY, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
+ svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);
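The svc_xprt_get()/svc_xprt_put() pair added to svc_xprt_received() is the usual way to pin an object across a window in which another CPU may drop the last reference: once XPT_BUSY is cleared the xprt may be closed and freed, so a temporary reference covers the following svc_xprt_enqueue() call. The same pattern in generic form (sketch; example_obj, its 'ref' kref and 'flags' word are hypothetical):

static void example_release(struct kref *kref);	/* frees the object */

static void example_hand_off(struct example_obj *obj)
{
	kref_get(&obj->ref);			/* pin across the unsafe window */
	clear_bit(EXAMPLE_BUSY, &obj->flags);	/* others may now drop their reference */
	example_notify(obj);			/* still safe: we hold our own reference */
	kref_put(&obj->ref, example_release);
}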
@@ -716,7 +710,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
dprintk("svc_recv: found XPT_CLOSE\n");
svc_delete_xprt(xprt);
- } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+ /* Leave XPT_BUSY set on the dead xprt: */
+ goto out;
+ }
+ if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
struct svc_xprt *newxpt;
newxpt = xprt->xpt_ops->xpo_accept(xprt);
if (newxpt) {
@@ -741,28 +738,23 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
spin_unlock_bh(&serv->sv_lock);
svc_xprt_received(newxpt);
}
- svc_xprt_received(xprt);
- } else {
+ } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
rqstp, pool->sp_id, xprt,
atomic_read(&xprt->xpt_ref.refcount));
rqstp->rq_deferred = svc_deferred_dequeue(xprt);
- if (rqstp->rq_deferred) {
- svc_xprt_received(xprt);
+ if (rqstp->rq_deferred)
len = svc_deferred_recv(rqstp);
- } else {
+ else
len = xprt->xpt_ops->xpo_recvfrom(rqstp);
- svc_xprt_received(xprt);
- }
dprintk("svc: got len=%d\n", len);
}
+ svc_xprt_received(xprt);
/* No data, incomplete (TCP) read, or accept() */
- if (len == 0 || len == -EAGAIN) {
- rqstp->rq_res.len = 0;
- svc_xprt_release(rqstp);
- return -EAGAIN;
- }
+ if (len == 0 || len == -EAGAIN)
+ goto out;
+
clear_bit(XPT_OLD, &xprt->xpt_flags);
rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
@@ -771,6 +763,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (serv->sv_stats)
serv->sv_stats->netcnt++;
return len;
+out:
+ rqstp->rq_res.len = 0;
+ svc_xprt_release(rqstp);
+ return -EAGAIN;
}
EXPORT_SYMBOL_GPL(svc_recv);
@@ -929,7 +925,12 @@ void svc_close_xprt(struct svc_xprt *xprt)
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
/* someone else will have to effect the close */
return;
-
+ /*
+ * We expect svc_close_xprt() to work even when no threads are
+ * running (e.g., while configuring the server before starting
+ * any threads), so if the transport isn't busy, we delete
+ * it ourself:
+ * it ourselves:
svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);
@@ -939,16 +940,16 @@ void svc_close_all(struct list_head *xprt_list)
struct svc_xprt *xprt;
struct svc_xprt *tmp;
+ /*
+ * The server is shutting down, and no more threads are running.
+ * svc_xprt_enqueue() might still be running, but at worst it
+ * will re-add the xprt to sp_sockets, which will soon get
+ * freed. So we don't bother with any more locking, and don't
+ * leave the close to the (nonexistent) server threads:
+ */
list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
set_bit(XPT_CLOSE, &xprt->xpt_flags);
- if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
- /* Waiting to be processed, but no threads left,
- * So just remove it from the waiting list
- */
- list_del_init(&xprt->xpt_ready);
- clear_bit(XPT_BUSY, &xprt->xpt_flags);
- }
- svc_close_xprt(xprt);
+ svc_delete_xprt(xprt);
}
}
@@ -1022,6 +1023,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
}
svc_xprt_get(rqstp->rq_xprt);
dr->xprt = rqstp->rq_xprt;
+ rqstp->rq_dropme = true;
dr->handle.revisit = svc_revisit;
return &dr->handle;
@@ -1059,14 +1061,13 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
return NULL;
spin_lock(&xprt->xpt_lock);
- clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
if (!list_empty(&xprt->xpt_deferred)) {
dr = list_entry(xprt->xpt_deferred.next,
struct svc_deferred_req,
handle.recent);
list_del_init(&dr->handle.recent);
- set_bit(XPT_DEFERRED, &xprt->xpt_flags);
- }
+ } else
+ clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
spin_unlock(&xprt->xpt_lock);
return dr;
}
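The svc_deferred_dequeue() change keeps XPT_DEFERRED set while the deferred list is non-empty and clears it only when the list is observed empty under xpt_lock, so the flag can no longer go stale between a dequeue and a concurrent enqueue. The general shape of that pattern (sketch; work_queue, work_item and WQ_HAS_WORK are hypothetical):

static struct work_item *example_dequeue(struct work_queue *q)
{
	struct work_item *item = NULL;

	spin_lock(&q->lock);
	if (!list_empty(&q->pending)) {
		item = list_first_entry(&q->pending, struct work_item, link);
		list_del_init(&item->link);
	} else
		clear_bit(WQ_HAS_WORK, &q->flags);	/* only clear when truly empty */
	spin_unlock(&q->lock);
	return item;
}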
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 4e9393c24687..7963569fc04f 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -118,7 +118,6 @@ EXPORT_SYMBOL_GPL(svc_auth_unregister);
#define DN_HASHBITS 6
#define DN_HASHMAX (1<<DN_HASHBITS)
-#define DN_HASHMASK (DN_HASHMAX-1)
static struct hlist_head auth_domain_table[DN_HASHMAX];
static spinlock_t auth_domain_lock =
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 560677d187f1..30916b06c12b 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -30,7 +30,9 @@
struct unix_domain {
struct auth_domain h;
+#ifdef CONFIG_NFSD_DEPRECATED
int addr_changes;
+#endif /* CONFIG_NFSD_DEPRECATED */
/* other stuff later */
};
@@ -64,7 +66,9 @@ struct auth_domain *unix_domain_find(char *name)
return NULL;
}
new->h.flavour = &svcauth_unix;
+#ifdef CONFIG_NFSD_DEPRECATED
new->addr_changes = 0;
+#endif /* CONFIG_NFSD_DEPRECATED */
rv = auth_domain_lookup(name, &new->h);
}
}
@@ -85,14 +89,15 @@ static void svcauth_unix_domain_release(struct auth_domain *dom)
*/
#define IP_HASHBITS 8
#define IP_HASHMAX (1<<IP_HASHBITS)
-#define IP_HASHMASK (IP_HASHMAX-1)
struct ip_map {
struct cache_head h;
char m_class[8]; /* e.g. "nfsd" */
struct in6_addr m_addr;
struct unix_domain *m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
int m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
};
static void ip_map_put(struct kref *kref)
@@ -146,7 +151,9 @@ static void update(struct cache_head *cnew, struct cache_head *citem)
kref_get(&item->m_client->h.ref);
new->m_client = item->m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
new->m_add_change = item->m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
}
static struct cache_head *ip_map_alloc(void)
{
@@ -331,6 +338,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
ip.h.flags = 0;
if (!udom)
set_bit(CACHE_NEGATIVE, &ip.h.flags);
+#ifdef CONFIG_NFSD_DEPRECATED
else {
ip.m_add_change = udom->addr_changes;
/* if this is from the legacy set_client system call,
@@ -339,6 +347,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
if (expiry == NEVER)
ip.m_add_change++;
}
+#endif /* CONFIG_NFSD_DEPRECATED */
ip.h.expiry_time = expiry;
ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
hash_str(ipm->m_class, IP_HASHBITS) ^
@@ -358,6 +367,7 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm,
return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
}
+#ifdef CONFIG_NFSD_DEPRECATED
int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom)
{
struct unix_domain *udom;
@@ -402,8 +412,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr)
return NULL;
if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
- if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
- auth_domain_put(&ipm->m_client->h);
+ sunrpc_invalidate(&ipm->h, sn->ip_map_cache);
rv = NULL;
} else {
rv = &ipm->m_client->h;
@@ -413,6 +422,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr)
return rv;
}
EXPORT_SYMBOL_GPL(auth_unix_lookup);
+#endif /* CONFIG_NFSD_DEPRECATED */
void svcauth_unix_purge(void)
{
@@ -497,7 +507,6 @@ svcauth_unix_info_release(struct svc_xprt *xpt)
*/
#define GID_HASHBITS 8
#define GID_HASHMAX (1<<GID_HASHBITS)
-#define GID_HASHMASK (GID_HASHMAX - 1)
struct unix_gid {
struct cache_head h;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 07919e16be3e..7bd3bbba4710 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -66,6 +66,13 @@ static void svc_sock_free(struct svc_xprt *);
static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
struct net *, struct sockaddr *,
int, int);
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+ struct net *, struct sockaddr *,
+ int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];
@@ -324,19 +331,21 @@ int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen,
len = onelen;
break;
}
- if (toclose && strcmp(toclose, buf + len) == 0)
+ if (toclose && strcmp(toclose, buf + len) == 0) {
closesk = svsk;
- else
+ svc_xprt_get(&closesk->sk_xprt);
+ } else
len += onelen;
}
spin_unlock_bh(&serv->sv_lock);
- if (closesk)
+ if (closesk) {
/* Should unregister with portmap, but you cannot
* unregister just one protocol...
*/
svc_close_xprt(&closesk->sk_xprt);
- else if (toclose)
+ svc_xprt_put(&closesk->sk_xprt);
+ } else if (toclose)
return -ENOENT;
return len;
}
@@ -985,15 +994,17 @@ static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
vec[0] = rqstp->rq_arg.head[0];
} else {
/* REPLY */
- if (svsk->sk_bc_xprt)
- req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
+ struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
+
+ if (bc_xprt)
+ req = xprt_lookup_rqst(bc_xprt, xid);
if (!req) {
printk(KERN_NOTICE
"%s: Got unrecognized reply: "
- "calldir 0x%x sk_bc_xprt %p xid %08x\n",
+ "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
__func__, ntohl(calldir),
- svsk->sk_bc_xprt, xid);
+ bc_xprt, xid);
vec[0] = rqstp->rq_arg.head[0];
goto out;
}
@@ -1184,6 +1195,57 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
}
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+ struct net *, struct sockaddr *,
+ int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+
+static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv,
+ struct net *net,
+ struct sockaddr *sa, int salen,
+ int flags)
+{
+ return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
+}
+
+static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt)
+{
+}
+
+static struct svc_xprt_ops svc_tcp_bc_ops = {
+ .xpo_create = svc_bc_tcp_create,
+ .xpo_detach = svc_bc_tcp_sock_detach,
+ .xpo_free = svc_bc_sock_free,
+ .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
+};
+
+static struct svc_xprt_class svc_tcp_bc_class = {
+ .xcl_name = "tcp-bc",
+ .xcl_owner = THIS_MODULE,
+ .xcl_ops = &svc_tcp_bc_ops,
+ .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+};
+
+static void svc_init_bc_xprt_sock(void)
+{
+ svc_reg_xprt_class(&svc_tcp_bc_class);
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+ svc_unreg_xprt_class(&svc_tcp_bc_class);
+}
+#else /* CONFIG_NFS_V4_1 */
+static void svc_init_bc_xprt_sock(void)
+{
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
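The empty svc_init_bc_xprt_sock()/svc_cleanup_bc_xprt_sock() stubs in the !CONFIG_NFS_V4_1 branch let svc_init_xprt_sock() and svc_cleanup_xprt_sock() call them unconditionally, keeping #ifdefs out of the callers. The idiom in general form (sketch; option and function names are hypothetical):

#if defined(CONFIG_EXAMPLE_FEATURE)
void example_feature_init(void);		/* real implementation compiled in */
#else
static inline void example_feature_init(void)
{
}
#endif
/* callers just do example_feature_init(); no #ifdef of their own */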
static struct svc_xprt_ops svc_tcp_ops = {
.xpo_create = svc_tcp_create,
.xpo_recvfrom = svc_tcp_recvfrom,
@@ -1207,12 +1269,14 @@ void svc_init_xprt_sock(void)
{
svc_reg_xprt_class(&svc_tcp_class);
svc_reg_xprt_class(&svc_udp_class);
+ svc_init_bc_xprt_sock();
}
void svc_cleanup_xprt_sock(void)
{
svc_unreg_xprt_class(&svc_tcp_class);
svc_unreg_xprt_class(&svc_udp_class);
+ svc_cleanup_bc_xprt_sock();
}
static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
@@ -1509,41 +1573,45 @@ static void svc_sock_free(struct svc_xprt *xprt)
kfree(svsk);
}
+#if defined(CONFIG_NFS_V4_1)
/*
- * Create a svc_xprt.
- *
- * For internal use only (e.g. nfsv4.1 backchannel).
- * Callers should typically use the xpo_create() method.
+ * Create a back channel svc_xprt which shares the fore channel socket.
*/
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
+ int protocol,
+ struct net *net,
+ struct sockaddr *sin, int len,
+ int flags)
{
struct svc_sock *svsk;
- struct svc_xprt *xprt = NULL;
+ struct svc_xprt *xprt;
+
+ if (protocol != IPPROTO_TCP) {
+ printk(KERN_WARNING "svc: only TCP sockets"
+ " supported on shared back channel\n");
+ return ERR_PTR(-EINVAL);
+ }
- dprintk("svc: %s\n", __func__);
svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
if (!svsk)
- goto out;
+ return ERR_PTR(-ENOMEM);
xprt = &svsk->sk_xprt;
- if (prot == IPPROTO_TCP)
- svc_xprt_init(&svc_tcp_class, xprt, serv);
- else if (prot == IPPROTO_UDP)
- svc_xprt_init(&svc_udp_class, xprt, serv);
- else
- BUG();
-out:
- dprintk("svc: %s return %p\n", __func__, xprt);
+ svc_xprt_init(&svc_tcp_bc_class, xprt, serv);
+
+ serv->sv_bc_xprt = xprt;
+
return xprt;
}
-EXPORT_SYMBOL_GPL(svc_sock_create);
/*
- * Destroy a svc_sock.
+ * Free a back channel svc_sock.
*/
-void svc_sock_destroy(struct svc_xprt *xprt)
+static void svc_bc_sock_free(struct svc_xprt *xprt)
{
- if (xprt)
+ if (xprt) {
+ kfree(xprt->xpt_bc_sid);
kfree(container_of(xprt, struct svc_sock, sk_xprt));
+ }
}
-EXPORT_SYMBOL_GPL(svc_sock_destroy);
+#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index cd9e841e7492..679cd674b81d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -552,6 +552,74 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
+static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
+ __be32 *p, unsigned int len)
+{
+ if (len > iov->iov_len)
+ len = iov->iov_len;
+ if (p == NULL)
+ p = (__be32*)iov->iov_base;
+ xdr->p = p;
+ xdr->end = (__be32*)(iov->iov_base + len);
+ xdr->iov = iov;
+ xdr->page_ptr = NULL;
+}
+
+static int xdr_set_page_base(struct xdr_stream *xdr,
+ unsigned int base, unsigned int len)
+{
+ unsigned int pgnr;
+ unsigned int maxlen;
+ unsigned int pgoff;
+ unsigned int pgend;
+ void *kaddr;
+
+ maxlen = xdr->buf->page_len;
+ if (base >= maxlen)
+ return -EINVAL;
+ maxlen -= base;
+ if (len > maxlen)
+ len = maxlen;
+
+ base += xdr->buf->page_base;
+
+ pgnr = base >> PAGE_SHIFT;
+ xdr->page_ptr = &xdr->buf->pages[pgnr];
+ kaddr = page_address(*xdr->page_ptr);
+
+ pgoff = base & ~PAGE_MASK;
+ xdr->p = (__be32*)(kaddr + pgoff);
+
+ pgend = pgoff + len;
+ if (pgend > PAGE_SIZE)
+ pgend = PAGE_SIZE;
+ xdr->end = (__be32*)(kaddr + pgend);
+ xdr->iov = NULL;
+ return 0;
+}
+
+static void xdr_set_next_page(struct xdr_stream *xdr)
+{
+ unsigned int newbase;
+
+ newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
+ newbase -= xdr->buf->page_base;
+
+ if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
+ xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+}
+
+static bool xdr_set_next_buffer(struct xdr_stream *xdr)
+{
+ if (xdr->page_ptr != NULL)
+ xdr_set_next_page(xdr);
+ else if (xdr->iov == xdr->buf->head) {
+ if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
+ xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+ }
+ return xdr->p != xdr->end;
+}
+
/**
* xdr_init_decode - Initialize an xdr_stream for decoding data.
* @xdr: pointer to xdr_stream struct
@@ -560,41 +628,67 @@ EXPORT_SYMBOL_GPL(xdr_write_pages);
*/
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
- struct kvec *iov = buf->head;
- unsigned int len = iov->iov_len;
-
- if (len > buf->len)
- len = buf->len;
xdr->buf = buf;
- xdr->iov = iov;
- xdr->p = p;
- xdr->end = (__be32 *)((char *)iov->iov_base + len);
+ xdr->scratch.iov_base = NULL;
+ xdr->scratch.iov_len = 0;
+ if (buf->head[0].iov_len != 0)
+ xdr_set_iov(xdr, buf->head, p, buf->len);
+ else if (buf->page_len != 0)
+ xdr_set_page_base(xdr, 0, buf->len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
-/**
- * xdr_inline_peek - Allow read-ahead in the XDR data stream
- * @xdr: pointer to xdr_stream struct
- * @nbytes: number of bytes of data to decode
- *
- * Check if the input buffer is long enough to enable us to decode
- * 'nbytes' more bytes of data starting at the current position.
- * If so return the current pointer without updating the current
- * pointer position.
- */
-__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
+static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p = xdr->p;
__be32 *q = p + XDR_QUADLEN(nbytes);
if (unlikely(q > xdr->end || q < p))
return NULL;
+ xdr->p = q;
return p;
}
-EXPORT_SYMBOL_GPL(xdr_inline_peek);
/**
- * xdr_inline_decode - Retrieve non-page XDR data to decode
+ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to an empty buffer
+ * @buflen: size of 'buf'
+ *
+ * The scratch buffer is used when decoding from an array of pages.
+ * If an xdr_inline_decode() call spans across page boundaries, then
+ * we copy the data into the scratch buffer in order to allow linear
+ * access.
+ */
+void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
+{
+ xdr->scratch.iov_base = buf;
+ xdr->scratch.iov_len = buflen;
+}
+EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
+
+static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
+{
+ __be32 *p;
+ void *cpdest = xdr->scratch.iov_base;
+ size_t cplen = (char *)xdr->end - (char *)xdr->p;
+
+ if (nbytes > xdr->scratch.iov_len)
+ return NULL;
+ memcpy(cpdest, xdr->p, cplen);
+ cpdest += cplen;
+ nbytes -= cplen;
+ if (!xdr_set_next_buffer(xdr))
+ return NULL;
+ p = __xdr_inline_decode(xdr, nbytes);
+ if (p == NULL)
+ return NULL;
+ memcpy(cpdest, p, nbytes);
+ return xdr->scratch.iov_base;
+}
+
+/**
+ * xdr_inline_decode - Retrieve XDR data to decode
* @xdr: pointer to xdr_stream struct
* @nbytes: number of bytes of data to decode
*
@@ -605,13 +699,16 @@ EXPORT_SYMBOL_GPL(xdr_inline_peek);
*/
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
- __be32 *p = xdr->p;
- __be32 *q = p + XDR_QUADLEN(nbytes);
+ __be32 *p;
- if (unlikely(q > xdr->end || q < p))
+ if (nbytes == 0)
+ return xdr->p;
+ if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
return NULL;
- xdr->p = q;
- return p;
+ p = __xdr_inline_decode(xdr, nbytes);
+ if (p != NULL)
+ return p;
+ return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
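After this rework xdr_inline_decode() can walk from the head kvec into the page array and on to the tail, copying an object that straddles a page boundary into a caller-supplied scratch buffer. A hedged sketch of a caller using the new interface (hypothetical decoder; the 16-byte object and 64-byte scratch size are illustrative):

static int example_decode(struct xdr_stream *xdr)
{
	char scratch[64];	/* must cover the largest inline object decoded */
	__be32 *p;

	xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
	p = xdr_inline_decode(xdr, 16);	/* may return a pointer into 'scratch' */
	if (p == NULL)
		return -EIO;
	/* ... interpret the 16 bytes at p before the next decode call ... */
	return 0;
}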
@@ -671,16 +768,12 @@ EXPORT_SYMBOL_GPL(xdr_read_pages);
*/
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
- char * kaddr = page_address(xdr->buf->pages[0]);
xdr_read_pages(xdr, len);
/*
* Position current pointer at beginning of tail, and
* set remaining message length.
*/
- if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
- len = PAGE_CACHE_SIZE - xdr->buf->page_base;
- xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
- xdr->end = (__be32 *)((char *)xdr->p + len);
+ xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 4c8f18aff7c3..856274d7e85c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -965,6 +965,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
xprt = kzalloc(size, GFP_KERNEL);
if (xprt == NULL)
goto out;
+ kref_init(&xprt->kref);
xprt->max_reqs = max_req;
xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
@@ -1101,8 +1102,10 @@ found:
-PTR_ERR(xprt));
return xprt;
}
+ if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
+ /* ->setup returned a pre-initialized xprt: */
+ return xprt;
- kref_init(&xprt->kref);
spin_lock_init(&xprt->transport_lock);
spin_lock_init(&xprt->reserve_lock);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index dfcab5ac65af..c431f5a57960 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -770,7 +770,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
dprintk("RPC: xs_destroy xprt %p\n", xprt);
- cancel_rearming_delayed_work(&transport->connect_worker);
+ cancel_delayed_work_sync(&transport->connect_worker);
xs_close(xprt);
xs_free_peer_addresses(xprt);
@@ -2359,6 +2359,15 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
struct svc_sock *bc_sock;
struct rpc_xprt *ret;
+ if (args->bc_xprt->xpt_bc_xprt) {
+ /*
+ * This server connection already has a backchannel
+ * export; we can't create a new one, as we wouldn't be
+ * able to match replies based on xid any more. So,
+ * reuse the already-existing one:
+ */
+ return args->bc_xprt->xpt_bc_xprt;
+ }
xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
if (IS_ERR(xprt))
return xprt;
@@ -2375,16 +2384,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
xprt->reestablish_timeout = 0;
xprt->idle_timeout = 0;
- /*
- * The backchannel uses the same socket connection as the
- * forechannel
- */
- xprt->bc_xprt = args->bc_xprt;
- bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
- bc_sock->sk_bc_xprt = xprt;
- transport->sock = bc_sock->sk_sock;
- transport->inet = bc_sock->sk_sk;
-
xprt->ops = &bc_tcp_ops;
switch (addr->sa_family) {
@@ -2407,6 +2406,20 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
xprt->address_strings[RPC_DISPLAY_PROTO]);
/*
+ * Once we've associated a backchannel xprt with a connection,
+ * we want to keep it around as long as the connection
+ * lasts, in case we need to start using it for a backchannel
+ * again; this reference won't be dropped until bc_xprt is
+ * destroyed.
+ */
+ xprt_get(xprt);
+ args->bc_xprt->xpt_bc_xprt = xprt;
+ xprt->bc_xprt = args->bc_xprt;
+ bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
+ transport->sock = bc_sock->sk_sock;
+ transport->inet = bc_sock->sk_sk;
+
+ /*
* Since we don't want connections for the backchannel, we set
* the xprt status to connected
*/
@@ -2415,6 +2428,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
if (try_module_get(THIS_MODULE))
return xprt;
+ xprt_put(xprt);
ret = ERR_PTR(-EINVAL);
out_err:
xprt_free(xprt);
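Taken together with the svc_xprt_free() hunk earlier in this diff, the xprt_get()/xpt_bc_xprt assignment above establishes a simple ownership rule: the server-side svc_xprt holds one reference on its backchannel rpc_xprt for as long as the TCP connection lives, and a later xs_setup_bc_tcp() call simply returns that same xprt. Condensed restatement of the two sides (both fragments appear verbatim in this diff):

/* xs_setup_bc_tcp(): the server connection pins the backchannel rpc_xprt */
xprt_get(xprt);				/* xprt is the struct rpc_xprt */
args->bc_xprt->xpt_bc_xprt = xprt;	/* bc_xprt is the struct svc_xprt */

/* svc_xprt_free(): the pin is dropped when the svc_xprt goes away */
if (xprt->xpt_bc_xprt)			/* here xprt is the struct svc_xprt */
	xprt_put(xprt->xpt_bc_xprt);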
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index b74f78d0c033..0436927369f3 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -29,28 +29,6 @@ config TIPC_ADVANCED
Saying Y here will open some advanced configuration for TIPC.
Most users do not need to bother; if unsure, just say N.
-config TIPC_ZONES
- int "Maximum number of zones in a network"
- depends on TIPC_ADVANCED
- range 1 255
- default "3"
- help
- Specifies how many zones can be supported in a TIPC network.
- Can range from 1 to 255 zones; default is 3.
-
- Setting this to a smaller value saves some memory;
- setting it to a higher value allows for more zones.
-
-config TIPC_CLUSTERS
- int "Maximum number of clusters in a zone"
- depends on TIPC_ADVANCED
- range 1 1
- default "1"
- help
- Specifies how many clusters can be supported in a TIPC zone.
-
- *** Currently TIPC only supports a single cluster per zone. ***
-
config TIPC_NODES
int "Maximum number of nodes in a cluster"
depends on TIPC_ADVANCED
@@ -72,7 +50,7 @@ config TIPC_PORTS
Specifies how many ports can be supported by a node.
Can range from 127 to 65535 ports; default is 8191.
- Setting this to a smaller value saves some memory,
+ Setting this to a smaller value saves some memory,
  setting it to a higher value allows for more ports.
config TIPC_LOG
@@ -89,12 +67,15 @@ config TIPC_LOG
managed remotely via TIPC.
config TIPC_DEBUG
- bool "Enable debug messages"
+ bool "Enable debugging support"
default n
help
- This enables debugging of TIPC.
+ Saying Y here enables TIPC debugging capabilities used by developers.
+ Most users do not need to bother; if unsure, just say N.
- Only say Y here if you are having trouble with TIPC. It will
- enable the display of detailed information about what is going on.
+ Enabling debugging support causes TIPC to display data about its
+ internal state when certain abnormal conditions occur. It also
+ makes it easy for developers to capture additional information of
+ interest using the dbg() or msg_dbg() macros.
endif # TIPC
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index dceb7027946c..521d24d04ab2 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -4,10 +4,10 @@
obj-$(CONFIG_TIPC) := tipc.o
-tipc-y += addr.o bcast.o bearer.o config.o cluster.o \
+tipc-y += addr.o bcast.o bearer.o config.o \
core.o handler.o link.o discover.o msg.o \
name_distr.o subscr.o name_table.o net.o \
netlink.o node.o node_subscr.o port.o ref.o \
- socket.o user_reg.o zone.o dbg.o eth_media.o
+ socket.o log.o eth_media.o
# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 886715a75259..88463d9a6f12 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -36,8 +36,6 @@
#include "core.h"
#include "addr.h"
-#include "zone.h"
-#include "cluster.h"
/**
* tipc_addr_domain_valid - validates a network domain address
@@ -55,14 +53,8 @@ int tipc_addr_domain_valid(u32 addr)
u32 z = tipc_zone(addr);
u32 max_nodes = tipc_max_nodes;
- if (is_slave(addr))
- max_nodes = LOWEST_SLAVE + tipc_max_slaves;
if (n > max_nodes)
return 0;
- if (c > tipc_max_clusters)
- return 0;
- if (z > tipc_max_zones)
- return 0;
if (n && (!z || !c))
return 0;
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index c1cc5724d8cc..2490fadd0caf 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -37,36 +37,11 @@
#ifndef _TIPC_ADDR_H
#define _TIPC_ADDR_H
-static inline u32 own_node(void)
-{
- return tipc_node(tipc_own_addr);
-}
-
-static inline u32 own_cluster(void)
-{
- return tipc_cluster(tipc_own_addr);
-}
-
-static inline u32 own_zone(void)
-{
- return tipc_zone(tipc_own_addr);
-}
-
static inline int in_own_cluster(u32 addr)
{
return !((addr ^ tipc_own_addr) >> 12);
}
-static inline int is_slave(u32 addr)
-{
- return addr & 0x800;
-}
-
-static inline int may_route(u32 addr)
-{
- return(addr ^ tipc_own_addr) >> 11;
-}
-
/**
* addr_domain - convert 2-bit scope value to equivalent message lookup domain
*
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 6d828d9eda42..70ab5ef48766 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -38,15 +38,12 @@
#include "core.h"
#include "link.h"
#include "port.h"
-#include "name_distr.h"
#include "bcast.h"
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
-#define BCLINK_LOG_BUF_SIZE 0
-
/*
* Loss rate for incoming broadcast frames; used to test retransmission code.
* Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
@@ -106,11 +103,14 @@ struct bclink {
};
-static struct bcbearer *bcbearer = NULL;
-static struct bclink *bclink = NULL;
-static struct link *bcl = NULL;
+static struct bcbearer *bcbearer;
+static struct bclink *bclink;
+static struct link *bcl;
static DEFINE_SPINLOCK(bc_lock);
+/* broadcast-capable node map */
+struct tipc_node_map tipc_bcast_nmap;
+
const char tipc_bclink_name[] = "broadcast-link";
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
@@ -196,9 +196,8 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
struct sk_buff *buf;
buf = bcl->first_out;
- while (buf && less_eq(buf_seqno(buf), after)) {
+ while (buf && less_eq(buf_seqno(buf), after))
buf = buf->next;
- }
tipc_link_retransmit(bcl, buf, mod(to - after));
}
@@ -224,9 +223,8 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
/* Skip over packets that node has previously acknowledged */
crs = bcl->first_out;
- while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
+ while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
crs = crs->next;
- }
/* Update packets that node is now acknowledging */
@@ -425,16 +423,14 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
#if (TIPC_BCAST_LOSS_RATE)
- static int rx_count = 0;
+ static int rx_count;
#endif
struct tipc_msg *msg = buf_msg(buf);
- struct tipc_node* node = tipc_node_find(msg_prevnode(msg));
+ struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
u32 next_in;
u32 seqno;
struct sk_buff *deferred;
- msg_dbg(msg, "<BC<<<");
-
if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
(msg_mc_netid(msg) != tipc_net_id))) {
buf_discard(buf);
@@ -442,7 +438,6 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
}
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
- msg_dbg(msg, "<BCNACK<<<");
if (msg_destnode(msg) == tipc_own_addr) {
tipc_node_lock(node);
tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
@@ -566,8 +561,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
if (likely(!msg_non_seq(buf_msg(buf)))) {
struct tipc_msg *msg;
- assert(tipc_cltr_bcast_nodes.count != 0);
- bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
+ assert(tipc_bcast_nmap.count != 0);
+ bcbuf_set_acks(buf, tipc_bcast_nmap.count);
msg = buf_msg(buf);
msg_set_non_seq(msg, 1);
msg_set_mc_netid(msg, tipc_net_id);
@@ -576,7 +571,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
/* Send buffer over bearers until all targets reached */
- bcbearer->remains = tipc_cltr_bcast_nodes;
+ bcbearer->remains = tipc_bcast_nmap;
for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
struct bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -774,7 +769,6 @@ int tipc_bclink_init(void)
bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
if (!bcbearer || !bclink) {
- nomem:
warn("Multicast link creation failed, no memory\n");
kfree(bcbearer);
bcbearer = NULL;
@@ -799,14 +793,6 @@ int tipc_bclink_init(void)
bcl->state = WORKING_WORKING;
strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
- if (BCLINK_LOG_BUF_SIZE) {
- char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
-
- if (!pb)
- goto nomem;
- tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
- }
-
return 0;
}
@@ -815,8 +801,6 @@ void tipc_bclink_stop(void)
spin_lock_bh(&bc_lock);
if (bcbearer) {
tipc_link_stop(bcl);
- if (BCLINK_LOG_BUF_SIZE)
- kfree(bcl->print_buf.buf);
bcl = NULL;
kfree(bclink);
bclink = NULL;
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 011c03f0a4ab..51f8c5326ce6 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -51,6 +51,7 @@ struct tipc_node_map {
u32 map[MAX_NODES / WSIZE];
};
+extern struct tipc_node_map tipc_bcast_nmap;
#define PLSIZE 32
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 885da94be4ac..837b7a467885 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -37,13 +37,12 @@
#include "core.h"
#include "config.h"
#include "bearer.h"
-#include "port.h"
#include "discover.h"
#define MAX_ADDR_STR 32
static struct media media_list[MAX_MEDIA];
-static u32 media_count = 0;
+static u32 media_count;
struct bearer tipc_bearers[MAX_BEARERS];
@@ -164,7 +163,6 @@ int tipc_register_media(u32 media_type,
m_ptr->priority = bearer_priority;
m_ptr->tolerance = link_tolerance;
m_ptr->window = send_window_limit;
- dbg("Media <%s> registered\n", name);
res = 0;
exit:
write_unlock_bh(&tipc_net_lock);
@@ -196,9 +194,8 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
unchar *addr = (unchar *)&a->dev_addr;
tipc_printf(pb, "UNKNOWN(%u)", media_type);
- for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) {
+ for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++)
tipc_printf(pb, "-%02x", addr[i]);
- }
}
}
@@ -253,7 +250,8 @@ static int bearer_name_validate(const char *name,
/* ensure all component parts of bearer name are present */
media_name = name_copy;
- if ((if_name = strchr(media_name, ':')) == NULL)
+ if_name = strchr(media_name, ':');
+ if (if_name == NULL)
return 0;
*(if_name++) = 0;
media_len = if_name - media_name;
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
deleted file mode 100644
index 405be87157ba..000000000000
--- a/net/tipc/cluster.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * net/tipc/cluster.c: TIPC cluster management routines
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "cluster.h"
-#include "link.h"
-
-static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
- u32 lower, u32 upper);
-
-struct tipc_node **tipc_local_nodes = NULL;
-struct tipc_node_map tipc_cltr_bcast_nodes = {0,{0,}};
-u32 tipc_highest_allowed_slave = 0;
-
-struct cluster *tipc_cltr_create(u32 addr)
-{
- struct _zone *z_ptr;
- struct cluster *c_ptr;
- int max_nodes;
-
- c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC);
- if (c_ptr == NULL) {
- warn("Cluster creation failure, no memory\n");
- return NULL;
- }
-
- c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
- if (in_own_cluster(addr))
- max_nodes = LOWEST_SLAVE + tipc_max_slaves;
- else
- max_nodes = tipc_max_nodes + 1;
-
- c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void*), GFP_ATOMIC);
- if (c_ptr->nodes == NULL) {
- warn("Cluster creation failure, no memory for node area\n");
- kfree(c_ptr);
- return NULL;
- }
-
- if (in_own_cluster(addr))
- tipc_local_nodes = c_ptr->nodes;
- c_ptr->highest_slave = LOWEST_SLAVE - 1;
- c_ptr->highest_node = 0;
-
- z_ptr = tipc_zone_find(tipc_zone(addr));
- if (!z_ptr) {
- z_ptr = tipc_zone_create(addr);
- }
- if (!z_ptr) {
- kfree(c_ptr->nodes);
- kfree(c_ptr);
- return NULL;
- }
-
- tipc_zone_attach_cluster(z_ptr, c_ptr);
- c_ptr->owner = z_ptr;
- return c_ptr;
-}
-
-void tipc_cltr_delete(struct cluster *c_ptr)
-{
- u32 n_num;
-
- if (!c_ptr)
- return;
- for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
- tipc_node_delete(c_ptr->nodes[n_num]);
- }
- for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
- tipc_node_delete(c_ptr->nodes[n_num]);
- }
- kfree(c_ptr->nodes);
- kfree(c_ptr);
-}
-
-
-void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr)
-{
- u32 n_num = tipc_node(n_ptr->addr);
- u32 max_n_num = tipc_max_nodes;
-
- if (in_own_cluster(n_ptr->addr))
- max_n_num = tipc_highest_allowed_slave;
- assert(n_num > 0);
- assert(n_num <= max_n_num);
- assert(c_ptr->nodes[n_num] == NULL);
- c_ptr->nodes[n_num] = n_ptr;
- if (n_num > c_ptr->highest_node)
- c_ptr->highest_node = n_num;
-}
-
-/**
- * tipc_cltr_select_router - select router to a cluster
- *
- * Uses deterministic and fair algorithm.
- */
-
-u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref)
-{
- u32 n_num;
- u32 ulim = c_ptr->highest_node;
- u32 mask;
- u32 tstart;
-
- assert(!in_own_cluster(c_ptr->addr));
- if (!ulim)
- return 0;
-
- /* Start entry must be random */
- mask = tipc_max_nodes;
- while (mask > ulim)
- mask >>= 1;
- tstart = ref & mask;
- n_num = tstart;
-
- /* Lookup upwards with wrap-around */
- do {
- if (tipc_node_is_up(c_ptr->nodes[n_num]))
- break;
- } while (++n_num <= ulim);
- if (n_num > ulim) {
- n_num = 1;
- do {
- if (tipc_node_is_up(c_ptr->nodes[n_num]))
- break;
- } while (++n_num < tstart);
- if (n_num == tstart)
- return 0;
- }
- assert(n_num <= ulim);
- return tipc_node_select_router(c_ptr->nodes[n_num], ref);
-}
-
-/**
- * tipc_cltr_select_node - select destination node within a remote cluster
- *
- * Uses deterministic and fair algorithm.
- */
-
-struct tipc_node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector)
-{
- u32 n_num;
- u32 mask = tipc_max_nodes;
- u32 start_entry;
-
- assert(!in_own_cluster(c_ptr->addr));
- if (!c_ptr->highest_node)
- return NULL;
-
- /* Start entry must be random */
- while (mask > c_ptr->highest_node) {
- mask >>= 1;
- }
- start_entry = (selector & mask) ? selector & mask : 1u;
- assert(start_entry <= c_ptr->highest_node);
-
- /* Lookup upwards with wrap-around */
- for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
- if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
- return c_ptr->nodes[n_num];
- }
- for (n_num = 1; n_num < start_entry; n_num++) {
- if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
- return c_ptr->nodes[n_num];
- }
- return NULL;
-}
-
-/*
- * Routing table management: See description in node.c
- */
-
-static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
-{
- u32 size = INT_H_SIZE + data_size;
- struct sk_buff *buf = tipc_buf_acquire(size);
- struct tipc_msg *msg;
-
- if (buf) {
- msg = buf_msg(buf);
- memset((char *)msg, 0, size);
- tipc_msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest);
- }
- return buf;
-}
-
-void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest,
- u32 lower, u32 upper)
-{
- struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
- struct tipc_msg *msg;
-
- if (buf) {
- msg = buf_msg(buf);
- msg_set_remote_node(msg, dest);
- msg_set_type(msg, ROUTE_ADDITION);
- tipc_cltr_multicast(c_ptr, buf, lower, upper);
- } else {
- warn("Memory squeeze: broadcast of new route failed\n");
- }
-}
-
-void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest,
- u32 lower, u32 upper)
-{
- struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
- struct tipc_msg *msg;
-
- if (buf) {
- msg = buf_msg(buf);
- msg_set_remote_node(msg, dest);
- msg_set_type(msg, ROUTE_REMOVAL);
- tipc_cltr_multicast(c_ptr, buf, lower, upper);
- } else {
- warn("Memory squeeze: broadcast of lost route failed\n");
- }
-}
-
-void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest)
-{
- struct sk_buff *buf;
- struct tipc_msg *msg;
- u32 highest = c_ptr->highest_slave;
- u32 n_num;
- int send = 0;
-
- assert(!is_slave(dest));
- assert(in_own_cluster(dest));
- assert(in_own_cluster(c_ptr->addr));
- if (highest <= LOWEST_SLAVE)
- return;
- buf = tipc_cltr_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
- c_ptr->addr);
- if (buf) {
- msg = buf_msg(buf);
- msg_set_remote_node(msg, c_ptr->addr);
- msg_set_type(msg, SLAVE_ROUTING_TABLE);
- for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
- if (c_ptr->nodes[n_num] &&
- tipc_node_has_active_links(c_ptr->nodes[n_num])) {
- send = 1;
- msg_set_dataoctet(msg, n_num);
- }
- }
- if (send)
- tipc_link_send(buf, dest, dest);
- else
- buf_discard(buf);
- } else {
- warn("Memory squeeze: broadcast of lost route failed\n");
- }
-}
-
-void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest)
-{
- struct sk_buff *buf;
- struct tipc_msg *msg;
- u32 highest = c_ptr->highest_node;
- u32 n_num;
- int send = 0;
-
- if (in_own_cluster(c_ptr->addr))
- return;
- assert(!is_slave(dest));
- assert(in_own_cluster(dest));
- highest = c_ptr->highest_node;
- buf = tipc_cltr_prepare_routing_msg(highest + 1, c_ptr->addr);
- if (buf) {
- msg = buf_msg(buf);
- msg_set_remote_node(msg, c_ptr->addr);
- msg_set_type(msg, EXT_ROUTING_TABLE);
- for (n_num = 1; n_num <= highest; n_num++) {
- if (c_ptr->nodes[n_num] &&
- tipc_node_has_active_links(c_ptr->nodes[n_num])) {
- send = 1;
- msg_set_dataoctet(msg, n_num);
- }
- }
- if (send)
- tipc_link_send(buf, dest, dest);
- else
- buf_discard(buf);
- } else {
- warn("Memory squeeze: broadcast of external route failed\n");
- }
-}
-
-void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
-{
- struct sk_buff *buf;
- struct tipc_msg *msg;
- u32 highest = c_ptr->highest_node;
- u32 n_num;
- int send = 0;
-
- assert(is_slave(dest));
- assert(in_own_cluster(c_ptr->addr));
- buf = tipc_cltr_prepare_routing_msg(highest, c_ptr->addr);
- if (buf) {
- msg = buf_msg(buf);
- msg_set_remote_node(msg, c_ptr->addr);
- msg_set_type(msg, LOCAL_ROUTING_TABLE);
- for (n_num = 1; n_num <= highest; n_num++) {
- if (c_ptr->nodes[n_num] &&
- tipc_node_has_active_links(c_ptr->nodes[n_num])) {
- send = 1;
- msg_set_dataoctet(msg, n_num);
- }
- }
- if (send)
- tipc_link_send(buf, dest, dest);
- else
- buf_discard(buf);
- } else {
- warn("Memory squeeze: broadcast of local route failed\n");
- }
-}
-
-void tipc_cltr_recv_routing_table(struct sk_buff *buf)
-{
- struct tipc_msg *msg = buf_msg(buf);
- struct cluster *c_ptr;
- struct tipc_node *n_ptr;
- unchar *node_table;
- u32 table_size;
- u32 router;
- u32 rem_node = msg_remote_node(msg);
- u32 z_num;
- u32 c_num;
- u32 n_num;
-
- c_ptr = tipc_cltr_find(rem_node);
- if (!c_ptr) {
- c_ptr = tipc_cltr_create(rem_node);
- if (!c_ptr) {
- buf_discard(buf);
- return;
- }
- }
-
- node_table = buf->data + msg_hdr_sz(msg);
- table_size = msg_size(msg) - msg_hdr_sz(msg);
- router = msg_prevnode(msg);
- z_num = tipc_zone(rem_node);
- c_num = tipc_cluster(rem_node);
-
- switch (msg_type(msg)) {
- case LOCAL_ROUTING_TABLE:
- assert(is_slave(tipc_own_addr));
- case EXT_ROUTING_TABLE:
- for (n_num = 1; n_num < table_size; n_num++) {
- if (node_table[n_num]) {
- u32 addr = tipc_addr(z_num, c_num, n_num);
- n_ptr = c_ptr->nodes[n_num];
- if (!n_ptr) {
- n_ptr = tipc_node_create(addr);
- }
- if (n_ptr)
- tipc_node_add_router(n_ptr, router);
- }
- }
- break;
- case SLAVE_ROUTING_TABLE:
- assert(!is_slave(tipc_own_addr));
- assert(in_own_cluster(c_ptr->addr));
- for (n_num = 1; n_num < table_size; n_num++) {
- if (node_table[n_num]) {
- u32 slave_num = n_num + LOWEST_SLAVE;
- u32 addr = tipc_addr(z_num, c_num, slave_num);
- n_ptr = c_ptr->nodes[slave_num];
- if (!n_ptr) {
- n_ptr = tipc_node_create(addr);
- }
- if (n_ptr)
- tipc_node_add_router(n_ptr, router);
- }
- }
- break;
- case ROUTE_ADDITION:
- if (!is_slave(tipc_own_addr)) {
- assert(!in_own_cluster(c_ptr->addr) ||
- is_slave(rem_node));
- } else {
- assert(in_own_cluster(c_ptr->addr) &&
- !is_slave(rem_node));
- }
- n_ptr = c_ptr->nodes[tipc_node(rem_node)];
- if (!n_ptr)
- n_ptr = tipc_node_create(rem_node);
- if (n_ptr)
- tipc_node_add_router(n_ptr, router);
- break;
- case ROUTE_REMOVAL:
- if (!is_slave(tipc_own_addr)) {
- assert(!in_own_cluster(c_ptr->addr) ||
- is_slave(rem_node));
- } else {
- assert(in_own_cluster(c_ptr->addr) &&
- !is_slave(rem_node));
- }
- n_ptr = c_ptr->nodes[tipc_node(rem_node)];
- if (n_ptr)
- tipc_node_remove_router(n_ptr, router);
- break;
- default:
- assert(!"Illegal routing manager message received\n");
- }
- buf_discard(buf);
-}
-
-void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router)
-{
- u32 start_entry;
- u32 tstop;
- u32 n_num;
-
- if (is_slave(router))
- return; /* Slave nodes can not be routers */
-
- if (in_own_cluster(c_ptr->addr)) {
- start_entry = LOWEST_SLAVE;
- tstop = c_ptr->highest_slave;
- } else {
- start_entry = 1;
- tstop = c_ptr->highest_node;
- }
-
- for (n_num = start_entry; n_num <= tstop; n_num++) {
- if (c_ptr->nodes[n_num]) {
- tipc_node_remove_router(c_ptr->nodes[n_num], router);
- }
- }
-}
-
-/**
- * tipc_cltr_multicast - multicast message to local nodes
- */
-
-static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
- u32 lower, u32 upper)
-{
- struct sk_buff *buf_copy;
- struct tipc_node *n_ptr;
- u32 n_num;
- u32 tstop;
-
- assert(lower <= upper);
- assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
- ((lower >= LOWEST_SLAVE) && (lower <= tipc_highest_allowed_slave)));
- assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
- ((upper >= LOWEST_SLAVE) && (upper <= tipc_highest_allowed_slave)));
- assert(in_own_cluster(c_ptr->addr));
-
- tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
- if (tstop > upper)
- tstop = upper;
- for (n_num = lower; n_num <= tstop; n_num++) {
- n_ptr = c_ptr->nodes[n_num];
- if (n_ptr && tipc_node_has_active_links(n_ptr)) {
- buf_copy = skb_copy(buf, GFP_ATOMIC);
- if (buf_copy == NULL)
- break;
- msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
- tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
- }
- }
- buf_discard(buf);
-}
-
-/**
- * tipc_cltr_broadcast - broadcast message to all nodes within cluster
- */
-
-void tipc_cltr_broadcast(struct sk_buff *buf)
-{
- struct sk_buff *buf_copy;
- struct cluster *c_ptr;
- struct tipc_node *n_ptr;
- u32 n_num;
- u32 tstart;
- u32 tstop;
- u32 node_type;
-
- if (tipc_mode == TIPC_NET_MODE) {
- c_ptr = tipc_cltr_find(tipc_own_addr);
- assert(in_own_cluster(c_ptr->addr)); /* For now */
-
- /* Send to standard nodes, then repeat loop sending to slaves */
- tstart = 1;
- tstop = c_ptr->highest_node;
- for (node_type = 1; node_type <= 2; node_type++) {
- for (n_num = tstart; n_num <= tstop; n_num++) {
- n_ptr = c_ptr->nodes[n_num];
- if (n_ptr && tipc_node_has_active_links(n_ptr)) {
- buf_copy = skb_copy(buf, GFP_ATOMIC);
- if (buf_copy == NULL)
- goto exit;
- msg_set_destnode(buf_msg(buf_copy),
- n_ptr->addr);
- tipc_link_send(buf_copy, n_ptr->addr,
- n_ptr->addr);
- }
- }
- tstart = LOWEST_SLAVE;
- tstop = c_ptr->highest_slave;
- }
- }
-exit:
- buf_discard(buf);
-}
-
-int tipc_cltr_init(void)
-{
- tipc_highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
- return tipc_cltr_create(tipc_own_addr) ? 0 : -ENOMEM;
-}
-
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
deleted file mode 100644
index 32636d98c9c6..000000000000
--- a/net/tipc/cluster.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * net/tipc/cluster.h: Include file for TIPC cluster management routines
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TIPC_CLUSTER_H
-#define _TIPC_CLUSTER_H
-
-#include "addr.h"
-#include "zone.h"
-
-#define LOWEST_SLAVE 2048u
-
-/**
- * struct cluster - TIPC cluster structure
- * @addr: network address of cluster
- * @owner: pointer to zone that cluster belongs to
- * @nodes: array of pointers to all nodes within cluster
- * @highest_node: id of highest numbered node within cluster
- * @highest_slave: (used for secondary node support)
- */
-
-struct cluster {
- u32 addr;
- struct _zone *owner;
- struct tipc_node **nodes;
- u32 highest_node;
- u32 highest_slave;
-};
-
-
-extern struct tipc_node **tipc_local_nodes;
-extern u32 tipc_highest_allowed_slave;
-extern struct tipc_node_map tipc_cltr_bcast_nodes;
-
-void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router);
-void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest);
-struct tipc_node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector);
-u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref);
-void tipc_cltr_recv_routing_table(struct sk_buff *buf);
-struct cluster *tipc_cltr_create(u32 addr);
-void tipc_cltr_delete(struct cluster *c_ptr);
-void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr);
-void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest);
-void tipc_cltr_broadcast(struct sk_buff *buf);
-int tipc_cltr_init(void);
-
-void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
-void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest);
-void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
-
-static inline struct cluster *tipc_cltr_find(u32 addr)
-{
- struct _zone *z_ptr = tipc_zone_find(addr);
-
- if (z_ptr)
- return z_ptr->clusters[1];
- return NULL;
-}
-
-#endif
diff --git a/net/tipc/config.c b/net/tipc/config.c
index bdde39f0436b..e16750dcf3c1 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -36,17 +36,10 @@
#include "core.h"
#include "port.h"
-#include "link.h"
#include "name_table.h"
-#include "user_reg.h"
#include "config.h"
-struct manager {
- u32 user_ref;
- u32 port_ref;
-};
-
-static struct manager mng = { 0};
+static u32 config_port_ref;
static DEFINE_SPINLOCK(config_lock);
@@ -71,10 +64,8 @@ int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(buf);
int new_tlv_space = TLV_SPACE(tlv_data_size);
- if (skb_tailroom(buf) < new_tlv_space) {
- dbg("tipc_cfg_append_tlv unable to append TLV\n");
+ if (skb_tailroom(buf) < new_tlv_space)
return 0;
- }
skb_put(buf, new_tlv_space);
tlv->tlv_type = htons(tlv_type);
tlv->tlv_len = htons(TLV_LENGTH(tlv_data_size));
@@ -269,38 +260,6 @@ static struct sk_buff *cfg_set_max_ports(void)
return tipc_cfg_reply_none();
}
-static struct sk_buff *cfg_set_max_zones(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value == tipc_max_zones)
- return tipc_cfg_reply_none();
- if (value != delimit(value, 1, 255))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (max zones must be 1-255)");
- if (tipc_mode == TIPC_NET_MODE)
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot change max zones once TIPC has joined a network)");
- tipc_max_zones = value;
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_clusters(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value != delimit(value, 1, 1))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (max clusters fixed at 1)");
- return tipc_cfg_reply_none();
-}
-
static struct sk_buff *cfg_set_max_nodes(void)
{
u32 value;
@@ -320,19 +279,6 @@ static struct sk_buff *cfg_set_max_nodes(void)
return tipc_cfg_reply_none();
}
-static struct sk_buff *cfg_set_max_slaves(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value != 0)
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (max secondary nodes fixed at 0)");
- return tipc_cfg_reply_none();
-}
-
static struct sk_buff *cfg_set_netid(void)
{
u32 value;
@@ -376,8 +322,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
} else if (!tipc_remote_management) {
rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
goto exit;
- }
- else if (cmd >= 0x4000) {
+ } else if (cmd >= 0x4000) {
u32 domain = 0;
if ((tipc_nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
@@ -452,18 +397,9 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
case TIPC_CMD_SET_MAX_SUBSCR:
rep_tlv_buf = cfg_set_max_subscriptions();
break;
- case TIPC_CMD_SET_MAX_ZONES:
- rep_tlv_buf = cfg_set_max_zones();
- break;
- case TIPC_CMD_SET_MAX_CLUSTERS:
- rep_tlv_buf = cfg_set_max_clusters();
- break;
case TIPC_CMD_SET_MAX_NODES:
rep_tlv_buf = cfg_set_max_nodes();
break;
- case TIPC_CMD_SET_MAX_SLAVES:
- rep_tlv_buf = cfg_set_max_slaves();
- break;
case TIPC_CMD_SET_NETID:
rep_tlv_buf = cfg_set_netid();
break;
@@ -479,18 +415,9 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
case TIPC_CMD_GET_MAX_SUBSCR:
rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
break;
- case TIPC_CMD_GET_MAX_ZONES:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_zones);
- break;
- case TIPC_CMD_GET_MAX_CLUSTERS:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_clusters);
- break;
case TIPC_CMD_GET_MAX_NODES:
rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_nodes);
break;
- case TIPC_CMD_GET_MAX_SLAVES:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_slaves);
- break;
case TIPC_CMD_GET_NETID:
rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
break;
@@ -498,6 +425,15 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
rep_tlv_buf =
tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
break;
+ case TIPC_CMD_SET_MAX_ZONES:
+ case TIPC_CMD_GET_MAX_ZONES:
+ case TIPC_CMD_SET_MAX_SLAVES:
+ case TIPC_CMD_GET_MAX_SLAVES:
+ case TIPC_CMD_SET_MAX_CLUSTERS:
+ case TIPC_CMD_GET_MAX_CLUSTERS:
+ rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (obsolete command)");
+ break;
default:
rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (unknown command)");
@@ -560,20 +496,16 @@ int tipc_cfg_init(void)
struct tipc_name_seq seq;
int res;
- res = tipc_attach(&mng.user_ref);
- if (res)
- goto failed;
-
- res = tipc_createport(mng.user_ref, NULL, TIPC_CRITICAL_IMPORTANCE,
+ res = tipc_createport(NULL, TIPC_CRITICAL_IMPORTANCE,
NULL, NULL, NULL,
NULL, cfg_named_msg_event, NULL,
- NULL, &mng.port_ref);
+ NULL, &config_port_ref);
if (res)
goto failed;
seq.type = TIPC_CFG_SRV;
seq.lower = seq.upper = tipc_own_addr;
- res = tipc_nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
+ res = tipc_nametbl_publish_rsv(config_port_ref, TIPC_ZONE_SCOPE, &seq);
if (res)
goto failed;
@@ -581,15 +513,13 @@ int tipc_cfg_init(void)
failed:
err("Unable to create configuration service\n");
- tipc_detach(mng.user_ref);
- mng.user_ref = 0;
return res;
}
void tipc_cfg_stop(void)
{
- if (mng.user_ref) {
- tipc_detach(mng.user_ref);
- mng.user_ref = 0;
+ if (config_port_ref) {
+ tipc_deleteport(config_port_ref);
+ config_port_ref = 0;
}
}
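
The config.c hunks above shrink struct manager to a single port reference and fold the retired zone/cluster/slave limit commands into one shared "obsolete command" reply. A minimal standalone sketch of that grouped-case dispatch, with hypothetical command codes and reply strings (not part of the patch):

/*
 * Sketch only: several retired command codes fall through to a single
 * "obsolete" reply instead of keeping one stub handler each.
 */
#include <stdio.h>

enum { CMD_SET_MAX_NODES, CMD_SET_MAX_ZONES, CMD_SET_MAX_CLUSTERS, CMD_SET_MAX_SLAVES };

static const char *do_cmd(int cmd)
{
	switch (cmd) {
	case CMD_SET_MAX_NODES:
		return "ok";
	case CMD_SET_MAX_ZONES:		/* fall through: all obsolete */
	case CMD_SET_MAX_CLUSTERS:
	case CMD_SET_MAX_SLAVES:
		return "not supported (obsolete command)";
	default:
		return "not supported (unknown command)";
	}
}

int main(void)
{
	printf("%s\n", do_cmd(CMD_SET_MAX_ZONES));
	return 0;
}
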
diff --git a/net/tipc/core.c b/net/tipc/core.c
index f5d62c174de2..e071579e0850 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,36 +34,17 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/random.h>
-
#include "core.h"
#include "ref.h"
-#include "net.h"
-#include "user_reg.h"
#include "name_table.h"
#include "subscr.h"
#include "config.h"
-#ifndef CONFIG_TIPC_ZONES
-#define CONFIG_TIPC_ZONES 3
-#endif
-
-#ifndef CONFIG_TIPC_CLUSTERS
-#define CONFIG_TIPC_CLUSTERS 1
-#endif
-
#ifndef CONFIG_TIPC_NODES
#define CONFIG_TIPC_NODES 255
#endif
-#ifndef CONFIG_TIPC_SLAVE_NODES
-#define CONFIG_TIPC_SLAVE_NODES 0
-#endif
-
#ifndef CONFIG_TIPC_PORTS
#define CONFIG_TIPC_PORTS 8191
#endif
@@ -84,10 +65,7 @@ const char tipc_alphabet[] =
/* configurable TIPC parameters */
u32 tipc_own_addr;
-int tipc_max_zones;
-int tipc_max_clusters;
int tipc_max_nodes;
-int tipc_max_slaves;
int tipc_max_ports;
int tipc_max_subscriptions;
int tipc_max_publications;
@@ -137,10 +115,11 @@ int tipc_core_start_net(unsigned long addr)
{
int res;
- if ((res = tipc_net_start(addr)) ||
- (res = tipc_eth_media_start())) {
+ res = tipc_net_start(addr);
+ if (!res)
+ res = tipc_eth_media_start();
+ if (res)
tipc_core_stop_net();
- }
return res;
}
@@ -159,7 +138,6 @@ static void tipc_core_stop(void)
tipc_handler_stop();
tipc_cfg_stop();
tipc_subscr_stop();
- tipc_reg_stop();
tipc_nametbl_stop();
tipc_ref_table_stop();
tipc_socket_stop();
@@ -180,16 +158,22 @@ static int tipc_core_start(void)
get_random_bytes(&tipc_random, sizeof(tipc_random));
tipc_mode = TIPC_NODE_MODE;
- if ((res = tipc_handler_start()) ||
- (res = tipc_ref_table_init(tipc_max_ports, tipc_random)) ||
- (res = tipc_reg_start()) ||
- (res = tipc_nametbl_init()) ||
- (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
- (res = tipc_k_signal((Handler)tipc_cfg_init, 0)) ||
- (res = tipc_netlink_start()) ||
- (res = tipc_socket_init())) {
+ res = tipc_handler_start();
+ if (!res)
+ res = tipc_ref_table_init(tipc_max_ports, tipc_random);
+ if (!res)
+ res = tipc_nametbl_init();
+ if (!res)
+ res = tipc_k_signal((Handler)tipc_subscr_start, 0);
+ if (!res)
+ res = tipc_k_signal((Handler)tipc_cfg_init, 0);
+ if (!res)
+ res = tipc_netlink_start();
+ if (!res)
+ res = tipc_socket_init();
+ if (res)
tipc_core_stop();
- }
+
return res;
}
@@ -209,13 +193,11 @@ static int __init tipc_init(void)
tipc_max_publications = 10000;
tipc_max_subscriptions = 2000;
tipc_max_ports = CONFIG_TIPC_PORTS;
- tipc_max_zones = CONFIG_TIPC_ZONES;
- tipc_max_clusters = CONFIG_TIPC_CLUSTERS;
tipc_max_nodes = CONFIG_TIPC_NODES;
- tipc_max_slaves = CONFIG_TIPC_SLAVE_NODES;
tipc_net_id = 4711;
- if ((res = tipc_core_start()))
+ res = tipc_core_start();
+ if (res)
err("Unable to start in single node mode\n");
else
info("Started in single node mode\n");
diff --git a/net/tipc/core.h b/net/tipc/core.h
index ca7e171c1043..997158546e25 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -59,7 +59,7 @@
#define TIPC_MOD_VER "2.0.0"
struct tipc_msg; /* msg.h */
-struct print_buf; /* dbg.h */
+struct print_buf; /* log.h */
/*
* TIPC sanity test macros
@@ -83,6 +83,7 @@ struct print_buf; /* dbg.h */
* user-defined buffers can be configured to do the same thing.
*/
extern struct print_buf *const TIPC_NULL;
+extern struct print_buf *const TIPC_CONS;
extern struct print_buf *const TIPC_LOG;
void tipc_printf(struct print_buf *, const char *fmt, ...);
@@ -95,73 +96,35 @@ void tipc_printf(struct print_buf *, const char *fmt, ...);
#define TIPC_OUTPUT TIPC_LOG
#endif
-/*
- * TIPC can be configured to send system messages to TIPC_OUTPUT
- * or to the system console only.
- */
-
-#ifdef CONFIG_TIPC_DEBUG
-
#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_ERR "TIPC: " fmt, ## arg)
+ KERN_ERR "TIPC: " fmt, ## arg)
#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_WARNING "TIPC: " fmt, ## arg)
+ KERN_WARNING "TIPC: " fmt, ## arg)
#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_NOTICE "TIPC: " fmt, ## arg)
-
-#else
+ KERN_NOTICE "TIPC: " fmt, ## arg)
-#define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg)
-#define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg)
-#define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg)
-
-#endif
+#ifdef CONFIG_TIPC_DEBUG
/*
* DBG_OUTPUT is the destination print buffer for debug messages.
- * It defaults to the the null print buffer, but can be redefined
- * (typically in the individual .c files being debugged) to allow
- * selected debug messages to be generated where needed.
*/
#ifndef DBG_OUTPUT
-#define DBG_OUTPUT TIPC_NULL
+#define DBG_OUTPUT TIPC_LOG
#endif
-/*
- * TIPC can be configured to send debug messages to the specified print buffer
- * (typically DBG_OUTPUT) or to suppress them entirely.
- */
-
-#ifdef CONFIG_TIPC_DEBUG
+#define dbg(fmt, arg...) tipc_printf(DBG_OUTPUT, KERN_DEBUG fmt, ## arg);
-#define dbg(fmt, arg...) \
- do { \
- if (DBG_OUTPUT != TIPC_NULL) \
- tipc_printf(DBG_OUTPUT, fmt, ## arg); \
- } while (0)
-#define msg_dbg(msg, txt) \
- do { \
- if (DBG_OUTPUT != TIPC_NULL) \
- tipc_msg_dbg(DBG_OUTPUT, msg, txt); \
- } while (0)
-#define dump(fmt, arg...) \
- do { \
- if (DBG_OUTPUT != TIPC_NULL) \
- tipc_dump_dbg(DBG_OUTPUT, fmt, ##arg); \
- } while (0)
+#define msg_dbg(msg, txt) tipc_msg_dbg(DBG_OUTPUT, msg, txt);
void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
-void tipc_dump_dbg(struct print_buf *, const char *fmt, ...);
#else
#define dbg(fmt, arg...) do {} while (0)
#define msg_dbg(msg, txt) do {} while (0)
-#define dump(fmt, arg...) do {} while (0)
-#define tipc_msg_dbg(...) do {} while (0)
-#define tipc_dump_dbg(...) do {} while (0)
+#define tipc_msg_dbg(buf, msg, txt) do {} while (0)
#endif
@@ -184,10 +147,7 @@ void tipc_dump_dbg(struct print_buf *, const char *fmt, ...);
*/
extern u32 tipc_own_addr;
-extern int tipc_max_zones;
-extern int tipc_max_clusters;
extern int tipc_max_nodes;
-extern int tipc_max_slaves;
extern int tipc_max_ports;
extern int tipc_max_subscriptions;
extern int tipc_max_publications;
@@ -246,7 +206,6 @@ u32 tipc_k_signal(Handler routine, unsigned long argument);
static inline void k_init_timer(struct timer_list *timer, Handler routine,
unsigned long argument)
{
- dbg("initializing timer %p\n", timer);
setup_timer(timer, routine, argument);
}
@@ -266,7 +225,6 @@ static inline void k_init_timer(struct timer_list *timer, Handler routine,
static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
{
- dbg("starting timer %p for %u\n", timer, msec);
mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
}
@@ -283,7 +241,6 @@ static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
static inline void k_cancel_timer(struct timer_list *timer)
{
- dbg("cancelling timer %p\n", timer);
del_timer_sync(timer);
}
@@ -301,7 +258,6 @@ static inline void k_cancel_timer(struct timer_list *timer)
static inline void k_term_timer(struct timer_list *timer)
{
- dbg("terminating timer %p\n", timer);
}
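
The core.h hunks above make err/warn/info always go through the configured print buffer, while dbg() and msg_dbg() compile to empty statements unless CONFIG_TIPC_DEBUG is set (with DBG_OUTPUT now defaulting to TIPC_LOG). A sketch of that compile-time debug switch, where ENABLE_DBG and log_print() are hypothetical stand-ins for CONFIG_TIPC_DEBUG and tipc_printf() (not part of the patch):

/*
 * Sketch only: dbg() forwards to the log when debugging is enabled and
 * otherwise expands to a no-op, so its arguments are never evaluated.
 */
#include <stdio.h>

#define log_print(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#define info(fmt, ...)		log_print("TIPC: " fmt, ##__VA_ARGS__)

#define ENABLE_DBG 1

#if ENABLE_DBG
#define dbg(fmt, ...)		log_print("DBG: " fmt, ##__VA_ARGS__)
#else
#define dbg(fmt, ...)		do {} while (0)
#endif

int main(void)
{
	info("Started in single node mode\n");
	dbg("timer armed for %u ms\n", 125u);	/* vanishes when ENABLE_DBG is 0 */
	return 0;
}
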
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index f2ce36baf42e..fa026bd91a68 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -37,8 +37,6 @@
#include "core.h"
#include "link.h"
#include "discover.h"
-#include "port.h"
-#include "name_table.h"
#define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */
#define TIPC_LINK_REQ_FAST 2000 /* normal delay if bearer has no links */
@@ -132,8 +130,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
u32 net_id = msg_bc_netid(msg);
u32 type = msg_type(msg);
- msg_get_media_addr(msg,&media_addr);
- msg_dbg(msg, "RECV:");
+ msg_get_media_addr(msg, &media_addr);
buf_discard(buf);
if (net_id != tipc_net_id)
@@ -149,10 +146,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
}
if (!tipc_in_scope(dest, tipc_own_addr))
return;
- if (is_slave(tipc_own_addr) && is_slave(orig))
- return;
- if (is_slave(orig) && !in_own_cluster(orig))
- return;
if (in_own_cluster(orig)) {
/* Always accept link here */
struct sk_buff *rbuf;
@@ -160,7 +153,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
struct tipc_node *n_ptr = tipc_node_find(orig);
int link_fully_up;
- dbg(" in own cluster\n");
if (n_ptr == NULL) {
n_ptr = tipc_node_create(orig);
if (!n_ptr)
@@ -177,7 +169,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
link = n_ptr->links[b_ptr->identity];
if (!link) {
- dbg("creating link\n");
link = tipc_link_create(b_ptr, orig, &media_addr);
if (!link) {
spin_unlock_bh(&n_ptr->lock);
@@ -202,7 +193,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
return;
rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
if (rbuf != NULL) {
- msg_dbg(buf_msg(rbuf),"SEND:");
b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
buf_discard(rbuf);
}
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index ee683cc8f4b1..b69092eb95d8 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -34,10 +34,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-
#include "core.h"
#include "bearer.h"
@@ -60,7 +56,7 @@ struct eth_bearer {
};
static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
-static int eth_started = 0;
+static int eth_started;
static struct notifier_block notifier;
/**
@@ -148,7 +144,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
/* Find device with specified name */
- for_each_netdev(&init_net, pdev){
+ for_each_netdev(&init_net, pdev) {
if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
dev = pdev;
break;
@@ -159,7 +155,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
/* Find Ethernet bearer for device (or create one) */
- for (;(eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev); eb_ptr++);
+ while ((eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev))
+ eb_ptr++;
if (eb_ptr == stop)
return -EDQUOT;
if (!eb_ptr->dev) {
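
The eth_media.c hunk above reshapes the bearer-slot scan from an empty-body for(...); loop into an explicit while loop. A reduced sketch of that search over a fixed table, where the slot array stands in for eth_bearers[MAX_ETH_BEARERS] (not part of the patch):

/*
 * Sketch only: advance to the first free or matching slot; reaching the
 * end of the table means every slot is taken.
 */
#include <stdio.h>

#define MAX_SLOTS 2

struct slot { const void *dev; };
static struct slot slots[MAX_SLOTS];

static struct slot *find_slot(const void *dev)
{
	struct slot *s = &slots[0];
	struct slot *stop = &slots[MAX_SLOTS];

	while ((s != stop) && s->dev && (s->dev != dev))
		s++;
	return (s == stop) ? NULL : s;	/* NULL: all slots taken */
}

int main(void)
{
	int dev;

	printf("slot %s\n", find_slot(&dev) ? "available" : "exhausted");
	return 0;
}
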
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 0c70010a7dfe..274c98e164b7 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -45,7 +45,7 @@ struct queue_item {
static struct kmem_cache *tipc_queue_item_cache;
static struct list_head signal_queue_head;
static DEFINE_SPINLOCK(qitem_lock);
-static int handler_enabled = 0;
+static int handler_enabled;
static void process_signal_queue(unsigned long dummy);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index cf414cf05e72..18702f58d111 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -96,75 +96,10 @@ static int link_send_sections_long(struct port *sender,
static void link_check_defragm_bufs(struct link *l_ptr);
static void link_state_event(struct link *l_ptr, u32 event);
static void link_reset_statistics(struct link *l_ptr);
-static void link_print(struct link *l_ptr, struct print_buf *buf,
- const char *str);
+static void link_print(struct link *l_ptr, const char *str);
static void link_start(struct link *l_ptr);
static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
-
-/*
- * Debugging code used by link routines only
- *
- * When debugging link problems on a system that has multiple links,
- * the standard TIPC debugging routines may not be useful since they
- * allow the output from multiple links to be intermixed. For this reason
- * routines of the form "dbg_link_XXX()" have been created that will capture
- * debug info into a link's personal print buffer, which can then be dumped
- * into the TIPC system log (TIPC_LOG) upon request.
- *
- * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
- * of the print buffer used by each link. If LINK_LOG_BUF_SIZE is set to 0,
- * the dbg_link_XXX() routines simply send their output to the standard
- * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
- * when there is only a single link in the system being debugged.
- *
- * Notes:
- * - When enabled, LINK_LOG_BUF_SIZE should be set to at least TIPC_PB_MIN_SIZE
- * - "l_ptr" must be valid when using dbg_link_XXX() macros
- */
-
-#define LINK_LOG_BUF_SIZE 0
-
-#define dbg_link(fmt, arg...) \
- do { \
- if (LINK_LOG_BUF_SIZE) \
- tipc_printf(&l_ptr->print_buf, fmt, ## arg); \
- } while (0)
-#define dbg_link_msg(msg, txt) \
- do { \
- if (LINK_LOG_BUF_SIZE) \
- tipc_msg_dbg(&l_ptr->print_buf, msg, txt); \
- } while (0)
-#define dbg_link_state(txt) \
- do { \
- if (LINK_LOG_BUF_SIZE) \
- link_print(l_ptr, &l_ptr->print_buf, txt); \
- } while (0)
-#define dbg_link_dump() do { \
- if (LINK_LOG_BUF_SIZE) { \
- tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
- tipc_printbuf_move(LOG, &l_ptr->print_buf); \
- } \
-} while (0)
-
-static void dbg_print_link(struct link *l_ptr, const char *str)
-{
- if (DBG_OUTPUT != TIPC_NULL)
- link_print(l_ptr, DBG_OUTPUT, str);
-}
-
-static void dbg_print_buf_chain(struct sk_buff *root_buf)
-{
- if (DBG_OUTPUT != TIPC_NULL) {
- struct sk_buff *buf = root_buf;
-
- while (buf) {
- msg_dbg(buf_msg(buf), "In chain: ");
- buf = buf->next;
- }
- }
-}
-
/*
* Simple link routines
*/
@@ -252,14 +187,17 @@ static int link_name_validate(const char *name, struct link_name *name_parts)
/* ensure all component parts of link name are present */
addr_local = name_copy;
- if ((if_local = strchr(addr_local, ':')) == NULL)
+ if_local = strchr(addr_local, ':');
+ if (if_local == NULL)
return 0;
*(if_local++) = 0;
- if ((addr_peer = strchr(if_local, '-')) == NULL)
+ addr_peer = strchr(if_local, '-');
+ if (addr_peer == NULL)
return 0;
*(addr_peer++) = 0;
if_local_len = addr_peer - if_local;
- if ((if_peer = strchr(addr_peer, ':')) == NULL)
+ if_peer = strchr(addr_peer, ':');
+ if (if_peer == NULL)
return 0;
*(if_peer++) = 0;
if_peer_len = strlen(if_peer) + 1;
@@ -378,17 +316,6 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
return NULL;
}
- if (LINK_LOG_BUF_SIZE) {
- char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
-
- if (!pb) {
- kfree(l_ptr);
- warn("Link creation failed, no memory for print buffer\n");
- return NULL;
- }
- tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
- }
-
l_ptr->addr = peer;
if_name = strchr(b_ptr->publ.name, ':') + 1;
sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
@@ -423,8 +350,6 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
l_ptr->owner = tipc_node_attach_link(l_ptr);
if (!l_ptr->owner) {
- if (LINK_LOG_BUF_SIZE)
- kfree(l_ptr->print_buf.buf);
kfree(l_ptr);
return NULL;
}
@@ -433,9 +358,6 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
list_add_tail(&l_ptr->link_list, &b_ptr->links);
tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
- dbg("tipc_link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
- l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
-
return l_ptr;
}
@@ -455,8 +377,6 @@ void tipc_link_delete(struct link *l_ptr)
return;
}
- dbg("tipc_link_delete()\n");
-
k_cancel_timer(&l_ptr->timer);
tipc_node_lock(l_ptr->owner);
@@ -464,8 +384,6 @@ void tipc_link_delete(struct link *l_ptr)
tipc_node_detach_link(l_ptr->owner, l_ptr);
tipc_link_stop(l_ptr);
list_del_init(&l_ptr->link_list);
- if (LINK_LOG_BUF_SIZE)
- kfree(l_ptr->print_buf.buf);
tipc_node_unlock(l_ptr->owner);
k_term_timer(&l_ptr->timer);
kfree(l_ptr);
@@ -473,7 +391,6 @@ void tipc_link_delete(struct link *l_ptr)
static void link_start(struct link *l_ptr)
{
- dbg("link_start %x\n", l_ptr);
link_state_event(l_ptr, STARTING_EVT);
}
@@ -625,7 +542,6 @@ void tipc_link_reset(struct link *l_ptr)
link_init_max_pkt(l_ptr);
l_ptr->state = RESET_UNKNOWN;
- dbg_link_state("Resetting Link\n");
if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
return;
@@ -699,25 +615,18 @@ static void link_state_event(struct link *l_ptr, unsigned event)
return; /* Not yet. */
if (link_blocked(l_ptr)) {
- if (event == TIMEOUT_EVT) {
+ if (event == TIMEOUT_EVT)
link_set_timer(l_ptr, cont_intv);
- }
return; /* Changeover going on */
}
- dbg_link("STATE_EV: <%s> ", l_ptr->name);
switch (l_ptr->state) {
case WORKING_WORKING:
- dbg_link("WW/");
switch (event) {
case TRAFFIC_MSG_EVT:
- dbg_link("TRF-");
- /* fall through */
case ACTIVATE_MSG:
- dbg_link("ACT\n");
break;
case TIMEOUT_EVT:
- dbg_link("TIM ");
if (l_ptr->next_in_no != l_ptr->checkpoint) {
l_ptr->checkpoint = l_ptr->next_in_no;
if (tipc_bclink_acks_missing(l_ptr->owner)) {
@@ -732,7 +641,6 @@ static void link_state_event(struct link *l_ptr, unsigned event)
link_set_timer(l_ptr, cont_intv);
break;
}
- dbg_link(" -> WU\n");
l_ptr->state = WORKING_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
@@ -740,7 +648,6 @@ static void link_state_event(struct link *l_ptr, unsigned event)
link_set_timer(l_ptr, cont_intv / 4);
break;
case RESET_MSG:
- dbg_link("RES -> RR\n");
info("Resetting link <%s>, requested by peer\n",
l_ptr->name);
tipc_link_reset(l_ptr);
@@ -755,18 +662,14 @@ static void link_state_event(struct link *l_ptr, unsigned event)
}
break;
case WORKING_UNKNOWN:
- dbg_link("WU/");
switch (event) {
case TRAFFIC_MSG_EVT:
- dbg_link("TRF-");
case ACTIVATE_MSG:
- dbg_link("ACT -> WW\n");
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- dbg_link("RES -> RR\n");
info("Resetting link <%s>, requested by peer "
"while probing\n", l_ptr->name);
tipc_link_reset(l_ptr);
@@ -777,9 +680,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
link_set_timer(l_ptr, cont_intv);
break;
case TIMEOUT_EVT:
- dbg_link("TIM ");
if (l_ptr->next_in_no != l_ptr->checkpoint) {
- dbg_link("-> WW\n");
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
l_ptr->checkpoint = l_ptr->next_in_no;
@@ -790,16 +691,11 @@ static void link_state_event(struct link *l_ptr, unsigned event)
}
link_set_timer(l_ptr, cont_intv);
} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
- dbg_link("Probing %u/%u,timer = %u ms)\n",
- l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
- cont_intv / 4);
tipc_link_send_proto_msg(l_ptr, STATE_MSG,
1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
} else { /* Link has failed */
- dbg_link("-> RU (%u probes unanswered)\n",
- l_ptr->fsm_msg_cnt);
warn("Resetting link <%s>, peer not responding\n",
l_ptr->name);
tipc_link_reset(l_ptr);
@@ -816,18 +712,13 @@ static void link_state_event(struct link *l_ptr, unsigned event)
}
break;
case RESET_UNKNOWN:
- dbg_link("RU/");
switch (event) {
case TRAFFIC_MSG_EVT:
- dbg_link("TRF-\n");
break;
case ACTIVATE_MSG:
other = l_ptr->owner->active_links[0];
- if (other && link_working_unknown(other)) {
- dbg_link("ACT\n");
+ if (other && link_working_unknown(other))
break;
- }
- dbg_link("ACT -> WW\n");
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
@@ -836,8 +727,6 @@ static void link_state_event(struct link *l_ptr, unsigned event)
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- dbg_link("RES\n");
- dbg_link(" -> RR\n");
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
@@ -845,11 +734,9 @@ static void link_state_event(struct link *l_ptr, unsigned event)
link_set_timer(l_ptr, cont_intv);
break;
case STARTING_EVT:
- dbg_link("START-");
l_ptr->started = 1;
/* fall through */
case TIMEOUT_EVT:
- dbg_link("TIM\n");
tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
@@ -859,18 +746,12 @@ static void link_state_event(struct link *l_ptr, unsigned event)
}
break;
case RESET_RESET:
- dbg_link("RR/ ");
switch (event) {
case TRAFFIC_MSG_EVT:
- dbg_link("TRF-");
- /* fall through */
case ACTIVATE_MSG:
other = l_ptr->owner->active_links[0];
- if (other && link_working_unknown(other)) {
- dbg_link("ACT\n");
+ if (other && link_working_unknown(other))
break;
- }
- dbg_link("ACT -> WW\n");
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
@@ -879,14 +760,11 @@ static void link_state_event(struct link *l_ptr, unsigned event)
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- dbg_link("RES\n");
break;
case TIMEOUT_EVT:
- dbg_link("TIM\n");
tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
- dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
break;
default:
err("Unknown link event %u in RR state\n", event);
@@ -926,9 +804,6 @@ static int link_bundle_buf(struct link *l_ptr,
skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
msg_set_size(bundler_msg, to_pos + size);
msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
- dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
- msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
- msg_dbg(msg, "PACKD:");
buf_discard(buf);
l_ptr->stats.sent_bundled++;
return 1;
@@ -977,7 +852,6 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
return link_schedule_port(l_ptr, msg_origport(msg),
size);
}
- msg_dbg(msg, "TIPC: Congestion, throwing away\n");
buf_discard(buf);
if (imp > CONN_MANAGER) {
warn("Resetting link <%s>, send queue full", l_ptr->name);
@@ -1061,22 +935,16 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
int res = -ELINKCONG;
read_lock_bh(&tipc_net_lock);
- n_ptr = tipc_node_select(dest, selector);
+ n_ptr = tipc_node_find(dest);
if (n_ptr) {
tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[selector & 1];
- if (l_ptr) {
- dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
+ if (l_ptr)
res = tipc_link_send_buf(l_ptr, buf);
- } else {
- dbg("Attempt to send msg to unreachable node:\n");
- msg_dbg(buf_msg(buf),">>>");
+ else
buf_discard(buf);
- }
tipc_node_unlock(n_ptr);
} else {
- dbg("Attempt to send msg to unknown node:\n");
- msg_dbg(buf_msg(buf),">>>");
buf_discard(buf);
}
read_unlock_bh(&tipc_net_lock);
@@ -1103,17 +971,14 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
&l_ptr->media_addr))) {
l_ptr->unacked_window = 0;
- msg_dbg(msg,"SENT_FAST:");
return res;
}
- dbg("failed sent fast...\n");
tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
l_ptr->stats.bearer_congs++;
l_ptr->next_out = buf;
return res;
}
- }
- else
+ } else
*used_max_pkt = l_ptr->max_pkt;
}
return tipc_link_send_buf(l_ptr, buf); /* All other cases */
@@ -1137,12 +1002,10 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
return tipc_port_recv_msg(buf);
read_lock_bh(&tipc_net_lock);
- n_ptr = tipc_node_select(destnode, selector);
+ n_ptr = tipc_node_find(destnode);
if (likely(n_ptr)) {
tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[selector];
- dbg("send_fast: buf %x selected %x, destnode = %x\n",
- buf, l_ptr, destnode);
if (likely(l_ptr)) {
res = link_send_buf_fast(l_ptr, buf, &dummy);
tipc_node_unlock(n_ptr);
@@ -1186,7 +1049,7 @@ again:
!sender->user_port, &buf);
read_lock_bh(&tipc_net_lock);
- node = tipc_node_select(destaddr, selector);
+ node = tipc_node_find(destaddr);
if (likely(node)) {
tipc_node_lock(node);
l_ptr = node->active_links[selector];
@@ -1269,10 +1132,10 @@ static int link_send_sections_long(struct port *sender,
struct tipc_node *node;
struct tipc_msg *hdr = &sender->publ.phdr;
u32 dsz = msg_data_sz(hdr);
- u32 max_pkt,fragm_sz,rest;
+ u32 max_pkt, fragm_sz, rest;
struct tipc_msg fragm_hdr;
- struct sk_buff *buf,*buf_chain,*prev;
- u32 fragm_crs,fragm_rest,hsz,sect_rest;
+ struct sk_buff *buf, *buf_chain, *prev;
+ u32 fragm_crs, fragm_rest, hsz, sect_rest;
const unchar *sect_crs;
int curr_sect;
u32 fragm_no;
@@ -1292,7 +1155,6 @@ again:
/* Prepare reusable fragment header: */
- msg_dbg(hdr, ">FRAGMENTING>");
tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
INT_H_SIZE, msg_destnode(hdr));
msg_set_link_selector(&fragm_hdr, sender->publ.ref);
@@ -1308,7 +1170,6 @@ again:
skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
hsz = msg_hdr_sz(hdr);
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
- msg_dbg(buf_msg(buf), ">BUILD>");
/* Chop up message: */
@@ -1351,7 +1212,7 @@ error:
/* Initiate new fragment: */
if (rest <= fragm_sz) {
fragm_sz = rest;
- msg_set_type(&fragm_hdr,LAST_FRAGMENT);
+ msg_set_type(&fragm_hdr, LAST_FRAGMENT);
} else {
msg_set_type(&fragm_hdr, FRAGMENT);
}
@@ -1367,16 +1228,14 @@ error:
skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
fragm_crs = INT_H_SIZE;
fragm_rest = fragm_sz;
- msg_dbg(buf_msg(buf)," >BUILD>");
}
- }
- while (rest > 0);
+ } while (rest > 0);
/*
* Now we have a buffer chain. Select a link and check
* that packet size is still OK
*/
- node = tipc_node_select(destaddr, sender->publ.ref & 1);
+ node = tipc_node_find(destaddr);
if (likely(node)) {
tipc_node_lock(node);
l_ptr = node->active_links[sender->publ.ref & 1];
@@ -1417,7 +1276,6 @@ reject:
l_ptr->stats.sent_fragments++;
msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
link_add_to_outqueue(l_ptr, buf, msg);
- msg_dbg(msg, ">ADD>");
buf = next;
}
@@ -1459,14 +1317,12 @@ u32 tipc_link_push_packet(struct link *l_ptr)
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
- msg_dbg(buf_msg(buf), ">DEF-RETR>");
l_ptr->retransm_queue_head = mod(++r_q_head);
l_ptr->retransm_queue_size = --r_q_size;
l_ptr->stats.retransmitted++;
return 0;
} else {
l_ptr->stats.bearer_congs++;
- msg_dbg(buf_msg(buf), "|>DEF-RETR>");
return PUSH_FAILED;
}
}
@@ -1476,15 +1332,13 @@ u32 tipc_link_push_packet(struct link *l_ptr)
buf = l_ptr->proto_msg_queue;
if (buf) {
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
- msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in);
+ msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
- msg_dbg(buf_msg(buf), ">DEF-PROT>");
l_ptr->unacked_window = 0;
buf_discard(buf);
l_ptr->proto_msg_queue = NULL;
return 0;
} else {
- msg_dbg(buf_msg(buf), "|>DEF-PROT>");
l_ptr->stats.bearer_congs++;
return PUSH_FAILED;
}
@@ -1504,11 +1358,9 @@ u32 tipc_link_push_packet(struct link *l_ptr)
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
if (msg_user(msg) == MSG_BUNDLER)
msg_set_type(msg, CLOSED_MSG);
- msg_dbg(msg, ">PUSH-DATA>");
l_ptr->next_out = buf->next;
return 0;
} else {
- msg_dbg(msg, "|PUSH-DATA|");
l_ptr->stats.bearer_congs++;
return PUSH_FAILED;
}
@@ -1556,8 +1408,7 @@ static void link_reset_all(unsigned long addr)
for (i = 0; i < MAX_BEARERS; i++) {
if (n_ptr->links[i]) {
- link_print(n_ptr->links[i], TIPC_OUTPUT,
- "Resetting link\n");
+ link_print(n_ptr->links[i], "Resetting link\n");
tipc_link_reset(n_ptr->links[i]);
}
}
@@ -1571,13 +1422,12 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
struct tipc_msg *msg = buf_msg(buf);
warn("Retransmission failure on link <%s>\n", l_ptr->name);
- tipc_msg_dbg(TIPC_OUTPUT, msg, ">RETR-FAIL>");
if (l_ptr->addr) {
/* Handle failure on standard link */
- link_print(l_ptr, TIPC_OUTPUT, "Resetting link\n");
+ link_print(l_ptr, "Resetting link\n");
tipc_link_reset(l_ptr);
} else {
@@ -1587,21 +1437,21 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
struct tipc_node *n_ptr;
char addr_string[16];
- tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg));
- tipc_printf(TIPC_OUTPUT, "Outstanding acks: %lu\n",
- (unsigned long) TIPC_SKB_CB(buf)->handle);
+ info("Msg seq number: %u, ", msg_seqno(msg));
+ info("Outstanding acks: %lu\n",
+ (unsigned long) TIPC_SKB_CB(buf)->handle);
n_ptr = l_ptr->owner->next;
tipc_node_lock(n_ptr);
tipc_addr_string_fill(addr_string, n_ptr->addr);
- tipc_printf(TIPC_OUTPUT, "Multicast link info for %s\n", addr_string);
- tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported);
- tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked);
- tipc_printf(TIPC_OUTPUT, "Last in: %u, ", n_ptr->bclink.last_in);
- tipc_printf(TIPC_OUTPUT, "Gap after: %u, ", n_ptr->bclink.gap_after);
- tipc_printf(TIPC_OUTPUT, "Gap to: %u\n", n_ptr->bclink.gap_to);
- tipc_printf(TIPC_OUTPUT, "Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
+ info("Multicast link info for %s\n", addr_string);
+ info("Supported: %d, ", n_ptr->bclink.supported);
+ info("Acked: %u\n", n_ptr->bclink.acked);
+ info("Last in: %u, ", n_ptr->bclink.last_in);
+ info("Gap after: %u, ", n_ptr->bclink.gap_after);
+ info("Gap to: %u\n", n_ptr->bclink.gap_to);
+ info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
@@ -1621,12 +1471,8 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
msg = buf_msg(buf);
- dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
-
if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
if (l_ptr->retransm_queue_size == 0) {
- msg_dbg(msg, ">NO_RETR->BCONG>");
- dbg_print_link(l_ptr, " ");
l_ptr->retransm_queue_head = msg_seqno(msg);
l_ptr->retransm_queue_size = retransmits;
} else {
@@ -1653,7 +1499,6 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
- msg_dbg(buf_msg(buf), ">RETR>");
buf = buf->next;
retransmits--;
l_ptr->stats.retransmitted++;
@@ -1779,9 +1624,8 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
/* Ensure message data is a single contiguous unit */
- if (unlikely(buf_linearize(buf))) {
+ if (unlikely(buf_linearize(buf)))
goto cont;
- }
/* Handle arrival of a non-unicast link message */
@@ -1893,7 +1737,7 @@ deliver:
continue;
case ROUTE_DISTRIBUTOR:
tipc_node_unlock(n_ptr);
- tipc_cltr_recv_routing_table(buf);
+ buf_discard(buf);
continue;
case NAME_DISTRIBUTOR:
tipc_node_unlock(n_ptr);
@@ -1939,12 +1783,10 @@ deliver:
tipc_node_unlock(n_ptr);
continue;
}
- msg_dbg(msg,"NSEQ<REC<");
link_state_event(l_ptr, TRAFFIC_MSG_EVT);
if (link_working_working(l_ptr)) {
/* Re-insert in front of queue */
- msg_dbg(msg,"RECV-REINS:");
buf->next = head;
head = buf;
tipc_node_unlock(n_ptr);
@@ -1998,13 +1840,11 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
*head = buf;
return 1;
}
- if (seq_no == msg_seqno(msg)) {
+ if (seq_no == msg_seqno(msg))
break;
- }
prev = crs;
crs = crs->next;
- }
- while (crs);
+ } while (crs);
/* Message is a duplicate of an existing message */
@@ -2026,9 +1866,6 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
return;
}
- dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n",
- seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
-
/* Record OOS packet arrival (force mismatch on next timeout) */
l_ptr->checkpoint--;
@@ -2118,11 +1955,10 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
msg_set_max_pkt(msg, l_ptr->max_pkt_target);
}
- if (tipc_node_has_redundant_links(l_ptr->owner)) {
+ if (tipc_node_has_redundant_links(l_ptr->owner))
msg_set_redundant_link(msg);
- } else {
+ else
msg_clear_redundant_link(msg);
- }
msg_set_linkprio(msg, l_ptr->priority);
/* Ensure sequence number will not fit : */
@@ -2146,8 +1982,6 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
/* Message can be sent */
- msg_dbg(msg, ">>");
-
buf = tipc_buf_acquire(msg_size);
if (!buf)
return;
@@ -2181,8 +2015,6 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
u32 msg_tol;
struct tipc_msg *msg = buf_msg(buf);
- dbg("AT(%u):", jiffies_to_msecs(jiffies));
- msg_dbg(msg, "<<");
if (link_blocked(l_ptr))
goto exit;
@@ -2201,11 +2033,8 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
case RESET_MSG:
if (!link_working_unknown(l_ptr) &&
(l_ptr->peer_session != INVALID_SESSION)) {
- if (msg_session(msg) == l_ptr->peer_session) {
- dbg("Duplicate RESET: %u<->%u\n",
- msg_session(msg), l_ptr->peer_session);
+ if (msg_session(msg) == l_ptr->peer_session)
break; /* duplicate: ignore */
- }
}
/* fall thru' */
case ACTIVATE_MSG:
@@ -2213,8 +2042,8 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
- if ((msg_tol = msg_link_tolerance(msg)) &&
- (msg_tol > l_ptr->tolerance))
+ msg_tol = msg_link_tolerance(msg);
+ if (msg_tol > l_ptr->tolerance)
link_set_supervision_props(l_ptr, msg_tol);
if (msg_linkprio(msg) > l_ptr->priority)
@@ -2237,13 +2066,13 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
l_ptr->peer_bearer_id = msg_bearer_id(msg);
/* Synchronize broadcast sequence numbers */
- if (!tipc_node_has_redundant_links(l_ptr->owner)) {
+ if (!tipc_node_has_redundant_links(l_ptr->owner))
l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
- }
break;
case STATE_MSG:
- if ((msg_tol = msg_link_tolerance(msg)))
+ msg_tol = msg_link_tolerance(msg);
+ if (msg_tol)
link_set_supervision_props(l_ptr, msg_tol);
if (msg_linkprio(msg) &&
@@ -2266,8 +2095,6 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
max_pkt_ack = msg_max_pkt(msg);
if (max_pkt_ack > l_ptr->max_pkt) {
- dbg("Link <%s> updated MTU %u -> %u\n",
- l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
l_ptr->max_pkt = max_pkt_ack;
l_ptr->max_pkt_probes = 0;
}
@@ -2275,9 +2102,8 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
max_pkt_ack = 0;
if (msg_probe(msg)) {
l_ptr->stats.recv_probes++;
- if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
+ if (msg_size(msg) > sizeof(l_ptr->proto_msg))
max_pkt_ack = msg_size(msg);
- }
}
/* Protocol message before retransmits, reduce loss risk */
@@ -2289,14 +2115,11 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
0, rec_gap, 0, 0, max_pkt_ack);
}
if (msg_seq_gap(msg)) {
- msg_dbg(msg, "With Gap:");
l_ptr->stats.recv_nacks++;
tipc_link_retransmit(l_ptr, l_ptr->first_out,
msg_seq_gap(msg));
}
break;
- default:
- msg_dbg(buf_msg(buf), "<DISCARDING UNKNOWN<");
}
exit:
buf_discard(buf);
@@ -2331,8 +2154,6 @@ static void tipc_link_tunnel(struct link *l_ptr,
}
skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
- dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
- msg_dbg(buf_msg(buf), ">SEND>");
tipc_link_send_buf(tunnel, buf);
}
@@ -2364,7 +2185,6 @@ void tipc_link_changeover(struct link *l_ptr)
ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
msg_set_msgcnt(&tunnel_hdr, msgcount);
- dbg("Link changeover requires %u tunnel messages\n", msgcount);
if (!l_ptr->first_out) {
struct sk_buff *buf;
@@ -2373,9 +2193,6 @@ void tipc_link_changeover(struct link *l_ptr)
if (buf) {
skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
msg_set_size(&tunnel_hdr, INT_H_SIZE);
- dbg("%c->%c:", l_ptr->b_ptr->net_plane,
- tunnel->b_ptr->net_plane);
- msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
tipc_link_send_buf(tunnel, buf);
} else {
warn("Link changeover error, "
@@ -2392,11 +2209,11 @@ void tipc_link_changeover(struct link *l_ptr)
if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
struct tipc_msg *m = msg_get_wrapped(msg);
- unchar* pos = (unchar*)m;
+ unchar *pos = (unchar *)m;
msgcount = msg_msgcnt(msg);
while (msgcount--) {
- msg_set_seqno(m,msg_seqno(msg));
+ msg_set_seqno(m, msg_seqno(msg));
tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
msg_link_selector(m));
pos += align(msg_size(m));
@@ -2439,9 +2256,6 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
length);
- dbg("%c->%c:", l_ptr->b_ptr->net_plane,
- tunnel->b_ptr->net_plane);
- msg_dbg(buf_msg(outbuf), ">SEND>");
tipc_link_send_buf(tunnel, outbuf);
if (!tipc_link_is_up(l_ptr))
return;
@@ -2488,31 +2302,24 @@ static int link_recv_changeover_msg(struct link **l_ptr,
u32 msg_count = msg_msgcnt(tunnel_msg);
dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
- if (!dest_link) {
- msg_dbg(tunnel_msg, "NOLINK/<REC<");
+ if (!dest_link)
goto exit;
- }
if (dest_link == *l_ptr) {
err("Unexpected changeover message on link <%s>\n",
(*l_ptr)->name);
goto exit;
}
- dbg("%c<-%c:", dest_link->b_ptr->net_plane,
- (*l_ptr)->b_ptr->net_plane);
*l_ptr = dest_link;
msg = msg_get_wrapped(tunnel_msg);
if (msg_typ == DUPLICATE_MSG) {
- if (less(msg_seqno(msg), mod(dest_link->next_in_no))) {
- msg_dbg(tunnel_msg, "DROP/<REC<");
+ if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
goto exit;
- }
- *buf = buf_extract(tunnel_buf,INT_H_SIZE);
+ *buf = buf_extract(tunnel_buf, INT_H_SIZE);
if (*buf == NULL) {
warn("Link changeover error, duplicate msg dropped\n");
goto exit;
}
- msg_dbg(tunnel_msg, "TNL<REC<");
buf_discard(tunnel_buf);
return 1;
}
@@ -2520,18 +2327,14 @@ static int link_recv_changeover_msg(struct link **l_ptr,
/* First original message ?: */
if (tipc_link_is_up(dest_link)) {
- msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
info("Resetting link <%s>, changeover initiated by peer\n",
dest_link->name);
tipc_link_reset(dest_link);
dest_link->exp_msg_count = msg_count;
- dbg("Expecting %u tunnelled messages\n", msg_count);
if (!msg_count)
goto exit;
} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
- msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
dest_link->exp_msg_count = msg_count;
- dbg("Expecting %u tunnelled messages\n", msg_count);
if (!msg_count)
goto exit;
}
@@ -2541,18 +2344,14 @@ static int link_recv_changeover_msg(struct link **l_ptr,
if (dest_link->exp_msg_count == 0) {
warn("Link switchover error, "
"got too many tunnelled messages\n");
- msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
- dbg_print_link(dest_link, "LINK:");
goto exit;
}
dest_link->exp_msg_count--;
if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
- msg_dbg(tunnel_msg, "DROP/DUPL/<REC<");
goto exit;
} else {
*buf = buf_extract(tunnel_buf, INT_H_SIZE);
if (*buf != NULL) {
- msg_dbg(tunnel_msg, "TNL<REC<");
buf_discard(tunnel_buf);
return 1;
} else {
@@ -2574,7 +2373,6 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
u32 pos = INT_H_SIZE;
struct sk_buff *obuf;
- msg_dbg(buf_msg(buf), "<BNDL<: ");
while (msgcount--) {
obuf = buf_extract(buf, pos);
if (obuf == NULL) {
@@ -2582,7 +2380,6 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
break;
}
pos += align(msg_size(buf_msg(obuf)));
- msg_dbg(buf_msg(obuf), " /");
tipc_net_route_msg(obuf);
}
buf_discard(buf);
@@ -2719,7 +2516,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
u32 long_msg_seq_no = msg_long_msgno(fragm);
*fb = NULL;
- msg_dbg(fragm,"FRG<REC<");
/* Is there an incomplete message waiting for this fragment? */
@@ -2738,7 +2534,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
if (msg_type(imsg) == TIPC_MCAST_MSG)
max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
if (msg_size(imsg) > max) {
- msg_dbg(fragm,"<REC<Oversized: ");
buf_discard(fbuf);
return 0;
}
@@ -2751,8 +2546,8 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
/* Prepare buffer for subsequent fragments. */
set_long_msg_seqno(pbuf, long_msg_seq_no);
- set_fragm_size(pbuf,fragm_sz);
- set_expected_frags(pbuf,exp_fragm_cnt - 1);
+ set_fragm_size(pbuf, fragm_sz);
+ set_expected_frags(pbuf, exp_fragm_cnt - 1);
} else {
warn("Link unable to reassemble fragmented message\n");
}
@@ -2779,13 +2574,9 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
*m = buf_msg(pbuf);
return 1;
}
- set_expected_frags(pbuf,exp_frags);
+ set_expected_frags(pbuf, exp_frags);
return 0;
}
- dbg(" Discarding orphan fragment %x\n",fbuf);
- msg_dbg(fragm,"ORPHAN:");
- dbg("Pending long buffers:\n");
- dbg_print_buf_chain(*pending);
buf_discard(fbuf);
return 0;
}
@@ -2813,11 +2604,6 @@ static void link_check_defragm_bufs(struct link *l_ptr)
incr_timer_cnt(buf);
prev = buf;
} else {
- dbg(" Discarding incomplete long buffer\n");
- msg_dbg(buf_msg(buf), "LONG:");
- dbg_print_link(l_ptr, "curr:");
- dbg("Pending long buffers:\n");
- dbg_print_buf_chain(l_ptr->defragm_buf);
if (prev)
prev->next = buf->next;
else
@@ -2852,7 +2638,6 @@ void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
l_ptr->queue_limit[CONN_MANAGER] = 1200;
- l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
/* FRAGMENT and LAST_FRAGMENT packets */
@@ -3154,7 +2939,7 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
return MAX_MSG_SIZE;
read_lock_bh(&tipc_net_lock);
- n_ptr = tipc_node_select(dest, selector);
+ n_ptr = tipc_node_find(dest);
if (n_ptr) {
tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[selector & 1];
@@ -3166,27 +2951,22 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
return res;
}
-static void link_dump_send_queue(struct link *l_ptr)
+static void link_print(struct link *l_ptr, const char *str)
{
- if (l_ptr->next_out) {
- info("\nContents of unsent queue:\n");
- dbg_print_buf_chain(l_ptr->next_out);
- }
- info("\nContents of send queue:\n");
- if (l_ptr->first_out) {
- dbg_print_buf_chain(l_ptr->first_out);
- }
- info("Empty send queue\n");
-}
+ char print_area[256];
+ struct print_buf pb;
+ struct print_buf *buf = &pb;
+
+ tipc_printbuf_init(buf, print_area, sizeof(print_area));
-static void link_print(struct link *l_ptr, struct print_buf *buf,
- const char *str)
-{
tipc_printf(buf, str);
- if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
- return;
tipc_printf(buf, "Link %x<%s>:",
l_ptr->addr, l_ptr->b_ptr->publ.name);
+
+#ifdef CONFIG_TIPC_DEBUG
+ if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
+ goto print_state;
+
tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
tipc_printf(buf, "SQUE");
@@ -3204,7 +2984,6 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
- link_dump_send_queue(l_ptr);
}
} else
tipc_printf(buf, "[]");
@@ -3218,14 +2997,20 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
l_ptr->deferred_inqueue_sz);
}
}
+print_state:
+#endif
+
if (link_working_unknown(l_ptr))
tipc_printf(buf, ":WU");
- if (link_reset_reset(l_ptr))
+ else if (link_reset_reset(l_ptr))
tipc_printf(buf, ":RR");
- if (link_reset_unknown(l_ptr))
+ else if (link_reset_unknown(l_ptr))
tipc_printf(buf, ":RU");
- if (link_working_working(l_ptr))
+ else if (link_working_working(l_ptr))
tipc_printf(buf, ":WW");
tipc_printf(buf, "\n");
+
+ tipc_printbuf_validate(buf);
+ info("%s", print_area);
}
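
The link.c hunks above drop the per-link print buffer and the dbg_link_*() machinery; the new link_print() formats the whole status report into a stack-local print buffer and emits it through info() in one call. A sketch of that format-then-emit pattern, with snprintf()/printf() standing in for the TIPC print-buffer helpers and info() (not part of the patch):

/*
 * Sketch only: build the complete status line locally, then hand it to
 * the logger in a single emission.
 */
#include <stdio.h>

static void link_status(const char *name, unsigned int nxo, unsigned int nxi)
{
	char line[256];

	snprintf(line, sizeof(line), "Link <%s>: NXO(%u):NXI(%u):WW", name, nxo, nxi);
	printf("%s\n", line);	/* one call, as in info("%s", print_area) */
}

int main(void)
{
	link_status("1.1.1:eth0-1.1.2:eth0", 42, 41);
	return 0;
}
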
diff --git a/net/tipc/link.h b/net/tipc/link.h
index c562888d25da..70967e637027 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -37,7 +37,7 @@
#ifndef _TIPC_LINK_H
#define _TIPC_LINK_H
-#include "dbg.h"
+#include "log.h"
#include "msg.h"
#include "node.h"
@@ -107,7 +107,6 @@
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
* @defragm_buf: list of partially reassembled inbound message fragments
* @stats: collects statistics regarding link activity
- * @print_buf: print buffer used to log link activity
*/
struct link {
@@ -210,8 +209,6 @@ struct link {
u32 msg_lengths_total;
u32 msg_length_profile[7];
} stats;
-
- struct print_buf print_buf;
};
struct port;
@@ -232,8 +229,8 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
void tipc_link_reset(struct link *l_ptr);
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
-u32 tipc_link_get_max_pkt(u32 dest,u32 selector);
-int tipc_link_send_sections_fast(struct port* sender,
+u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
+int tipc_link_send_sections_fast(struct port *sender,
struct iovec const *msg_sect,
const u32 num_sect,
u32 destnode);
diff --git a/net/tipc/dbg.c b/net/tipc/log.c
index 46f51d208e5e..952c39f643e6 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/log.c
@@ -1,5 +1,5 @@
/*
- * net/tipc/dbg.c: TIPC print buffer routines for debugging
+ * net/tipc/log.c: TIPC print buffer routines for debugging
*
* Copyright (c) 1996-2006, Ericsson AB
* Copyright (c) 2005-2007, Wind River Systems
@@ -36,7 +36,7 @@
#include "core.h"
#include "config.h"
-#include "dbg.h"
+#include "log.h"
/*
* TIPC pre-defines the following print buffers:
@@ -52,7 +52,7 @@ static struct print_buf null_buf = { NULL, 0, NULL, 0 };
struct print_buf *const TIPC_NULL = &null_buf;
static struct print_buf cons_buf = { NULL, 0, NULL, 1 };
-static struct print_buf *const TIPC_CONS = &cons_buf;
+struct print_buf *const TIPC_CONS = &cons_buf;
static struct print_buf log_buf = { NULL, 0, NULL, 1 };
struct print_buf *const TIPC_LOG = &log_buf;
@@ -64,9 +64,9 @@ struct print_buf *const TIPC_LOG = &log_buf;
* 'print_string' when writing to a print buffer. This also protects against
* concurrent writes to the print buffer being written to.
*
- * 2) tipc_dump() and tipc_log_XXX() leverage the aforementioned
- * use of 'print_lock' to protect against all types of concurrent operations
- * on their associated print buffer (not just write operations).
+ * 2) tipc_log_XXX() leverages the aforementioned use of 'print_lock' to
+ * protect against all types of concurrent operations on their associated
+ * print buffer (not just write operations).
*
* Note: All routines of the form tipc_printbuf_XXX() are lock-free, and rely
* on the caller to prevent simultaneous use of the print buffer(s) being
@@ -76,18 +76,16 @@ struct print_buf *const TIPC_LOG = &log_buf;
static char print_string[TIPC_PB_MAX_STR];
static DEFINE_SPINLOCK(print_lock);
-static void tipc_printbuf_reset(struct print_buf *pb);
-static int tipc_printbuf_empty(struct print_buf *pb);
static void tipc_printbuf_move(struct print_buf *pb_to,
struct print_buf *pb_from);
-#define FORMAT(PTR,LEN,FMT) \
+#define FORMAT(PTR, LEN, FMT) \
{\
- va_list args;\
- va_start(args, FMT);\
- LEN = vsprintf(PTR, FMT, args);\
- va_end(args);\
- *(PTR + LEN) = '\0';\
+ va_list args;\
+ va_start(args, FMT);\
+ LEN = vsprintf(PTR, FMT, args);\
+ va_end(args);\
+ *(PTR + LEN) = '\0';\
}
/**
@@ -268,81 +266,6 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
spin_unlock_bh(&print_lock);
}
-#ifdef CONFIG_TIPC_DEBUG
-
-/**
- * print_to_console - write string of bytes to console in multiple chunks
- */
-
-static void print_to_console(char *crs, int len)
-{
- int rest = len;
-
- while (rest > 0) {
- int sz = rest < TIPC_PB_MAX_STR ? rest : TIPC_PB_MAX_STR;
- char c = crs[sz];
-
- crs[sz] = 0;
- printk((const char *)crs);
- crs[sz] = c;
- rest -= sz;
- crs += sz;
- }
-}
-
-/**
- * printbuf_dump - write print buffer contents to console
- */
-
-static void printbuf_dump(struct print_buf *pb)
-{
- int len;
-
- if (!pb->buf) {
- printk("*** PRINT BUFFER NOT ALLOCATED ***");
- return;
- }
-
- /* Dump print buffer from char after cursor to end (if used) */
-
- len = pb->buf + pb->size - pb->crs - 2;
- if ((pb->buf[pb->size - 1] == 0) && (len > 0))
- print_to_console(pb->crs + 1, len);
-
- /* Dump print buffer from start to cursor (always) */
-
- len = pb->crs - pb->buf;
- print_to_console(pb->buf, len);
-}
-
-/**
- * tipc_dump_dbg - dump (non-console) print buffer to console
- * @pb: pointer to print buffer
- */
-
-void tipc_dump_dbg(struct print_buf *pb, const char *fmt, ...)
-{
- int len;
-
- if (pb == TIPC_CONS)
- return;
-
- spin_lock_bh(&print_lock);
-
- FORMAT(print_string, len, fmt);
- printk(print_string);
-
- printk("\n---- Start of %s log dump ----\n\n",
- (pb == TIPC_LOG) ? "global" : "local");
- printbuf_dump(pb);
- tipc_printbuf_reset(pb);
- printk("\n---- End of dump ----\n");
-
- spin_unlock_bh(&print_lock);
-}
-
-#endif
-
/**
* tipc_log_resize - change the size of the TIPC log buffer
* @log_size: print buffer size to use
@@ -353,10 +276,8 @@ int tipc_log_resize(int log_size)
int res = 0;
spin_lock_bh(&print_lock);
- if (TIPC_LOG->buf) {
- kfree(TIPC_LOG->buf);
- TIPC_LOG->buf = NULL;
- }
+ kfree(TIPC_LOG->buf);
+ TIPC_LOG->buf = NULL;
if (log_size) {
if (log_size < TIPC_PB_MIN_SIZE)
log_size = TIPC_PB_MIN_SIZE;
@@ -407,8 +328,7 @@ struct sk_buff *tipc_log_dump(void)
} else if (tipc_printbuf_empty(TIPC_LOG)) {
spin_unlock_bh(&print_lock);
reply = tipc_cfg_reply_ultra_string("log is empty\n");
- }
- else {
+ } else {
struct tlv_desc *rep_tlv;
struct print_buf pb;
int str_len;
@@ -429,4 +349,3 @@ struct sk_buff *tipc_log_dump(void)
}
return reply;
}
-
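
The log.c hunk in tipc_log_resize() above drops the NULL guard around the old buffer because kfree(NULL) is a defined no-op. A userspace sketch of the same simplification, with free()/calloc() standing in for kfree()/kmalloc() (not part of the patch):

/*
 * Sketch only: freeing a NULL pointer is harmless, so the buffer can be
 * released and reallocated without an explicit NULL check.
 */
#include <stdlib.h>

static char *log_buf;

static int log_resize(size_t size)
{
	free(log_buf);		/* safe even when log_buf is still NULL */
	log_buf = NULL;
	if (size)
		log_buf = calloc(1, size);
	return (size && !log_buf) ? -1 : 0;
}

int main(void)
{
	log_resize(0);		/* nothing allocated yet: free(NULL) is fine */
	log_resize(4096);
	return log_resize(0);
}
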
diff --git a/net/tipc/dbg.h b/net/tipc/log.h
index 3ba6ba8b434a..2248d96238e6 100644
--- a/net/tipc/dbg.h
+++ b/net/tipc/log.h
@@ -1,5 +1,5 @@
/*
- * net/tipc/dbg.h: Include file for TIPC print buffer routines
+ * net/tipc/log.h: Include file for TIPC print buffer routines
*
* Copyright (c) 1997-2006, Ericsson AB
* Copyright (c) 2005-2007, Wind River Systems
@@ -34,8 +34,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _TIPC_DBG_H
-#define _TIPC_DBG_H
+#ifndef _TIPC_LOG_H
+#define _TIPC_LOG_H
/**
* struct print_buf - TIPC print buffer structure
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index ee6b4c68d4a4..bb6180c4fcbb 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -35,7 +35,6 @@
*/
#include "core.h"
-#include "addr.h"
#include "msg.h"
u32 tipc_msg_tot_importance(struct tipc_msg *m)
@@ -92,7 +91,7 @@ int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
int tipc_msg_build(struct tipc_msg *hdr,
struct iovec const *msg_sect, u32 num_sect,
- int max_size, int usrmem, struct sk_buff** buf)
+ int max_size, int usrmem, struct sk_buff **buf)
{
int dsz, sz, hsz, pos, res, cnt;
@@ -138,6 +137,7 @@ int tipc_msg_build(struct tipc_msg *hdr,
void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
{
u32 usr = msg_user(msg);
+ tipc_printf(buf, KERN_DEBUG);
tipc_printf(buf, str);
switch (usr) {
@@ -161,10 +161,10 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
tipc_printf(buf, "LAST:");
break;
default:
- tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
+ tipc_printf(buf, "UNKNOWN:%x", msg_type(msg));
}
- tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg),
+ tipc_printf(buf, "NO(%u/%u):", msg_long_msgno(msg),
msg_fragm_no(msg));
break;
case TIPC_LOW_IMPORTANCE:
@@ -190,7 +190,7 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
tipc_printf(buf, "DIR:");
break;
default:
- tipc_printf(buf, "UNKNOWN TYPE %u",msg_type(msg));
+ tipc_printf(buf, "UNKNOWN TYPE %u", msg_type(msg));
}
if (msg_routed(msg) && !msg_non_seq(msg))
tipc_printf(buf, "ROUT:");
@@ -208,7 +208,7 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
tipc_printf(buf, "WDRW:");
break;
default:
- tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
+ tipc_printf(buf, "UNKNOWN:%x", msg_type(msg));
}
if (msg_routed(msg))
tipc_printf(buf, "ROUT:");
@@ -227,39 +227,39 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
break;
case CONN_ACK:
tipc_printf(buf, "CONN_ACK:");
- tipc_printf(buf, "ACK(%u):",msg_msgcnt(msg));
+ tipc_printf(buf, "ACK(%u):", msg_msgcnt(msg));
break;
default:
- tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+ tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
}
if (msg_routed(msg))
tipc_printf(buf, "ROUT:");
if (msg_reroute_cnt(msg))
- tipc_printf(buf, "REROUTED(%u):",msg_reroute_cnt(msg));
+ tipc_printf(buf, "REROUTED(%u):", msg_reroute_cnt(msg));
break;
case LINK_PROTOCOL:
- tipc_printf(buf, "PROT:TIM(%u):",msg_timestamp(msg));
+ tipc_printf(buf, "PROT:TIM(%u):", msg_timestamp(msg));
switch (msg_type(msg)) {
case STATE_MSG:
tipc_printf(buf, "STATE:");
- tipc_printf(buf, "%s:",msg_probe(msg) ? "PRB" :"");
- tipc_printf(buf, "NXS(%u):",msg_next_sent(msg));
- tipc_printf(buf, "GAP(%u):",msg_seq_gap(msg));
- tipc_printf(buf, "LSTBC(%u):",msg_last_bcast(msg));
+ tipc_printf(buf, "%s:", msg_probe(msg) ? "PRB" : "");
+ tipc_printf(buf, "NXS(%u):", msg_next_sent(msg));
+ tipc_printf(buf, "GAP(%u):", msg_seq_gap(msg));
+ tipc_printf(buf, "LSTBC(%u):", msg_last_bcast(msg));
break;
case RESET_MSG:
tipc_printf(buf, "RESET:");
if (msg_size(msg) != msg_hdr_sz(msg))
- tipc_printf(buf, "BEAR:%s:",msg_data(msg));
+ tipc_printf(buf, "BEAR:%s:", msg_data(msg));
break;
case ACTIVATE_MSG:
tipc_printf(buf, "ACTIVATE:");
break;
default:
- tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+ tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
}
- tipc_printf(buf, "PLANE(%c):",msg_net_plane(msg));
- tipc_printf(buf, "SESS(%u):",msg_session(msg));
+ tipc_printf(buf, "PLANE(%c):", msg_net_plane(msg));
+ tipc_printf(buf, "SESS(%u):", msg_session(msg));
break;
case CHANGEOVER_PROTOCOL:
tipc_printf(buf, "TUNL:");
@@ -269,10 +269,10 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
break;
case ORIGINAL_MSG:
tipc_printf(buf, "ORIG:");
- tipc_printf(buf, "EXP(%u)",msg_msgcnt(msg));
+ tipc_printf(buf, "EXP(%u)", msg_msgcnt(msg));
break;
default:
- tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+ tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
}
break;
case ROUTE_DISTRIBUTOR:
@@ -280,26 +280,26 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
switch (msg_type(msg)) {
case EXT_ROUTING_TABLE:
tipc_printf(buf, "EXT_TBL:");
- tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+ tipc_printf(buf, "TO:%x:", msg_remote_node(msg));
break;
case LOCAL_ROUTING_TABLE:
tipc_printf(buf, "LOCAL_TBL:");
- tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+ tipc_printf(buf, "TO:%x:", msg_remote_node(msg));
break;
case SLAVE_ROUTING_TABLE:
tipc_printf(buf, "DP_TBL:");
- tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+ tipc_printf(buf, "TO:%x:", msg_remote_node(msg));
break;
case ROUTE_ADDITION:
tipc_printf(buf, "ADD:");
- tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+ tipc_printf(buf, "TO:%x:", msg_remote_node(msg));
break;
case ROUTE_REMOVAL:
tipc_printf(buf, "REMOVE:");
- tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+ tipc_printf(buf, "TO:%x:", msg_remote_node(msg));
break;
default:
- tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+ tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
}
break;
case LINK_CONFIG:
@@ -312,7 +312,7 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
tipc_printf(buf, "DSC_RESP:");
break;
default:
- tipc_printf(buf, "UNKNOWN TYPE:%x:",msg_type(msg));
+ tipc_printf(buf, "UNKNOWN TYPE:%x:", msg_type(msg));
break;
}
break;
@@ -348,7 +348,8 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
tipc_printf(buf, "UNKNOWN ERROR(%x):",
msg_errcode(msg));
}
- default:{}
+ default:
+ break;
}
tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg));
@@ -357,9 +358,8 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
if (msg_non_seq(msg))
tipc_printf(buf, "NOSEQ:");
- else {
+ else
tipc_printf(buf, "ACK(%u):", msg_ack(msg));
- }
tipc_printf(buf, "BACK(%u):", msg_bcast_ack(msg));
tipc_printf(buf, "PRND(%x)", msg_prevnode(msg));
@@ -387,14 +387,13 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
if (msg_user(msg) == NAME_DISTRIBUTOR) {
tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
- if (msg_routed(msg)) {
+ if (msg_routed(msg))
tipc_printf(buf, ":CSEQN(%u)", msg_transp_seqno(msg));
- }
}
if (msg_user(msg) == LINK_CONFIG) {
- u32* raw = (u32*)msg;
- struct tipc_media_addr* orig = (struct tipc_media_addr*)&raw[5];
+ u32 *raw = (u32 *)msg;
+ struct tipc_media_addr *orig = (struct tipc_media_addr *)&raw[5];
tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
@@ -405,12 +404,10 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg));
}
tipc_printf(buf, "\n");
- if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
+ if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg)))
tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
- }
- if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
+ if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT))
tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
- }
}
#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index aee53864d7a0..92c4c4fd7b3f 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -174,7 +174,7 @@ static inline u32 msg_hdr_sz(struct tipc_msg *m)
return msg_bits(m, 0, 21, 0xf) << 2;
}
-static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
+static inline void msg_set_hdr_sz(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 0, 21, 0xf, n>>2);
}
@@ -425,7 +425,7 @@ static inline u32 msg_routed(struct tipc_msg *m)
{
if (likely(msg_short(m)))
return 0;
- return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
+ return (msg_destnode(m) ^ msg_orignode(m)) >> 11;
}
static inline u32 msg_nametype(struct tipc_msg *m)
@@ -540,7 +540,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
#define MSG_BUNDLER 6
#define LINK_PROTOCOL 7
#define CONN_MANAGER 8
-#define ROUTE_DISTRIBUTOR 9
+#define ROUTE_DISTRIBUTOR 9 /* obsoleted */
#define CHANGEOVER_PROTOCOL 10
#define NAME_DISTRIBUTOR 11
#define MSG_FRAGMENTER 12
@@ -819,11 +819,6 @@ static inline void msg_set_remote_node(struct tipc_msg *m, u32 a)
msg_set_word(m, msg_hdr_sz(m)/4, a);
}
-static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
-{
- msg_data(m)[pos + 4] = 1;
-}
-
/*
* Segmentation message types
*/
@@ -850,7 +845,7 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
* Routing table message types
*/
#define EXT_ROUTING_TABLE 0
-#define LOCAL_ROUTING_TABLE 1
+#define LOCAL_ROUTING_TABLE 1 /* obsoleted */
#define SLAVE_ROUTING_TABLE 2
#define ROUTE_ADDITION 3
#define ROUTE_REMOVAL 4
@@ -868,7 +863,7 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect);
int tipc_msg_build(struct tipc_msg *hdr,
struct iovec const *msg_sect, u32 num_sect,
- int max_size, int usrmem, struct sk_buff** buf);
+ int max_size, int usrmem, struct sk_buff **buf);
static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
{
@@ -877,7 +872,7 @@ static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr
static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
{
- memcpy(a, &((int*)m)[5], sizeof(*a));
+ memcpy(a, &((int *)m)[5], sizeof(*a));
}
#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 10ff48be3c01..483c226c9581 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -35,7 +35,6 @@
*/
#include "core.h"
-#include "cluster.h"
#include "link.h"
#include "name_distr.h"
@@ -74,7 +73,7 @@ struct distr_item {
*/
static LIST_HEAD(publ_root);
-static u32 publ_cnt = 0;
+static u32 publ_cnt;
/**
* publ_to_item - add publication info to a publication message
@@ -87,7 +86,6 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
i->upper = htonl(p->upper);
i->ref = htonl(p->ref);
i->key = htonl(p->key);
- dbg("publ_to_item: %u, %u, %u\n", p->type, p->lower, p->upper);
}
/**
@@ -107,6 +105,26 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
return buf;
}
+static void named_cluster_distribute(struct sk_buff *buf)
+{
+ struct sk_buff *buf_copy;
+ struct tipc_node *n_ptr;
+ u32 n_num;
+
+ for (n_num = 1; n_num <= tipc_net.highest_node; n_num++) {
+ n_ptr = tipc_net.nodes[n_num];
+ if (n_ptr && tipc_node_has_active_links(n_ptr)) {
+ buf_copy = skb_copy(buf, GFP_ATOMIC);
+ if (!buf_copy)
+ break;
+ msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
+ tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
+ }
+ }
+
+ buf_discard(buf);
+}
+
/**
* tipc_named_publish - tell other nodes about a new publication by this node
*/
@@ -127,8 +145,7 @@ void tipc_named_publish(struct publication *publ)
item = (struct distr_item *)msg_data(buf_msg(buf));
publ_to_item(item, publ);
- dbg("tipc_named_withdraw: broadcasting publish msg\n");
- tipc_cltr_broadcast(buf);
+ named_cluster_distribute(buf);
}
/**
@@ -151,8 +168,7 @@ void tipc_named_withdraw(struct publication *publ)
item = (struct distr_item *)msg_data(buf_msg(buf));
publ_to_item(item, publ);
- dbg("tipc_named_withdraw: broadcasting withdraw msg\n");
- tipc_cltr_broadcast(buf);
+ named_cluster_distribute(buf);
}
/**
@@ -189,9 +205,6 @@ void tipc_named_node_up(unsigned long node)
left -= ITEM_SIZE;
if (!left) {
msg_set_link_selector(buf_msg(buf), node);
- dbg("tipc_named_node_up: sending publish msg to "
- "<%u.%u.%u>\n", tipc_zone(node),
- tipc_cluster(node), tipc_node(node));
tipc_link_send(buf, node, node);
buf = NULL;
}
@@ -216,8 +229,6 @@ static void node_is_down(struct publication *publ)
struct publication *p;
write_lock_bh(&tipc_nametbl_lock);
- dbg("node_is_down: withdrawing %u, %u, %u\n",
- publ->type, publ->lower, publ->upper);
publ->key += 1222345;
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
publ->node, publ->ref, publ->key);
@@ -229,9 +240,7 @@ static void node_is_down(struct publication *publ)
publ->type, publ->lower, publ->node, publ->ref, publ->key);
}
- if (p) {
- kfree(p);
- }
+ kfree(p);
}
/**
@@ -248,9 +257,6 @@ void tipc_named_recv(struct sk_buff *buf)
write_lock_bh(&tipc_nametbl_lock);
while (count--) {
if (msg_type(msg) == PUBLICATION) {
- dbg("tipc_named_recv: got publication for %u, %u, %u\n",
- ntohl(item->type), ntohl(item->lower),
- ntohl(item->upper));
publ = tipc_nametbl_insert_publ(ntohl(item->type),
ntohl(item->lower),
ntohl(item->upper),
@@ -265,9 +271,6 @@ void tipc_named_recv(struct sk_buff *buf)
(net_ev_handler)node_is_down);
}
} else if (msg_type(msg) == WITHDRAWAL) {
- dbg("tipc_named_recv: got withdrawl for %u, %u, %u\n",
- ntohl(item->type), ntohl(item->lower),
- ntohl(item->upper));
publ = tipc_nametbl_remove_publ(ntohl(item->type),
ntohl(item->lower),
msg_orignode(msg),
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index d5adb0456746..205ed4a4e186 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -104,7 +104,7 @@ struct name_table {
u32 local_publ_count;
};
-static struct name_table table = { NULL } ;
+static struct name_table table;
static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
DEFINE_RWLOCK(tipc_nametbl_lock);
@@ -172,8 +172,6 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
spin_lock_init(&nseq->lock);
nseq->type = type;
nseq->sseqs = sseq;
- dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n",
- nseq, type, nseq->sseqs, nseq->first_free);
nseq->alloc = 1;
INIT_HLIST_NODE(&nseq->ns_list);
INIT_LIST_HEAD(&nseq->subscriptions);
@@ -251,8 +249,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
int created_subseq = 0;
sseq = nameseq_find_subseq(nseq, lower);
- dbg("nameseq_ins: for seq %p, {%u,%u}, found sseq %p\n",
- nseq, type, lower, sseq);
if (sseq) {
/* Lower end overlaps existing entry => need an exact match */
@@ -289,38 +285,30 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
type, lower, upper);
return NULL;
}
- dbg("Allocated %u more sseqs\n", nseq->alloc);
memcpy(sseqs, nseq->sseqs,
nseq->alloc * sizeof(struct sub_seq));
kfree(nseq->sseqs);
nseq->sseqs = sseqs;
nseq->alloc *= 2;
}
- dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
/* Insert new sub-sequence */
- dbg("ins in pos %u, ff = %u\n", inspos, nseq->first_free);
sseq = &nseq->sseqs[inspos];
freesseq = &nseq->sseqs[nseq->first_free];
- memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof (*sseq));
- memset(sseq, 0, sizeof (*sseq));
+ memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
+ memset(sseq, 0, sizeof(*sseq));
nseq->first_free++;
sseq->lower = lower;
sseq->upper = upper;
created_subseq = 1;
}
- dbg("inserting {%u,%u,%u} from <0x%x:%u> into sseq %p(%u,%u) of seq %p\n",
- type, lower, upper, node, port, sseq,
- sseq->lower, sseq->upper, nseq);
/* Insert a publication: */
publ = publ_create(type, lower, upper, scope, node, port, key);
if (!publ)
return NULL;
- dbg("inserting publ %p, node=0x%x publ->node=0x%x, subscr->node=%p\n",
- publ, node, publ->node, publ->subscr.node);
sseq->zone_list_size++;
if (!sseq->zone_list)
@@ -355,7 +343,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
* Any subscriptions waiting for notification?
*/
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
- dbg("calling report_overlap()\n");
tipc_subscr_report_overlap(s,
publ->lower,
publ->upper,
@@ -393,9 +380,6 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
if (!sseq)
return NULL;
- dbg("tipc_nameseq_remove_publ: seq: %p, sseq %p, {%u,%u}, key %u\n",
- nseq, sseq, nseq->type, inst, key);
-
/* Remove publication from zone scope list */
prev = sseq->zone_list;
@@ -487,7 +471,7 @@ end_node:
if (!sseq->zone_list) {
free = &nseq->sseqs[nseq->first_free--];
- memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
+ memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq));
removed_subseq = 1;
}
@@ -523,7 +507,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s
while (sseq != &nseq->sseqs[nseq->first_free]) {
struct publication *zl = sseq->zone_list;
- if (zl && tipc_subscr_overlap(s,sseq->lower,sseq->upper)) {
+ if (zl && tipc_subscr_overlap(s, sseq->lower, sseq->upper)) {
struct publication *crs = zl;
int must_report = 1;
@@ -549,15 +533,10 @@ static struct name_seq *nametbl_find_seq(u32 type)
struct hlist_node *seq_node;
struct name_seq *ns;
- dbg("find_seq %u,(%u,0x%x) table = %p, hash[type] = %u\n",
- type, htonl(type), type, table.types, hash(type));
-
seq_head = &table.types[hash(type)];
hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
- if (ns->type == type) {
- dbg("found %p\n", ns);
+ if (ns->type == type)
return ns;
- }
}
return NULL;
@@ -568,18 +547,14 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
{
struct name_seq *seq = nametbl_find_seq(type);
- dbg("tipc_nametbl_insert_publ: {%u,%u,%u} found %p\n", type, lower, upper, seq);
if (lower > upper) {
warn("Failed to publish illegal {%u,%u,%u}\n",
type, lower, upper);
return NULL;
}
- dbg("Publishing {%u,%u,%u} from 0x%x\n", type, lower, upper, node);
- if (!seq) {
+ if (!seq)
seq = tipc_nameseq_create(type, &table.types[hash(type)]);
- dbg("tipc_nametbl_insert_publ: created %p\n", seq);
- }
if (!seq)
return NULL;
@@ -596,7 +571,6 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
if (!seq)
return NULL;
- dbg("Withdrawing {%u,%u} from 0x%x\n", type, lower, node);
publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
if (!seq->first_free && list_empty(&seq->subscriptions)) {
@@ -777,9 +751,8 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
table.local_publ_count++;
publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
tipc_own_addr, port_ref, key);
- if (publ && (scope != TIPC_NODE_SCOPE)) {
+ if (publ && (scope != TIPC_NODE_SCOPE))
tipc_named_publish(publ);
- }
write_unlock_bh(&tipc_nametbl_lock);
return publ;
}
@@ -792,7 +765,6 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
{
struct publication *publ;
- dbg("tipc_nametbl_withdraw: {%u,%u}, key=%u\n", type, lower, key);
write_lock_bh(&tipc_nametbl_lock);
publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
if (likely(publ)) {
@@ -822,13 +794,10 @@ void tipc_nametbl_subscribe(struct subscription *s)
write_lock_bh(&tipc_nametbl_lock);
seq = nametbl_find_seq(type);
- if (!seq) {
+ if (!seq)
seq = tipc_nameseq_create(type, &table.types[hash(type)]);
- }
- if (seq){
+ if (seq) {
spin_lock_bh(&seq->lock);
- dbg("tipc_nametbl_subscribe:found %p for {%u,%u,%u}\n",
- seq, type, s->seq.lower, s->seq.upper);
tipc_nameseq_subscribe(seq, s);
spin_unlock_bh(&seq->lock);
} else {
@@ -848,7 +817,7 @@ void tipc_nametbl_unsubscribe(struct subscription *s)
write_lock_bh(&tipc_nametbl_lock);
seq = nametbl_find_seq(s->seq.type);
- if (seq != NULL){
+ if (seq != NULL) {
spin_lock_bh(&seq->lock);
list_del_init(&s->nameseq_list);
spin_unlock_bh(&seq->lock);
@@ -881,7 +850,7 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
}
do {
- sprintf (portIdStr, "<%u.%u.%u:%u>",
+ sprintf(portIdStr, "<%u.%u.%u:%u>",
tipc_zone(publ->node), tipc_cluster(publ->node),
tipc_node(publ->node), publ->ref);
tipc_printf(buf, "%-26s ", portIdStr);
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 139882d4ed00..d228bd682655 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -46,7 +46,7 @@ struct port_list;
* TIPC name types reserved for internal TIPC use (both current and planned)
*/
-#define TIPC_ZM_SRV 3 /* zone master service name type */
+#define TIPC_ZM_SRV 3 /* zone master service name type */
/**
diff --git a/net/tipc/net.c b/net/tipc/net.c
index c2b4b86c2e6a..9bacfd00b91e 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -36,11 +36,8 @@
#include "core.h"
#include "net.h"
-#include "zone.h"
-#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
-#include "link.h"
#include "port.h"
#include "config.h"
@@ -111,46 +108,25 @@
*/
DEFINE_RWLOCK(tipc_net_lock);
-static struct _zone *tipc_zones[256] = { NULL, };
-struct network tipc_net = { tipc_zones };
+struct network tipc_net;
-struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
+static int net_start(void)
{
- return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref);
-}
-
-u32 tipc_net_select_router(u32 addr, u32 ref)
-{
- return tipc_zone_select_router(tipc_net.zones[tipc_zone(addr)], addr, ref);
-}
-
-void tipc_net_remove_as_router(u32 router)
-{
- u32 z_num;
-
- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
- if (!tipc_net.zones[z_num])
- continue;
- tipc_zone_remove_as_router(tipc_net.zones[z_num], router);
- }
-}
-
-void tipc_net_send_external_routes(u32 dest)
-{
- u32 z_num;
+ tipc_net.nodes = kcalloc(tipc_max_nodes + 1,
+ sizeof(*tipc_net.nodes), GFP_ATOMIC);
+ tipc_net.highest_node = 0;
- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
- if (tipc_net.zones[z_num])
- tipc_zone_send_external_routes(tipc_net.zones[z_num], dest);
- }
+ return tipc_net.nodes ? 0 : -ENOMEM;
}
static void net_stop(void)
{
- u32 z_num;
+ u32 n_num;
- for (z_num = 1; z_num <= tipc_max_zones; z_num++)
- tipc_zone_delete(tipc_net.zones[z_num]);
+ for (n_num = 1; n_num <= tipc_net.highest_node; n_num++)
+ tipc_node_delete(tipc_net.nodes[n_num]);
+ kfree(tipc_net.nodes);
+ tipc_net.nodes = NULL;
}
static void net_route_named_msg(struct sk_buff *buf)
@@ -160,22 +136,18 @@ static void net_route_named_msg(struct sk_buff *buf)
u32 dport;
if (!msg_named(msg)) {
- msg_dbg(msg, "tipc_net->drop_nam:");
buf_discard(buf);
return;
}
dnode = addr_domain(msg_lookup_scope(msg));
dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
- dbg("tipc_net->lookup<%u,%u>-><%u,%x>\n",
- msg_nametype(msg), msg_nameinst(msg), dport, dnode);
if (dport) {
msg_set_destnode(msg, dnode);
msg_set_destport(msg, dport);
tipc_net_route_msg(buf);
return;
}
- msg_dbg(msg, "tipc_net->rej:NO NAME: ");
tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
@@ -191,18 +163,14 @@ void tipc_net_route_msg(struct sk_buff *buf)
msg_incr_reroute_cnt(msg);
if (msg_reroute_cnt(msg) > 6) {
if (msg_errcode(msg)) {
- msg_dbg(msg, "NET>DISC>:");
buf_discard(buf);
} else {
- msg_dbg(msg, "NET>REJ>:");
tipc_reject_msg(buf, msg_destport(msg) ?
TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
}
return;
}
- msg_dbg(msg, "tipc_net->rout: ");
-
/* Handle message for this node */
dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
if (tipc_in_scope(dnode, tipc_own_addr)) {
@@ -216,9 +184,6 @@ void tipc_net_route_msg(struct sk_buff *buf)
return;
}
switch (msg_user(msg)) {
- case ROUTE_DISTRIBUTOR:
- tipc_cltr_recv_routing_table(buf);
- break;
case NAME_DISTRIBUTOR:
tipc_named_recv(buf);
break;
@@ -226,14 +191,12 @@ void tipc_net_route_msg(struct sk_buff *buf)
tipc_port_recv_proto_msg(buf);
break;
default:
- msg_dbg(msg,"DROP/NET/<REC<");
buf_discard(buf);
}
return;
}
/* Handle message for another node */
- msg_dbg(msg, "NET>SEND>: ");
skb_trim(buf, msg_size(msg));
tipc_link_send(buf, dnode, msg_link_selector(msg));
}
@@ -254,10 +217,12 @@ int tipc_net_start(u32 addr)
tipc_named_reinit();
tipc_port_reinit();
- if ((res = tipc_cltr_init()) ||
- (res = tipc_bclink_init())) {
+ res = net_start();
+ if (res)
+ return res;
+ res = tipc_bclink_init();
+ if (res)
return res;
- }
tipc_k_signal((Handler)tipc_subscr_start, 0);
tipc_k_signal((Handler)tipc_cfg_init, 0);
diff --git a/net/tipc/net.h b/net/tipc/net.h
index de2b9ad8f646..4ae59ad04893 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -37,26 +37,26 @@
#ifndef _TIPC_NET_H
#define _TIPC_NET_H
-struct _zone;
+struct tipc_node;
/**
* struct network - TIPC network structure
- * @zones: array of pointers to all zones within network
+ * @nodes: array of pointers to all nodes within cluster
+ * @highest_node: id of highest numbered node within cluster
+ * @links: number of (unicast) links to cluster
*/
struct network {
- struct _zone **zones;
+ struct tipc_node **nodes;
+ u32 highest_node;
+ u32 links;
};
extern struct network tipc_net;
extern rwlock_t tipc_net_lock;
-void tipc_net_remove_as_router(u32 router);
-void tipc_net_send_external_routes(u32 dest);
void tipc_net_route_msg(struct sk_buff *buf);
-struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref);
-u32 tipc_net_select_router(u32 addr, u32 ref);
int tipc_net_start(u32 addr);
void tipc_net_stop(void);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index df71dfc3a9ae..3af53e327f49 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -37,18 +37,14 @@
#include "core.h"
#include "config.h"
#include "node.h"
-#include "port.h"
#include "name_distr.h"
static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);
-/* sorted list of nodes within cluster */
-static struct tipc_node *tipc_nodes = NULL;
-
static DEFINE_SPINLOCK(node_create_lock);
-u32 tipc_own_tag = 0;
+u32 tipc_own_tag;
/**
* tipc_node_create - create neighboring node
@@ -62,65 +58,51 @@ u32 tipc_own_tag = 0;
struct tipc_node *tipc_node_create(u32 addr)
{
- struct cluster *c_ptr;
struct tipc_node *n_ptr;
- struct tipc_node **curr_node;
+ u32 n_num;
spin_lock_bh(&node_create_lock);
- for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
- if (addr < n_ptr->addr)
- break;
- if (addr == n_ptr->addr) {
- spin_unlock_bh(&node_create_lock);
- return n_ptr;
- }
+ n_ptr = tipc_node_find(addr);
+ if (n_ptr) {
+ spin_unlock_bh(&node_create_lock);
+ return n_ptr;
}
- n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
+ n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
if (!n_ptr) {
spin_unlock_bh(&node_create_lock);
warn("Node creation failed, no memory\n");
return NULL;
}
- c_ptr = tipc_cltr_find(addr);
- if (!c_ptr) {
- c_ptr = tipc_cltr_create(addr);
- }
- if (!c_ptr) {
- spin_unlock_bh(&node_create_lock);
- kfree(n_ptr);
- return NULL;
- }
-
n_ptr->addr = addr;
- spin_lock_init(&n_ptr->lock);
+ spin_lock_init(&n_ptr->lock);
INIT_LIST_HEAD(&n_ptr->nsub);
- n_ptr->owner = c_ptr;
- tipc_cltr_attach_node(c_ptr, n_ptr);
- n_ptr->last_router = -1;
-
- /* Insert node into ordered list */
- for (curr_node = &tipc_nodes; *curr_node;
- curr_node = &(*curr_node)->next) {
- if (addr < (*curr_node)->addr) {
- n_ptr->next = *curr_node;
- break;
- }
- }
- (*curr_node) = n_ptr;
+
+ n_num = tipc_node(addr);
+ tipc_net.nodes[n_num] = n_ptr;
+ if (n_num > tipc_net.highest_node)
+ tipc_net.highest_node = n_num;
+
spin_unlock_bh(&node_create_lock);
return n_ptr;
}
void tipc_node_delete(struct tipc_node *n_ptr)
{
+ u32 n_num;
+
if (!n_ptr)
return;
- dbg("node %x deleted\n", n_ptr->addr);
+ n_num = tipc_node(n_ptr->addr);
+ tipc_net.nodes[n_num] = NULL;
kfree(n_ptr);
+
+ while (!tipc_net.nodes[tipc_net.highest_node])
+ if (--tipc_net.highest_node == 0)
+ break;
}
@@ -140,7 +122,6 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
l_ptr->name, l_ptr->b_ptr->net_plane);
if (!active[0]) {
- dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
active[0] = active[1] = l_ptr;
node_established_contact(n_ptr);
return;
@@ -229,14 +210,9 @@ int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
return n_ptr->working_links > 1;
}
-static int tipc_node_has_active_routes(struct tipc_node *n_ptr)
-{
- return n_ptr && (n_ptr->last_router >= 0);
-}
-
int tipc_node_is_up(struct tipc_node *n_ptr)
{
- return tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr);
+ return tipc_node_has_active_links(n_ptr);
}
struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
@@ -257,7 +233,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
if (!n_ptr->links[bearer_id]) {
n_ptr->links[bearer_id] = l_ptr;
- tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
+ tipc_net.links++;
n_ptr->link_cnt++;
return n_ptr;
}
@@ -271,7 +247,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
{
n_ptr->links[l_ptr->b_ptr->identity] = NULL;
- tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
+ tipc_net.links--;
n_ptr->link_cnt--;
}
@@ -323,48 +299,16 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
static void node_established_contact(struct tipc_node *n_ptr)
{
- struct cluster *c_ptr;
-
- dbg("node_established_contact:-> %x\n", n_ptr->addr);
- if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
- tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
- }
+ tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
/* Synchronize broadcast acks */
n_ptr->bclink.acked = tipc_bclink_get_last_sent();
- if (is_slave(tipc_own_addr))
- return;
- if (!in_own_cluster(n_ptr->addr)) {
- /* Usage case 1 (see above) */
- c_ptr = tipc_cltr_find(tipc_own_addr);
- if (!c_ptr)
- c_ptr = tipc_cltr_create(tipc_own_addr);
- if (c_ptr)
- tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
- tipc_max_nodes);
- return;
- }
-
- c_ptr = n_ptr->owner;
- if (is_slave(n_ptr->addr)) {
- /* Usage case 2 (see above) */
- tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
- tipc_cltr_send_local_routes(c_ptr, n_ptr->addr);
- return;
- }
-
if (n_ptr->bclink.supported) {
- tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
+ tipc_nmap_add(&tipc_bcast_nmap, n_ptr->addr);
if (n_ptr->addr < tipc_own_addr)
tipc_own_tag++;
}
-
- /* Case 3 (see above) */
- tipc_net_send_external_routes(n_ptr->addr);
- tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr);
- tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
- tipc_highest_allowed_slave);
}
static void node_cleanup_finished(unsigned long node_addr)
@@ -383,7 +327,6 @@ static void node_cleanup_finished(unsigned long node_addr)
static void node_lost_contact(struct tipc_node *n_ptr)
{
- struct cluster *c_ptr;
struct tipc_node_subscr *ns, *tns;
char addr_string[16];
u32 i;
@@ -391,7 +334,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
/* Clean up broadcast reception remains */
n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
while (n_ptr->bclink.deferred_head) {
- struct sk_buff* buf = n_ptr->bclink.deferred_head;
+ struct sk_buff *buf = n_ptr->bclink.deferred_head;
n_ptr->bclink.deferred_head = buf->next;
buf_discard(buf);
}
@@ -399,41 +342,14 @@ static void node_lost_contact(struct tipc_node *n_ptr)
buf_discard(n_ptr->bclink.defragm);
n_ptr->bclink.defragm = NULL;
}
- if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
- tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
- }
- /* Update routing tables */
- if (is_slave(tipc_own_addr)) {
- tipc_net_remove_as_router(n_ptr->addr);
- } else {
- if (!in_own_cluster(n_ptr->addr)) {
- /* Case 4 (see above) */
- c_ptr = tipc_cltr_find(tipc_own_addr);
- tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
- tipc_max_nodes);
- } else {
- /* Case 5 (see above) */
- c_ptr = tipc_cltr_find(n_ptr->addr);
- if (is_slave(n_ptr->addr)) {
- tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
- tipc_max_nodes);
- } else {
- if (n_ptr->bclink.supported) {
- tipc_nmap_remove(&tipc_cltr_bcast_nodes,
- n_ptr->addr);
- if (n_ptr->addr < tipc_own_addr)
- tipc_own_tag--;
- }
- tipc_net_remove_as_router(n_ptr->addr);
- tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr,
- LOWEST_SLAVE,
- tipc_highest_allowed_slave);
- }
- }
+ if (n_ptr->bclink.supported) {
+ tipc_bclink_acknowledge(n_ptr,
+ mod(n_ptr->bclink.acked + 10000));
+ tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
+ if (n_ptr->addr < tipc_own_addr)
+ tipc_own_tag--;
}
- if (tipc_node_has_active_routes(n_ptr))
- return;
info("Lost contact with %s\n",
tipc_addr_string_fill(addr_string, n_ptr->addr));
@@ -462,125 +378,6 @@ static void node_lost_contact(struct tipc_node *n_ptr)
tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
}
-/**
- * tipc_node_select_next_hop - find the next-hop node for a message
- *
- * Called by when cluster local lookup has failed.
- */
-
-struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector)
-{
- struct tipc_node *n_ptr;
- u32 router_addr;
-
- if (!tipc_addr_domain_valid(addr))
- return NULL;
-
- /* Look for direct link to destination processsor */
- n_ptr = tipc_node_find(addr);
- if (n_ptr && tipc_node_has_active_links(n_ptr))
- return n_ptr;
-
- /* Cluster local system nodes *must* have direct links */
- if (!is_slave(addr) && in_own_cluster(addr))
- return NULL;
-
- /* Look for cluster local router with direct link to node */
- router_addr = tipc_node_select_router(n_ptr, selector);
- if (router_addr)
- return tipc_node_select(router_addr, selector);
-
- /* Slave nodes can only be accessed within own cluster via a
- known router with direct link -- if no router was found,give up */
- if (is_slave(addr))
- return NULL;
-
- /* Inter zone/cluster -- find any direct link to remote cluster */
- addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
- n_ptr = tipc_net_select_remote_node(addr, selector);
- if (n_ptr && tipc_node_has_active_links(n_ptr))
- return n_ptr;
-
- /* Last resort -- look for any router to anywhere in remote zone */
- router_addr = tipc_net_select_router(addr, selector);
- if (router_addr)
- return tipc_node_select(router_addr, selector);
-
- return NULL;
-}
-
-/**
- * tipc_node_select_router - select router to reach specified node
- *
- * Uses a deterministic and fair algorithm for selecting router node.
- */
-
-u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref)
-{
- u32 ulim;
- u32 mask;
- u32 start;
- u32 r;
-
- if (!n_ptr)
- return 0;
-
- if (n_ptr->last_router < 0)
- return 0;
- ulim = ((n_ptr->last_router + 1) * 32) - 1;
-
- /* Start entry must be random */
- mask = tipc_max_nodes;
- while (mask > ulim)
- mask >>= 1;
- start = ref & mask;
- r = start;
-
- /* Lookup upwards with wrap-around */
- do {
- if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
- break;
- } while (++r <= ulim);
- if (r > ulim) {
- r = 1;
- do {
- if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
- break;
- } while (++r < start);
- assert(r != start);
- }
- assert(r && (r <= ulim));
- return tipc_addr(own_zone(), own_cluster(), r);
-}
-
-void tipc_node_add_router(struct tipc_node *n_ptr, u32 router)
-{
- u32 r_num = tipc_node(router);
-
- n_ptr->routers[r_num / 32] =
- ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
- n_ptr->last_router = tipc_max_nodes / 32;
- while ((--n_ptr->last_router >= 0) &&
- !n_ptr->routers[n_ptr->last_router]);
-}
-
-void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router)
-{
- u32 r_num = tipc_node(router);
-
- if (n_ptr->last_router < 0)
- return; /* No routes */
-
- n_ptr->routers[r_num / 32] =
- ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
- n_ptr->last_router = tipc_max_nodes / 32;
- while ((--n_ptr->last_router >= 0) &&
- !n_ptr->routers[n_ptr->last_router]);
-
- if (!tipc_node_is_up(n_ptr))
- node_lost_contact(n_ptr);
-}
-
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
u32 domain;
@@ -588,6 +385,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
struct tipc_node *n_ptr;
struct tipc_node_info node_info;
u32 payload_size;
+ u32 n_num;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -598,15 +396,15 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
" (network address)");
read_lock_bh(&tipc_net_lock);
- if (!tipc_nodes) {
+ if (!tipc_net.nodes) {
read_unlock_bh(&tipc_net_lock);
return tipc_cfg_reply_none();
}
- /* For now, get space for all other nodes
- (will need to modify this when slave nodes are supported */
+ /* For now, get space for all other nodes */
- payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
+ payload_size = TLV_SPACE(sizeof(node_info)) *
+ (tipc_net.highest_node - 1);
if (payload_size > 32768u) {
read_unlock_bh(&tipc_net_lock);
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
@@ -620,8 +418,9 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
/* Add TLVs for all nodes in scope */
- for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
- if (!tipc_in_scope(domain, n_ptr->addr))
+ for (n_num = 1; n_num <= tipc_net.highest_node; n_num++) {
+ n_ptr = tipc_net.nodes[n_num];
+ if (!n_ptr || !tipc_in_scope(domain, n_ptr->addr))
continue;
node_info.addr = htonl(n_ptr->addr);
node_info.up = htonl(tipc_node_is_up(n_ptr));
@@ -640,6 +439,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
struct tipc_node *n_ptr;
struct tipc_link_info link_info;
u32 payload_size;
+ u32 n_num;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -656,8 +456,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
/* Get space for all unicast links + multicast link */
- payload_size = TLV_SPACE(sizeof(link_info)) *
- (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1);
+ payload_size = TLV_SPACE(sizeof(link_info)) * (tipc_net.links + 1);
if (payload_size > 32768u) {
read_unlock_bh(&tipc_net_lock);
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
@@ -678,10 +477,11 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
/* Add TLVs for any other links in scope */
- for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
+ for (n_num = 1; n_num <= tipc_net.highest_node; n_num++) {
u32 i;
- if (!tipc_in_scope(domain, n_ptr->addr))
+ n_ptr = tipc_net.nodes[n_num];
+ if (!n_ptr || !tipc_in_scope(domain, n_ptr->addr))
continue;
tipc_node_lock(n_ptr);
for (i = 0; i < MAX_BEARERS; i++) {
diff --git a/net/tipc/node.h b/net/tipc/node.h
index fff331b2d26c..206a8efa410e 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -39,14 +39,13 @@
#include "node_subscr.h"
#include "addr.h"
-#include "cluster.h"
+#include "net.h"
#include "bearer.h"
/**
* struct tipc_node - TIPC node structure
* @addr: network address of node
* @lock: spinlock governing access to structure
- * @owner: pointer to cluster that node belongs to
* @next: pointer to next node in sorted list of cluster's nodes
* @nsub: list of "node down" subscriptions monitoring node
* @active_links: pointers to active links to node
@@ -55,8 +54,6 @@
* @cleanup_required: non-zero if cleaning up after a prior loss of contact
* @link_cnt: number of links to node
* @permit_changeover: non-zero if node has redundant links to this system
- * @routers: bitmap (used for multicluster communication)
- * @last_router: (used for multicluster communication)
* @bclink: broadcast-related info
* @supported: non-zero if node supports TIPC b'cast capability
* @acked: sequence # of last outbound b'cast message acknowledged by node
@@ -72,7 +69,6 @@
struct tipc_node {
u32 addr;
spinlock_t lock;
- struct cluster *owner;
struct tipc_node *next;
struct list_head nsub;
struct link *active_links[2];
@@ -81,8 +77,6 @@ struct tipc_node {
int working_links;
int cleanup_required;
int permit_changeover;
- u32 routers[512/32];
- int last_router;
struct {
int supported;
u32 acked;
@@ -106,34 +100,17 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr);
void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr);
int tipc_node_has_active_links(struct tipc_node *n_ptr);
int tipc_node_has_redundant_links(struct tipc_node *n_ptr);
-u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref);
-struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector);
int tipc_node_is_up(struct tipc_node *n_ptr);
-void tipc_node_add_router(struct tipc_node *n_ptr, u32 router);
-void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router);
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
static inline struct tipc_node *tipc_node_find(u32 addr)
{
if (likely(in_own_cluster(addr)))
- return tipc_local_nodes[tipc_node(addr)];
- else if (tipc_addr_domain_valid(addr)) {
- struct cluster *c_ptr = tipc_cltr_find(addr);
-
- if (c_ptr)
- return c_ptr->nodes[tipc_node(addr)];
- }
+ return tipc_net.nodes[tipc_node(addr)];
return NULL;
}
-static inline struct tipc_node *tipc_node_select(u32 addr, u32 selector)
-{
- if (likely(in_own_cluster(addr)))
- return tipc_local_nodes[tipc_node(addr)];
- return tipc_node_select_next_hop(addr, selector);
-}
-
static inline void tipc_node_lock(struct tipc_node *n_ptr)
{
spin_lock_bh(&n_ptr->lock);
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 7873283f4965..067bab2a0b98 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -38,7 +38,6 @@
#include "config.h"
#include "port.h"
#include "name_table.h"
-#include "user_reg.h"
/* Connection management: */
#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */
@@ -47,16 +46,16 @@
#define MAX_REJECT_SIZE 1024
-static struct sk_buff *msg_queue_head = NULL;
-static struct sk_buff *msg_queue_tail = NULL;
+static struct sk_buff *msg_queue_head;
+static struct sk_buff *msg_queue_tail;
DEFINE_SPINLOCK(tipc_port_list_lock);
static DEFINE_SPINLOCK(queue_lock);
static LIST_HEAD(ports);
static void port_handle_node_down(unsigned long ref);
-static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
-static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
+static struct sk_buff *port_build_self_abort_msg(struct port *, u32 err);
+static struct sk_buff *port_build_peer_abort_msg(struct port *, u32 err);
static void port_timeout(unsigned long ref);
@@ -132,9 +131,8 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
}
}
res = tipc_bclink_send_msg(buf);
- if ((res < 0) && (dports.count != 0)) {
+ if ((res < 0) && (dports.count != 0))
buf_discard(ibuf);
- }
} else {
ibuf = buf;
}
@@ -156,7 +154,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
{
- struct tipc_msg* msg;
+ struct tipc_msg *msg;
struct port_list dports = {0, NULL, };
struct port_list *item = dp;
int cnt = 0;
@@ -189,13 +187,11 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
if (b == NULL) {
warn("Unable to deliver multicast message(s)\n");
- msg_dbg(msg, "LOST:");
goto exit;
}
- if ((index == 0) && (cnt != 0)) {
+ if ((index == 0) && (cnt != 0))
item = item->next;
- }
- msg_set_destport(buf_msg(b),item->ports[index]);
+ msg_set_destport(buf_msg(b), item->ports[index]);
tipc_port_recv_msg(b);
}
}
@@ -271,10 +267,7 @@ int tipc_deleteport(u32 ref)
buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
tipc_nodesub_unsubscribe(&p_ptr->subscription);
}
- if (p_ptr->user_port) {
- tipc_reg_remove_port(p_ptr->user_port);
- kfree(p_ptr->user_port);
- }
+ kfree(p_ptr->user_port);
spin_lock_bh(&tipc_port_list_lock);
list_del(&p_ptr->port_list);
@@ -282,7 +275,6 @@ int tipc_deleteport(u32 ref)
spin_unlock_bh(&tipc_port_list_lock);
k_term_timer(&p_ptr->timer);
kfree(p_ptr);
- dbg("Deleted port %u\n", ref);
tipc_net_route_msg(buf);
return 0;
}
@@ -368,7 +360,6 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
msg_set_orignode(msg, orignode);
msg_set_transp_seqno(msg, seqno);
msg_set_msgcnt(msg, ack);
- msg_dbg(msg, "PORT>SEND>:");
}
return buf;
}
@@ -386,7 +377,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
data_sz = MAX_REJECT_SIZE;
if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
imp++;
- msg_dbg(msg, "port->rej: ");
/* discard rejected message if it shouldn't be returned to sender */
if (msg_errcode(msg) || msg_dest_droppable(msg)) {
@@ -492,7 +482,7 @@ static void port_timeout(unsigned long ref)
static void port_handle_node_down(unsigned long ref)
{
struct port *p_ptr = tipc_port_lock(ref);
- struct sk_buff* buf = NULL;
+ struct sk_buff *buf = NULL;
if (!p_ptr)
return;
@@ -549,8 +539,6 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
struct sk_buff *r_buf = NULL;
struct sk_buff *abort_buf = NULL;
- msg_dbg(msg, "PORT<RECV<:");
-
if (!p_ptr) {
err = TIPC_ERR_NO_PORT;
} else if (p_ptr->publ.connected) {
@@ -630,8 +618,7 @@ static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
tipc_printf(buf, " via {%u,%u}",
p_ptr->publ.conn_type,
p_ptr->publ.conn_instance);
- }
- else if (p_ptr->publ.published) {
+ } else if (p_ptr->publ.published) {
tipc_printf(buf, " bound to");
list_for_each_entry(publ, &p_ptr->publications, pport_list) {
if (publ->lower == publ->upper)
@@ -934,12 +921,10 @@ void tipc_acknowledge(u32 ref, u32 ack)
}
/*
- * tipc_createport(): user level call. Will add port to
- * registry if non-zero user_ref.
+ * tipc_createport(): user level call.
*/
-int tipc_createport(u32 user_ref,
- void *usr_handle,
+int tipc_createport(void *usr_handle,
unsigned int importance,
tipc_msg_err_event error_cb,
tipc_named_msg_err_event named_error_cb,
@@ -966,7 +951,6 @@ int tipc_createport(u32 user_ref,
}
p_ptr->user_port = up_ptr;
- up_ptr->user_ref = user_ref;
up_ptr->usr_handle = usr_handle;
up_ptr->ref = p_ptr->publ.ref;
up_ptr->err_cb = error_cb;
@@ -976,8 +960,6 @@ int tipc_createport(u32 user_ref,
up_ptr->named_msg_cb = named_msg_cb;
up_ptr->conn_msg_cb = conn_msg_cb;
up_ptr->continue_event_cb = continue_event_cb;
- INIT_LIST_HEAD(&up_ptr->uport_list);
- tipc_reg_add_port(up_ptr);
*portref = p_ptr->publ.ref;
tipc_port_unlock(p_ptr);
return 0;
@@ -1022,9 +1004,6 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
if (!p_ptr)
return -EINVAL;
- dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
- "lower = %u, upper = %u\n",
- ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
if (p_ptr->publ.connected)
goto exit;
if (seq->lower > seq->upper)
@@ -1110,17 +1089,14 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
msg_set_origport(msg, p_ptr->publ.ref);
msg_set_transp_seqno(msg, 42);
msg_set_type(msg, TIPC_CONN_MSG);
- if (!may_route(peer->node))
- msg_set_hdr_sz(msg, SHORT_H_SIZE);
- else
- msg_set_hdr_sz(msg, LONG_H_SIZE);
+ msg_set_hdr_sz(msg, SHORT_H_SIZE);
p_ptr->probing_interval = PROBING_INTERVAL;
p_ptr->probing_state = CONFIRMED;
p_ptr->publ.connected = 1;
k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
- tipc_nodesub_subscribe(&p_ptr->subscription,peer->node,
+ tipc_nodesub_subscribe(&p_ptr->subscription, peer->node,
(void *)(unsigned long)ref,
(net_ev_handler)port_handle_node_down);
res = 0;
@@ -1367,7 +1343,6 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
skb_push(buf, DIR_MSG_H_SIZE);
skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE);
- msg_dbg(msg, "buf2port: ");
p_ptr->sent++;
if (dest->node == tipc_own_addr)
return tipc_port_recv_msg(buf);
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 3a807fcec2be..8e84b989949c 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -77,15 +77,12 @@ typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
/**
* struct user_port - TIPC user port (used with native API)
- * @user_ref: id of user who created user port
* @usr_handle: user-specified field
* @ref: object reference to associated TIPC port
* <various callback routines>
- * @uport_list: adjacent user ports in list of ports held by user
*/
struct user_port {
- u32 user_ref;
void *usr_handle;
u32 ref;
tipc_msg_err_event err_cb;
@@ -95,7 +92,6 @@ struct user_port {
tipc_named_msg_event named_msg_cb;
tipc_conn_msg_event conn_msg_cb;
tipc_continue_event continue_event_cb;
- struct list_head uport_list;
};
/**
@@ -181,7 +177,7 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
void tipc_acknowledge(u32 port_ref, u32 ack);
-int tipc_createport(unsigned int tipc_user, void *usr_handle,
+int tipc_createport(void *usr_handle,
unsigned int importance, tipc_msg_err_event error_cb,
tipc_named_msg_err_event named_error_cb,
tipc_conn_shutdown_event conn_error_cb, tipc_msg_event msg_cb,
@@ -262,7 +258,7 @@ static inline void tipc_port_unlock(struct port *p_ptr)
spin_unlock_bh(p_ptr->publ.lock);
}
-static inline struct port* tipc_port_deref(u32 ref)
+static inline struct port *tipc_port_deref(u32 ref)
{
return (struct port *)tipc_ref_deref(ref);
}
@@ -320,7 +316,6 @@ static inline int tipc_port_recv_msg(struct sk_buff *buf)
err = TIPC_ERR_NO_PORT;
}
reject:
- dbg("port->rejecting, err = %x..\n",err);
return tipc_reject_msg(buf, err);
}
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index ab8ad32d8c20..83116892528b 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -89,7 +89,7 @@ struct ref_table {
* have a reference value of 0 (although this is unlikely).
*/
-static struct ref_table tipc_ref_table = { NULL };
+static struct ref_table tipc_ref_table;
static DEFINE_RWLOCK(ref_table_lock);
@@ -178,14 +178,12 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
next_plus_upper = entry->ref;
tipc_ref_table.first_free = next_plus_upper & index_mask;
ref = (next_plus_upper & ~index_mask) + index;
- }
- else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
+ } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
index = tipc_ref_table.init_point++;
entry = &(tipc_ref_table.entries[index]);
spin_lock_init(&entry->lock);
ref = tipc_ref_table.start_mask + index;
- }
- else {
+ } else {
ref = 0;
}
write_unlock_bh(&ref_table_lock);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index cd0bb77f2673..2b02a3a80313 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -34,17 +34,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/net.h>
-#include <linux/socket.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/gfp.h>
-#include <asm/string.h>
-#include <asm/atomic.h>
#include <net/sock.h>
#include <linux/tipc.h>
@@ -79,7 +68,7 @@ static const struct proto_ops msg_ops;
static struct proto tipc_proto;
-static int sockets_enabled = 0;
+static int sockets_enabled;
static atomic_t tipc_queue_size = ATOMIC_INIT(0);
@@ -386,7 +375,7 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
*
* NOTE: This routine doesn't need to take the socket lock since it only
* accesses socket information that is unchanging (or which changes in
- * a completely predictable manner).
+ * a completely predictable manner).
*/
static int get_name(struct socket *sock, struct sockaddr *uaddr,
@@ -574,26 +563,26 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
do {
if (dest->addrtype == TIPC_ADDR_NAME) {
- if ((res = dest_name_check(dest, m)))
+ res = dest_name_check(dest, m);
+ if (res)
break;
res = tipc_send2name(tport->ref,
&dest->addr.name.name,
dest->addr.name.domain,
m->msg_iovlen,
m->msg_iov);
- }
- else if (dest->addrtype == TIPC_ADDR_ID) {
+ } else if (dest->addrtype == TIPC_ADDR_ID) {
res = tipc_send2port(tport->ref,
&dest->addr.id,
m->msg_iovlen,
m->msg_iov);
- }
- else if (dest->addrtype == TIPC_ADDR_MCAST) {
+ } else if (dest->addrtype == TIPC_ADDR_MCAST) {
if (needs_conn) {
res = -EOPNOTSUPP;
break;
}
- if ((res = dest_name_check(dest, m)))
+ res = dest_name_check(dest, m);
+ if (res)
break;
res = tipc_multicast(tport->ref,
&dest->addr.nameseq,
@@ -601,9 +590,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
m->msg_iov);
}
if (likely(res != -ELINKCONG)) {
- if (needs_conn && (res >= 0)) {
+ if (needs_conn && (res >= 0))
sock->state = SS_CONNECTING;
- }
break;
}
if (m->msg_flags & MSG_DONTWAIT) {
@@ -662,9 +650,8 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
}
res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov);
- if (likely(res != -ELINKCONG)) {
+ if (likely(res != -ELINKCONG))
break;
- }
if (m->msg_flags & MSG_DONTWAIT) {
res = -EWOULDBLOCK;
break;
@@ -763,7 +750,8 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
bytes_to_send = curr_left;
my_iov.iov_base = curr_start;
my_iov.iov_len = bytes_to_send;
- if ((res = send_packet(NULL, sock, &my_msg, 0)) < 0) {
+ res = send_packet(NULL, sock, &my_msg, 0);
+ if (res < 0) {
if (bytes_sent)
res = bytes_sent;
goto exit;
@@ -823,8 +811,8 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
addr->addrtype = TIPC_ADDR_ID;
addr->addr.id.ref = msg_origport(msg);
addr->addr.id.node = msg_orignode(msg);
- addr->addr.name.domain = 0; /* could leave uninitialized */
- addr->scope = 0; /* could leave uninitialized */
+ addr->addr.name.domain = 0; /* could leave uninitialized */
+ addr->scope = 0; /* could leave uninitialized */
m->msg_namelen = sizeof(struct sockaddr_tipc);
}
}
@@ -858,12 +846,15 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
if (unlikely(err)) {
anc_data[0] = err;
anc_data[1] = msg_data_sz(msg);
- if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data)))
- return res;
- if (anc_data[1] &&
- (res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
- msg_data(msg))))
+ res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
+ if (res)
return res;
+ if (anc_data[1]) {
+ res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
+ msg_data(msg));
+ if (res)
+ return res;
+ }
}
/* Optionally capture message destination object */
@@ -891,9 +882,11 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
default:
has_name = 0;
}
- if (has_name &&
- (res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data)))
- return res;
+ if (has_name) {
+ res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
+ if (res)
+ return res;
+ }
return 0;
}
@@ -1226,42 +1219,25 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
*/
if (sock->state == SS_READY) {
- if (msg_connected(msg)) {
- msg_dbg(msg, "dispatch filter 1\n");
+ if (msg_connected(msg))
return TIPC_ERR_NO_PORT;
- }
} else {
- if (msg_mcast(msg)) {
- msg_dbg(msg, "dispatch filter 2\n");
+ if (msg_mcast(msg))
return TIPC_ERR_NO_PORT;
- }
if (sock->state == SS_CONNECTED) {
- if (!msg_connected(msg)) {
- msg_dbg(msg, "dispatch filter 3\n");
+ if (!msg_connected(msg))
return TIPC_ERR_NO_PORT;
- }
- }
- else if (sock->state == SS_CONNECTING) {
- if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
- msg_dbg(msg, "dispatch filter 4\n");
+ } else if (sock->state == SS_CONNECTING) {
+ if (!msg_connected(msg) && (msg_errcode(msg) == 0))
return TIPC_ERR_NO_PORT;
- }
- }
- else if (sock->state == SS_LISTENING) {
- if (msg_connected(msg) || msg_errcode(msg)) {
- msg_dbg(msg, "dispatch filter 5\n");
+ } else if (sock->state == SS_LISTENING) {
+ if (msg_connected(msg) || msg_errcode(msg))
return TIPC_ERR_NO_PORT;
- }
- }
- else if (sock->state == SS_DISCONNECTING) {
- msg_dbg(msg, "dispatch filter 6\n");
+ } else if (sock->state == SS_DISCONNECTING) {
return TIPC_ERR_NO_PORT;
- }
- else /* (sock->state == SS_UNCONNECTED) */ {
- if (msg_connected(msg) || msg_errcode(msg)) {
- msg_dbg(msg, "dispatch filter 7\n");
+ } else /* (sock->state == SS_UNCONNECTED) */ {
+ if (msg_connected(msg) || msg_errcode(msg))
return TIPC_ERR_NO_PORT;
- }
}
}
@@ -1280,7 +1256,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
/* Enqueue message (finally!) */
- msg_dbg(msg, "<DISP<: ");
TIPC_SKB_CB(buf)->handle = msg_data(msg);
atomic_inc(&tipc_queue_size);
__skb_queue_tail(&sk->sk_receive_queue, buf);
@@ -1441,9 +1416,8 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
m.msg_name = dest;
m.msg_namelen = destlen;
res = send_msg(NULL, sock, &m, 0);
- if (res < 0) {
+ if (res < 0)
goto exit;
- }
/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
@@ -1465,11 +1439,10 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
advance_rx_queue(sk);
}
} else {
- if (sock->state == SS_CONNECTED) {
+ if (sock->state == SS_CONNECTED)
res = -EISCONN;
- } else {
+ else
res = -ECONNREFUSED;
- }
}
} else {
if (res == 0)
@@ -1588,7 +1561,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
* Respond to 'SYN+' by queuing it on new socket.
*/
- msg_dbg(msg,"<ACC<: ");
if (!msg_data_sz(msg)) {
struct msghdr m = {NULL,};
@@ -1696,7 +1668,8 @@ static int setsockopt(struct socket *sock,
return -ENOPROTOOPT;
if (ol < sizeof(value))
return -EINVAL;
- if ((res = get_user(value, (u32 __user *)ov)))
+ res = get_user(value, (u32 __user *)ov);
+ if (res)
return res;
lock_sock(sk);
@@ -1754,7 +1727,8 @@ static int getsockopt(struct socket *sock,
return put_user(0, ol);
if (lvl != SOL_TIPC)
return -ENOPROTOOPT;
- if ((res = get_user(len, ol)))
+ res = get_user(len, ol);
+ if (res)
return res;
lock_sock(sk);
@@ -1773,10 +1747,10 @@ static int getsockopt(struct socket *sock,
value = jiffies_to_msecs(tipc_sk(sk)->conn_timeout);
/* no need to set "res", since already 0 at this point */
break;
- case TIPC_NODE_RECVQ_DEPTH:
+ case TIPC_NODE_RECVQ_DEPTH:
value = (u32)atomic_read(&tipc_queue_size);
break;
- case TIPC_SOCK_RECVQ_DEPTH:
+ case TIPC_SOCK_RECVQ_DEPTH:
value = skb_queue_len(&sk->sk_receive_queue);
break;
default:
@@ -1785,20 +1759,16 @@ static int getsockopt(struct socket *sock,
release_sock(sk);
- if (res) {
- /* "get" failed */
- }
- else if (len < sizeof(value)) {
- res = -EINVAL;
- }
- else if (copy_to_user(ov, &value, sizeof(value))) {
- res = -EFAULT;
- }
- else {
- res = put_user(sizeof(value), ol);
- }
+ if (res)
+ return res; /* "get" failed */
- return res;
+ if (len < sizeof(value))
+ return -EINVAL;
+
+ if (copy_to_user(ov, &value, sizeof(value)))
+ return -EFAULT;
+
+ return put_user(sizeof(value), ol);
}
/**
@@ -1806,7 +1776,7 @@ static int getsockopt(struct socket *sock,
*/
static const struct proto_ops msg_ops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.family = AF_TIPC,
.release = release,
.bind = bind,
@@ -1827,7 +1797,7 @@ static const struct proto_ops msg_ops = {
};
static const struct proto_ops packet_ops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.family = AF_TIPC,
.release = release,
.bind = bind,
@@ -1848,7 +1818,7 @@ static const struct proto_ops packet_ops = {
};
static const struct proto_ops stream_ops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.family = AF_TIPC,
.release = release,
.bind = bind,
@@ -1869,7 +1839,7 @@ static const struct proto_ops stream_ops = {
};
static const struct net_proto_family tipc_family_ops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.family = AF_TIPC,
.create = tipc_create
};
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 23f43d03980c..ca04479c3d42 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -36,7 +36,7 @@
#include "core.h"
#include "name_table.h"
-#include "user_reg.h"
+#include "port.h"
#include "subscr.h"
/**
@@ -64,14 +64,13 @@ struct subscriber {
*/
struct top_srv {
- u32 user_ref;
u32 setup_port;
atomic_t subscription_count;
struct list_head subscriber_list;
spinlock_t lock;
};
-static struct top_srv topsrv = { 0 };
+static struct top_srv topsrv;
/**
* htohl - convert value to endianness used by destination
@@ -250,8 +249,6 @@ static void subscr_terminate(struct subscriber *subscriber)
k_cancel_timer(&sub->timer);
k_term_timer(&sub->timer);
}
- dbg("Term: Removing sub %u,%u,%u from subscriber %x list\n",
- sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
subscr_del(sub);
}
@@ -308,8 +305,6 @@ static void subscr_cancel(struct tipc_subscr *s,
k_term_timer(&sub->timer);
spin_lock_bh(subscriber->lock);
}
- dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n",
- sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
subscr_del(sub);
}
@@ -494,8 +489,7 @@ static void subscr_named_msg_event(void *usr_handle,
/* Create server port & establish connection to subscriber */
- tipc_createport(topsrv.user_ref,
- subscriber,
+ tipc_createport(subscriber,
importance,
NULL,
NULL,
@@ -544,19 +538,12 @@ int tipc_subscr_start(void)
struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
int res;
- memset(&topsrv, 0, sizeof (topsrv));
+ memset(&topsrv, 0, sizeof(topsrv));
spin_lock_init(&topsrv.lock);
INIT_LIST_HEAD(&topsrv.subscriber_list);
spin_lock_bh(&topsrv.lock);
- res = tipc_attach(&topsrv.user_ref);
- if (res) {
- spin_unlock_bh(&topsrv.lock);
- return res;
- }
-
- res = tipc_createport(topsrv.user_ref,
- NULL,
+ res = tipc_createport(NULL,
TIPC_CRITICAL_IMPORTANCE,
NULL,
NULL,
@@ -570,16 +557,17 @@ int tipc_subscr_start(void)
goto failed;
res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
- if (res)
+ if (res) {
+ tipc_deleteport(topsrv.setup_port);
+ topsrv.setup_port = 0;
goto failed;
+ }
spin_unlock_bh(&topsrv.lock);
return 0;
failed:
err("Failed to create subscription service\n");
- tipc_detach(topsrv.user_ref);
- topsrv.user_ref = 0;
spin_unlock_bh(&topsrv.lock);
return res;
}
@@ -590,8 +578,10 @@ void tipc_subscr_stop(void)
struct subscriber *subscriber_temp;
spinlock_t *subscriber_lock;
- if (topsrv.user_ref) {
+ if (topsrv.setup_port) {
tipc_deleteport(topsrv.setup_port);
+ topsrv.setup_port = 0;
+
list_for_each_entry_safe(subscriber, subscriber_temp,
&topsrv.subscriber_list,
subscriber_list) {
@@ -600,7 +590,5 @@ void tipc_subscr_stop(void)
subscr_terminate(subscriber);
spin_unlock_bh(subscriber_lock);
}
- tipc_detach(topsrv.user_ref);
- topsrv.user_ref = 0;
}
}
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
deleted file mode 100644
index 2e2702e2049c..000000000000
--- a/net/tipc/user_reg.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * net/tipc/user_reg.c: TIPC user registry code
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "user_reg.h"
-
-/*
- * TIPC user registry keeps track of users of the tipc_port interface.
- *
- * The registry utilizes an array of "TIPC user" entries;
- * a user's ID is the index of their associated array entry.
- * Array entry 0 is not used, so userid 0 is not valid;
- * TIPC sometimes uses this value to denote an anonymous user.
- * The list of free entries is initially chained from last entry to entry 1.
- */
-
-/**
- * struct tipc_user - registered TIPC user info
- * @next: index of next free registry entry (or -1 for an allocated entry)
- * @ports: list of user ports owned by the user
- */
-
-struct tipc_user {
- int next;
- struct list_head ports;
-};
-
-#define MAX_USERID 64
-#define USER_LIST_SIZE ((MAX_USERID + 1) * sizeof(struct tipc_user))
-
-static struct tipc_user *users = NULL;
-static u32 next_free_user = MAX_USERID + 1;
-static DEFINE_SPINLOCK(reg_lock);
-
-/**
- * reg_init - create TIPC user registry (but don't activate it)
- *
- * If registry has been pre-initialized it is left "as is".
- * NOTE: This routine may be called when TIPC is inactive.
- */
-
-static int reg_init(void)
-{
- u32 i;
-
- spin_lock_bh(&reg_lock);
- if (!users) {
- users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);
- if (users) {
- for (i = 1; i <= MAX_USERID; i++) {
- users[i].next = i - 1;
- }
- next_free_user = MAX_USERID;
- }
- }
- spin_unlock_bh(&reg_lock);
- return users ? 0 : -ENOMEM;
-}
-
-/**
- * tipc_reg_start - activate TIPC user registry
- */
-
-int tipc_reg_start(void)
-{
- return reg_init();
-}
-
-/**
- * tipc_reg_stop - shut down & delete TIPC user registry
- */
-
-void tipc_reg_stop(void)
-{
- if (!users)
- return;
-
- kfree(users);
- users = NULL;
-}
-
-/**
- * tipc_attach - register a TIPC user
- *
- * NOTE: This routine may be called when TIPC is inactive.
- */
-
-int tipc_attach(u32 *userid)
-{
- struct tipc_user *user_ptr;
-
- if (!users)
- reg_init();
-
- spin_lock_bh(&reg_lock);
- if (!next_free_user) {
- spin_unlock_bh(&reg_lock);
- return -EBUSY;
- }
- user_ptr = &users[next_free_user];
- *userid = next_free_user;
- next_free_user = user_ptr->next;
- user_ptr->next = -1;
- spin_unlock_bh(&reg_lock);
-
- INIT_LIST_HEAD(&user_ptr->ports);
- atomic_inc(&tipc_user_count);
-
- return 0;
-}
-
-/**
- * tipc_detach - deregister a TIPC user
- */
-
-void tipc_detach(u32 userid)
-{
- struct tipc_user *user_ptr;
- struct list_head ports_temp;
- struct user_port *up_ptr, *temp_up_ptr;
-
- if ((userid == 0) || (userid > MAX_USERID))
- return;
-
- spin_lock_bh(&reg_lock);
- if ((!users) || (users[userid].next >= 0)) {
- spin_unlock_bh(&reg_lock);
- return;
- }
-
- user_ptr = &users[userid];
- INIT_LIST_HEAD(&ports_temp);
- list_splice(&user_ptr->ports, &ports_temp);
- user_ptr->next = next_free_user;
- next_free_user = userid;
- spin_unlock_bh(&reg_lock);
-
- atomic_dec(&tipc_user_count);
-
- list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
- tipc_deleteport(up_ptr->ref);
- }
-}
-
-/**
- * tipc_reg_add_port - register a user's driver port
- */
-
-int tipc_reg_add_port(struct user_port *up_ptr)
-{
- struct tipc_user *user_ptr;
-
- if (up_ptr->user_ref == 0)
- return 0;
- if (up_ptr->user_ref > MAX_USERID)
- return -EINVAL;
- if ((tipc_mode == TIPC_NOT_RUNNING) || !users )
- return -ENOPROTOOPT;
-
- spin_lock_bh(&reg_lock);
- user_ptr = &users[up_ptr->user_ref];
- list_add(&up_ptr->uport_list, &user_ptr->ports);
- spin_unlock_bh(&reg_lock);
- return 0;
-}
-
-/**
- * tipc_reg_remove_port - deregister a user's driver port
- */
-
-int tipc_reg_remove_port(struct user_port *up_ptr)
-{
- if (up_ptr->user_ref == 0)
- return 0;
- if (up_ptr->user_ref > MAX_USERID)
- return -EINVAL;
- if (!users )
- return -ENOPROTOOPT;
-
- spin_lock_bh(&reg_lock);
- list_del_init(&up_ptr->uport_list);
- spin_unlock_bh(&reg_lock);
- return 0;
-}
-
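
The header comment of the deleted user_reg.c above describes the data structure it used: an array of slots in which a user's ID is simply the slot index and free slots are chained through their next field. Purely as an illustration of that pattern (a standalone userspace sketch with invented names, not the removed TIPC code, and without the spinlock protection the original had):

#include <stdio.h>

#define MAX_USERID 64

/* one slot per possible user; a slot's index is the user ID itself */
struct user_slot {
        int next;       /* index of next free slot, or -1 once allocated */
};

static struct user_slot users[MAX_USERID + 1];  /* slot 0 is never handed out */
static int next_free_user;

static void reg_init(void)
{
        int i;

        /* chain the free list from the last entry down to entry 1;
         * entry 1 points at 0, which doubles as the "registry full" marker */
        for (i = 1; i <= MAX_USERID; i++)
                users[i].next = i - 1;
        next_free_user = MAX_USERID;
}

static int user_attach(unsigned int *userid)
{
        struct user_slot *slot;

        if (!next_free_user)
                return -1;              /* no free slots left */
        slot = &users[next_free_user];
        *userid = next_free_user;       /* the ID is just the array index */
        next_free_user = slot->next;
        slot->next = -1;
        return 0;
}

static void user_detach(unsigned int userid)
{
        if (userid == 0 || userid > MAX_USERID || users[userid].next >= 0)
                return;                 /* invalid ID, or slot already free */
        users[userid].next = next_free_user;
        next_free_user = userid;
}

int main(void)
{
        unsigned int a = 0, b = 0;

        reg_init();
        if (!user_attach(&a) && !user_attach(&b))
                printf("attached users %u and %u\n", a, b);
        user_detach(a);
        return 0;
}
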
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
deleted file mode 100644
index 109eed0d6de3..000000000000
--- a/net/tipc/user_reg.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * net/tipc/user_reg.h: Include file for TIPC user registry code
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TIPC_USER_REG_H
-#define _TIPC_USER_REG_H
-
-#include "port.h"
-
-int tipc_reg_start(void);
-void tipc_reg_stop(void);
-
-int tipc_attach(unsigned int *userref);
-void tipc_detach(unsigned int userref);
-
-int tipc_reg_add_port(struct user_port *up_ptr);
-int tipc_reg_remove_port(struct user_port *up_ptr);
-
-#endif
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
deleted file mode 100644
index 1b61ca8c48ef..000000000000
--- a/net/tipc/zone.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * net/tipc/zone.c: TIPC zone management routines
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "zone.h"
-#include "cluster.h"
-#include "node.h"
-
-struct _zone *tipc_zone_create(u32 addr)
-{
- struct _zone *z_ptr;
- u32 z_num;
-
- if (!tipc_addr_domain_valid(addr)) {
- err("Zone creation failed, invalid domain 0x%x\n", addr);
- return NULL;
- }
-
- z_ptr = kzalloc(sizeof(*z_ptr), GFP_ATOMIC);
- if (!z_ptr) {
- warn("Zone creation failed, insufficient memory\n");
- return NULL;
- }
-
- z_num = tipc_zone(addr);
- z_ptr->addr = tipc_addr(z_num, 0, 0);
- tipc_net.zones[z_num] = z_ptr;
- return z_ptr;
-}
-
-void tipc_zone_delete(struct _zone *z_ptr)
-{
- u32 c_num;
-
- if (!z_ptr)
- return;
- for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
- tipc_cltr_delete(z_ptr->clusters[c_num]);
- }
- kfree(z_ptr);
-}
-
-void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
-{
- u32 c_num = tipc_cluster(c_ptr->addr);
-
- assert(c_ptr->addr);
- assert(c_num <= tipc_max_clusters);
- assert(z_ptr->clusters[c_num] == NULL);
- z_ptr->clusters[c_num] = c_ptr;
-}
-
-void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router)
-{
- u32 c_num;
-
- for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
- if (z_ptr->clusters[c_num]) {
- tipc_cltr_remove_as_router(z_ptr->clusters[c_num],
- router);
- }
- }
-}
-
-void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest)
-{
- u32 c_num;
-
- for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
- if (z_ptr->clusters[c_num]) {
- if (in_own_cluster(z_ptr->addr))
- continue;
- tipc_cltr_send_ext_routes(z_ptr->clusters[c_num], dest);
- }
- }
-}
-
-struct tipc_node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
-{
- struct cluster *c_ptr;
- struct tipc_node *n_ptr;
- u32 c_num;
-
- if (!z_ptr)
- return NULL;
- c_ptr = z_ptr->clusters[tipc_cluster(addr)];
- if (!c_ptr)
- return NULL;
- n_ptr = tipc_cltr_select_node(c_ptr, ref);
- if (n_ptr)
- return n_ptr;
-
- /* Links to any other clusters within this zone ? */
- for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
- c_ptr = z_ptr->clusters[c_num];
- if (!c_ptr)
- return NULL;
- n_ptr = tipc_cltr_select_node(c_ptr, ref);
- if (n_ptr)
- return n_ptr;
- }
- return NULL;
-}
-
-u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
-{
- struct cluster *c_ptr;
- u32 c_num;
- u32 router;
-
- if (!z_ptr)
- return 0;
- c_ptr = z_ptr->clusters[tipc_cluster(addr)];
- router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
- if (router)
- return router;
-
- /* Links to any other clusters within the zone? */
- for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
- c_ptr = z_ptr->clusters[c_num];
- router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
- if (router)
- return router;
- }
- return 0;
-}
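
The deleted zone helpers above share one lookup shape: try the cluster the address maps to first, then fall back to scanning the zone's remaining clusters. As a rough standalone illustration of that fallback only (invented types and a simplified loop, not the TIPC structures):

#include <stdio.h>
#include <stddef.h>

#define MAX_CLUSTERS 4

struct cluster { int has_node; };       /* stand-in for struct cluster */

struct zone {
        struct cluster *clusters[MAX_CLUSTERS + 1];
};

/* stand-in for tipc_cltr_select_node(): non-NULL when the cluster has a node */
static struct cluster *cluster_select(struct cluster *c)
{
        return (c && c->has_node) ? c : NULL;
}

static struct cluster *zone_select(struct zone *z, unsigned int preferred)
{
        struct cluster *hit;
        unsigned int i;

        if (!z)
                return NULL;

        /* first try the cluster the address points at ... */
        hit = cluster_select(z->clusters[preferred]);
        if (hit)
                return hit;

        /* ... then fall back to any other cluster in the zone */
        for (i = 1; i <= MAX_CLUSTERS; i++) {
                hit = cluster_select(z->clusters[i]);
                if (hit)
                        return hit;
        }
        return NULL;
}

int main(void)
{
        struct cluster c2 = { .has_node = 1 };
        struct zone z = { .clusters = { NULL, NULL, &c2, NULL, NULL } };

        printf("found: %s\n", zone_select(&z, 1) ? "yes" : "no");
        return 0;
}
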
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
deleted file mode 100644
index bd1c20ce9d06..000000000000
--- a/net/tipc/zone.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * net/tipc/zone.h: Include file for TIPC zone management routines
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TIPC_ZONE_H
-#define _TIPC_ZONE_H
-
-#include "node_subscr.h"
-#include "net.h"
-
-
-/**
- * struct _zone - TIPC zone structure
- * @addr: network address of zone
- * @clusters: array of pointers to all clusters within zone
- * @links: number of (unicast) links to zone
- */
-
-struct _zone {
- u32 addr;
- struct cluster *clusters[2]; /* currently limited to just 1 cluster */
- u32 links;
-};
-
-struct tipc_node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
-u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
-void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router);
-void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest);
-struct _zone *tipc_zone_create(u32 addr);
-void tipc_zone_delete(struct _zone *z_ptr);
-void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
-
-static inline struct _zone *tipc_zone_find(u32 addr)
-{
- return tipc_net.zones[tipc_zone(addr)];
-}
-
-#endif
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 417d7a6c36cf..dd419d286204 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1157,7 +1157,7 @@ restart:
goto restart;
}
- err = security_unix_stream_connect(sock, other->sk_socket, newsk);
+ err = security_unix_stream_connect(sk, other, newsk);
if (err) {
unix_state_unlock(sk);
goto out_unlock;
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index d0ee29063e5d..1f1ef70f34f2 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -95,7 +95,7 @@ config CFG80211_DEBUGFS
If unsure, say N.
config CFG80211_INTERNAL_REGDB
- bool "use statically compiled regulatory rules database" if EMBEDDED
+ bool "use statically compiled regulatory rules database" if EXPERT
default n
depends on CFG80211
---help---
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 79772fcc37bc..e9a5f8ca4c27 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -789,13 +789,23 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
cfg80211_mgd_wext_connect(rdev, wdev);
break;
#endif
+#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
- /* backward compat code ... */
- if (wdev->mesh_id_up_len)
- __cfg80211_join_mesh(rdev, dev, wdev->ssid,
- wdev->mesh_id_up_len,
- &default_mesh_config);
- break;
+ {
+ /* backward compat code... */
+ struct mesh_setup setup;
+ memcpy(&setup, &default_mesh_setup,
+ sizeof(setup));
+ /* back compat only needed for mesh_id */
+ setup.mesh_id = wdev->ssid;
+ setup.mesh_id_len = wdev->mesh_id_up_len;
+ if (wdev->mesh_id_up_len)
+ __cfg80211_join_mesh(rdev, dev,
+ &setup,
+ &default_mesh_config);
+ break;
+ }
+#endif
default:
break;
}
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 743203bb61ac..26a0a084e16b 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -287,13 +287,14 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
/* mesh */
extern const struct mesh_config default_mesh_config;
+extern const struct mesh_setup default_mesh_setup;
int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const u8 *mesh_id, u8 mesh_id_len,
+ const struct mesh_setup *setup,
const struct mesh_config *conf);
int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const u8 *mesh_id, u8 mesh_id_len,
+ const struct mesh_setup *setup,
const struct mesh_config *conf);
int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev);
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index e0b9747fe50a..73e39c171ffb 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -50,17 +50,19 @@ const struct mesh_config default_mesh_config = {
.min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT,
};
+const struct mesh_setup default_mesh_setup = {
+ .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
+ .path_metric = IEEE80211_PATH_METRIC_AIRTIME,
+ .vendor_ie = NULL,
+ .vendor_ie_len = 0,
+};
int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const u8 *mesh_id, u8 mesh_id_len,
+ const struct mesh_setup *setup,
const struct mesh_config *conf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct mesh_setup setup = {
- .mesh_id = mesh_id,
- .mesh_id_len = mesh_id_len,
- };
int err;
BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN);
@@ -73,16 +75,16 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
if (wdev->mesh_id_len)
return -EALREADY;
- if (!mesh_id_len)
+ if (!setup->mesh_id_len)
return -EINVAL;
if (!rdev->ops->join_mesh)
return -EOPNOTSUPP;
- err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, &setup);
+ err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup);
if (!err) {
- memcpy(wdev->ssid, mesh_id, mesh_id_len);
- wdev->mesh_id_len = mesh_id_len;
+ memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
+ wdev->mesh_id_len = setup->mesh_id_len;
}
return err;
@@ -90,14 +92,14 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const u8 *mesh_id, u8 mesh_id_len,
+ const struct mesh_setup *setup,
const struct mesh_config *conf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
wdev_lock(wdev);
- err = __cfg80211_join_mesh(rdev, dev, mesh_id, mesh_id_len, conf);
+ err = __cfg80211_join_mesh(rdev, dev, setup, conf);
wdev_unlock(wdev);
return err;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index d7680f2a4c5b..aa5df8865ff7 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -263,6 +263,28 @@ void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len)
}
EXPORT_SYMBOL(cfg80211_send_disassoc);
+void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf,
+ size_t len)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_send_unprot_deauth(rdev, dev, buf, len, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(cfg80211_send_unprot_deauth);
+
+void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
+ size_t len)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_send_unprot_disassoc(rdev, dev, buf, len, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
+
static void __cfg80211_auth_remove(struct wireless_dev *wdev, const u8 *addr)
{
int i;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c3f80e565365..9b62710891a2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -123,7 +123,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
.len = NL80211_MAX_SUPP_RATES },
[NL80211_ATTR_BSS_HT_OPMODE] = { .type = NLA_U16 },
- [NL80211_ATTR_MESH_PARAMS] = { .type = NLA_NESTED },
+ [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED },
[NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY,
.len = NL80211_HT_CAPABILITY_LEN },
@@ -171,6 +171,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_WIPHY_ANTENNA_RX] = { .type = NLA_U32 },
[NL80211_ATTR_MCAST_RATE] = { .type = NLA_U32 },
[NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG },
+ [NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED },
};
/* policy for the key attributes */
@@ -182,6 +183,14 @@ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
[NL80211_KEY_DEFAULT] = { .type = NLA_FLAG },
[NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
[NL80211_KEY_TYPE] = { .type = NLA_U32 },
+ [NL80211_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED },
+};
+
+/* policy for the key default flags */
+static const struct nla_policy
+nl80211_key_default_policy[NUM_NL80211_KEY_DEFAULT_TYPES] = {
+ [NL80211_KEY_DEFAULT_TYPE_UNICAST] = { .type = NLA_FLAG },
+ [NL80211_KEY_DEFAULT_TYPE_MULTICAST] = { .type = NLA_FLAG },
};
/* ifidx get helper */
@@ -314,6 +323,7 @@ struct key_parse {
int idx;
int type;
bool def, defmgmt;
+ bool def_uni, def_multi;
};
static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
@@ -327,6 +337,13 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
k->def = !!tb[NL80211_KEY_DEFAULT];
k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT];
+ if (k->def) {
+ k->def_uni = true;
+ k->def_multi = true;
+ }
+ if (k->defmgmt)
+ k->def_multi = true;
+
if (tb[NL80211_KEY_IDX])
k->idx = nla_get_u8(tb[NL80211_KEY_IDX]);
@@ -349,6 +366,19 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
return -EINVAL;
}
+ if (tb[NL80211_KEY_DEFAULT_TYPES]) {
+ struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
+ int err = nla_parse_nested(kdt,
+ NUM_NL80211_KEY_DEFAULT_TYPES - 1,
+ tb[NL80211_KEY_DEFAULT_TYPES],
+ nl80211_key_default_policy);
+ if (err)
+ return err;
+
+ k->def_uni = kdt[NL80211_KEY_DEFAULT_TYPE_UNICAST];
+ k->def_multi = kdt[NL80211_KEY_DEFAULT_TYPE_MULTICAST];
+ }
+
return 0;
}
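
The def_uni/def_multi pair introduced here records which traffic classes a default key should cover: a plain default key covers both, a default management key is multicast-only, and the new NL80211_KEY_DEFAULT_TYPES attribute can narrow the choice, with a later check rejecting impossible combinations. A standalone sketch of just that flag derivation and check (plain C, no netlink parsing, names invented for the example):

#include <stdbool.h>
#include <stdio.h>

struct key_flags {
        bool def, defmgmt;       /* what userspace asked for */
        bool def_uni, def_multi; /* derived traffic-class coverage */
};

/* derive coverage the way the parser does, optionally narrowed by
 * explicit unicast/multicast selectors */
static void derive(struct key_flags *k, bool have_types,
                   bool want_uni, bool want_multi)
{
        if (k->def) {
                k->def_uni = true;
                k->def_multi = true;
        }
        if (k->defmgmt)
                k->def_multi = true;

        if (have_types) {
                k->def_uni = want_uni;
                k->def_multi = want_multi;
        }
}

/* mirror of the validation step: def and defmgmt are mutually exclusive,
 * and a management default key may only ever be multicast */
static int validate(const struct key_flags *k)
{
        if (k->def && k->defmgmt)
                return -1;
        if (k->defmgmt && (k->def_uni || !k->def_multi))
                return -1;
        return 0;
}

int main(void)
{
        struct key_flags k = { .def = true };

        derive(&k, true, true, false);  /* default key, unicast-only */
        printf("uni=%d multi=%d valid=%d\n",
               k.def_uni, k.def_multi, validate(&k) == 0);
        return 0;
}
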
@@ -373,12 +403,32 @@ static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k)
k->def = !!info->attrs[NL80211_ATTR_KEY_DEFAULT];
k->defmgmt = !!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT];
+ if (k->def) {
+ k->def_uni = true;
+ k->def_multi = true;
+ }
+ if (k->defmgmt)
+ k->def_multi = true;
+
if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
return -EINVAL;
}
+ if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) {
+ struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
+ int err = nla_parse_nested(
+ kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
+ info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES],
+ nl80211_key_default_policy);
+ if (err)
+ return err;
+
+ k->def_uni = kdt[NL80211_KEY_DEFAULT_TYPE_UNICAST];
+ k->def_multi = kdt[NL80211_KEY_DEFAULT_TYPE_MULTICAST];
+ }
+
return 0;
}
@@ -401,6 +451,11 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
if (k->def && k->defmgmt)
return -EINVAL;
+ if (k->defmgmt) {
+ if (k->def_uni || !k->def_multi)
+ return -EINVAL;
+ }
+
if (k->idx != -1) {
if (k->defmgmt) {
if (k->idx < 4 || k->idx > 5)
@@ -450,6 +505,8 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
goto error;
def = 1;
result->def = parse.idx;
+ if (!parse.def_uni || !parse.def_multi)
+ goto error;
} else if (parse.defmgmt)
goto error;
err = cfg80211_validate_key_settings(rdev, &parse.p,
@@ -548,7 +605,13 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL)
NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE);
- if (dev->ops->get_antenna) {
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
+ dev->wiphy.available_antennas_tx);
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
+ dev->wiphy.available_antennas_rx);
+
+ if ((dev->wiphy.available_antennas_tx ||
+ dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
u32 tx_ant = 0, rx_ant = 0;
int res;
res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant);
@@ -662,7 +725,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
CMD(add_beacon, NEW_BEACON);
CMD(add_station, NEW_STATION);
CMD(add_mpath, NEW_MPATH);
- CMD(update_mesh_params, SET_MESH_PARAMS);
+ CMD(update_mesh_config, SET_MESH_CONFIG);
CMD(change_bss, SET_BSS);
CMD(auth, AUTHENTICATE);
CMD(assoc, ASSOCIATE);
@@ -698,6 +761,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
nla_nest_end(msg, nl_cmds);
+ if (dev->ops->remain_on_channel)
+ NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
+ dev->wiphy.max_remain_on_channel_duration);
+
/* for now at least assume all drivers have it */
if (dev->ops->mgmt_tx)
NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
@@ -1046,7 +1113,9 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] &&
info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]) {
u32 tx_ant, rx_ant;
- if (!rdev->ops->set_antenna) {
+ if ((!rdev->wiphy.available_antennas_tx &&
+ !rdev->wiphy.available_antennas_rx) ||
+ !rdev->ops->set_antenna) {
result = -EOPNOTSUPP;
goto bad_res;
}
@@ -1054,6 +1123,17 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
tx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX]);
rx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]);
+ /* reject antenna configurations which don't match the
+ * available antenna masks, except for the "all" mask */
+ if ((~tx_ant && (tx_ant & ~rdev->wiphy.available_antennas_tx)) ||
+ (~rx_ant && (rx_ant & ~rdev->wiphy.available_antennas_rx))) {
+ result = -EINVAL;
+ goto bad_res;
+ }
+
+ tx_ant = tx_ant & rdev->wiphy.available_antennas_tx;
+ rx_ant = rx_ant & rdev->wiphy.available_antennas_rx;
+
result = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant);
if (result)
goto bad_res;
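
The antenna check added above treats an all-ones request as "use everything available", rejects any other request that names antennas outside the hardware's availability mask, and then clamps the request to that mask. The same bit logic in isolation (a small sketch with made-up masks; the explicit all-ones comparison stands in for the ~mask test in the hunk):

#include <stdint.h>
#include <stdio.h>

/* Returns 0 and writes the effective mask, or -1 if the request names
 * antennas the hardware does not have (the all-ones wildcard is allowed). */
static int check_antenna(uint32_t requested, uint32_t available,
                         uint32_t *effective)
{
        if (requested != 0xffffffffu && (requested & ~available))
                return -1;                  /* specific request with bad bits */
        *effective = requested & available; /* clamp wildcard, keep exact match */
        return 0;
}

int main(void)
{
        uint32_t eff = 0;

        /* pretend the hardware exposes antennas 0 and 1 (mask 0x3) */
        printf("0xffffffff -> %d, effective 0x%x\n",
               check_antenna(0xffffffffu, 0x3, &eff), eff);
        printf("0x2        -> %d, effective 0x%x\n",
               check_antenna(0x2, 0x3, &eff), eff);
        printf("0x4        -> %d (rejected)\n",
               check_antenna(0x4, 0x3, &eff));
        return 0;
}
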
@@ -1575,8 +1655,6 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
struct key_parse key;
int err;
struct net_device *dev = info->user_ptr[1];
- int (*func)(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index);
err = nl80211_parse_key(info, &key);
if (err)
@@ -1589,27 +1667,61 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
if (!key.def && !key.defmgmt)
return -EINVAL;
- if (key.def)
- func = rdev->ops->set_default_key;
- else
- func = rdev->ops->set_default_mgmt_key;
+ wdev_lock(dev->ieee80211_ptr);
- if (!func)
- return -EOPNOTSUPP;
+ if (key.def) {
+ if (!rdev->ops->set_default_key) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
- wdev_lock(dev->ieee80211_ptr);
- err = nl80211_key_allowed(dev->ieee80211_ptr);
- if (!err)
- err = func(&rdev->wiphy, dev, key.idx);
+ err = nl80211_key_allowed(dev->ieee80211_ptr);
+ if (err)
+ goto out;
+
+ if (!(rdev->wiphy.flags &
+ WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS)) {
+ if (!key.def_uni || !key.def_multi) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx,
+ key.def_uni, key.def_multi);
+
+ if (err)
+ goto out;
#ifdef CONFIG_CFG80211_WEXT
- if (!err) {
- if (func == rdev->ops->set_default_key)
- dev->ieee80211_ptr->wext.default_key = key.idx;
- else
- dev->ieee80211_ptr->wext.default_mgmt_key = key.idx;
- }
+ dev->ieee80211_ptr->wext.default_key = key.idx;
#endif
+ } else {
+ if (key.def_uni || !key.def_multi) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!rdev->ops->set_default_mgmt_key) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = nl80211_key_allowed(dev->ieee80211_ptr);
+ if (err)
+ goto out;
+
+ err = rdev->ops->set_default_mgmt_key(&rdev->wiphy,
+ dev, key.idx);
+ if (err)
+ goto out;
+
+#ifdef CONFIG_CFG80211_WEXT
+ dev->ieee80211_ptr->wext.default_mgmt_key = key.idx;
+#endif
+ }
+
+ out:
wdev_unlock(dev->ieee80211_ptr);
return err;
@@ -2569,7 +2681,7 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
return r;
}
-static int nl80211_get_mesh_params(struct sk_buff *skb,
+static int nl80211_get_mesh_config(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2584,7 +2696,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
if (wdev->iftype != NL80211_IFTYPE_MESH_POINT)
return -EOPNOTSUPP;
- if (!rdev->ops->get_mesh_params)
+ if (!rdev->ops->get_mesh_config)
return -EOPNOTSUPP;
wdev_lock(wdev);
@@ -2592,7 +2704,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
if (!wdev->mesh_id_len)
memcpy(&cur_params, &default_mesh_config, sizeof(cur_params));
else
- err = rdev->ops->get_mesh_params(&rdev->wiphy, dev,
+ err = rdev->ops->get_mesh_config(&rdev->wiphy, dev,
&cur_params);
wdev_unlock(wdev);
@@ -2604,10 +2716,10 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
if (!msg)
return -ENOMEM;
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
- NL80211_CMD_GET_MESH_PARAMS);
+ NL80211_CMD_GET_MESH_CONFIG);
if (!hdr)
goto nla_put_failure;
- pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_PARAMS);
+ pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
if (!pinfoattr)
goto nla_put_failure;
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
@@ -2669,7 +2781,15 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
};
-static int nl80211_parse_mesh_params(struct genl_info *info,
+static const struct nla_policy
+ nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = {
+ [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
+ [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
+ [NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN },
+};
+
+static int nl80211_parse_mesh_config(struct genl_info *info,
struct mesh_config *cfg,
u32 *mask_out)
{
@@ -2685,10 +2805,10 @@ do {\
} while (0);\
- if (!info->attrs[NL80211_ATTR_MESH_PARAMS])
+ if (!info->attrs[NL80211_ATTR_MESH_CONFIG])
return -EINVAL;
if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX,
- info->attrs[NL80211_ATTR_MESH_PARAMS],
+ info->attrs[NL80211_ATTR_MESH_CONFIG],
nl80211_meshconf_params_policy))
return -EINVAL;
@@ -2735,15 +2855,51 @@ do {\
dot11MeshHWMPRootMode, mask,
NL80211_MESHCONF_HWMP_ROOTMODE,
nla_get_u8);
-
if (mask_out)
*mask_out = mask;
+
return 0;
#undef FILL_IN_MESH_PARAM_IF_SET
}
-static int nl80211_update_mesh_params(struct sk_buff *skb,
+static int nl80211_parse_mesh_setup(struct genl_info *info,
+ struct mesh_setup *setup)
+{
+ struct nlattr *tb[NL80211_MESH_SETUP_ATTR_MAX + 1];
+
+ if (!info->attrs[NL80211_ATTR_MESH_SETUP])
+ return -EINVAL;
+ if (nla_parse_nested(tb, NL80211_MESH_SETUP_ATTR_MAX,
+ info->attrs[NL80211_ATTR_MESH_SETUP],
+ nl80211_mesh_setup_params_policy))
+ return -EINVAL;
+
+ if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])
+ setup->path_sel_proto =
+ (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ?
+ IEEE80211_PATH_PROTOCOL_VENDOR :
+ IEEE80211_PATH_PROTOCOL_HWMP;
+
+ if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC])
+ setup->path_metric =
+ (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC])) ?
+ IEEE80211_PATH_METRIC_VENDOR :
+ IEEE80211_PATH_METRIC_AIRTIME;
+
+ if (tb[NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE]) {
+ struct nlattr *ieattr =
+ tb[NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE];
+ if (!is_valid_ie_attr(ieattr))
+ return -EINVAL;
+ setup->vendor_ie = nla_data(ieattr);
+ setup->vendor_ie_len = nla_len(ieattr);
+ }
+
+ return 0;
+}
+
+static int nl80211_update_mesh_config(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2756,10 +2912,10 @@ static int nl80211_update_mesh_params(struct sk_buff *skb,
if (wdev->iftype != NL80211_IFTYPE_MESH_POINT)
return -EOPNOTSUPP;
- if (!rdev->ops->update_mesh_params)
+ if (!rdev->ops->update_mesh_config)
return -EOPNOTSUPP;
- err = nl80211_parse_mesh_params(info, &cfg, &mask);
+ err = nl80211_parse_mesh_config(info, &cfg, &mask);
if (err)
return err;
@@ -2768,7 +2924,7 @@ static int nl80211_update_mesh_params(struct sk_buff *skb,
err = -ENOLINK;
if (!err)
- err = rdev->ops->update_mesh_params(&rdev->wiphy, dev,
+ err = rdev->ops->update_mesh_config(&rdev->wiphy, dev,
mask, &cfg);
wdev_unlock(wdev);
@@ -4128,7 +4284,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
* We should be on that channel for at least one jiffie,
* and more than 5 seconds seems excessive.
*/
- if (!duration || !msecs_to_jiffies(duration) || duration > 5000)
+ if (!duration || !msecs_to_jiffies(duration) ||
+ duration > rdev->wiphy.max_remain_on_channel_duration)
return -EINVAL;
if (!rdev->ops->remain_on_channel)
@@ -4296,6 +4453,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EOPNOTSUPP;
@@ -4336,6 +4494,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EOPNOTSUPP;
@@ -4562,14 +4721,16 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct mesh_config cfg;
+ struct mesh_setup setup;
int err;
/* start with default */
memcpy(&cfg, &default_mesh_config, sizeof(cfg));
+ memcpy(&setup, &default_mesh_setup, sizeof(setup));
- if (info->attrs[NL80211_ATTR_MESH_PARAMS]) {
+ if (info->attrs[NL80211_ATTR_MESH_CONFIG]) {
/* and parse parameters if given */
- err = nl80211_parse_mesh_params(info, &cfg, NULL);
+ err = nl80211_parse_mesh_config(info, &cfg, NULL);
if (err)
return err;
}
@@ -4578,10 +4739,17 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
!nla_len(info->attrs[NL80211_ATTR_MESH_ID]))
return -EINVAL;
- return cfg80211_join_mesh(rdev, dev,
- nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
- nla_len(info->attrs[NL80211_ATTR_MESH_ID]),
- &cfg);
+ setup.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
+ setup.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
+
+ if (info->attrs[NL80211_ATTR_MESH_SETUP]) {
+ /* parse additional setup parameters if given */
+ err = nl80211_parse_mesh_setup(info, &setup);
+ if (err)
+ return err;
+ }
+
+ return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
}
static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
@@ -4847,16 +5015,16 @@ static struct genl_ops nl80211_ops[] = {
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = NL80211_CMD_GET_MESH_PARAMS,
- .doit = nl80211_get_mesh_params,
+ .cmd = NL80211_CMD_GET_MESH_CONFIG,
+ .doit = nl80211_get_mesh_config,
.policy = nl80211_policy,
/* can be retrieved by unprivileged users */
.internal_flags = NL80211_FLAG_NEED_NETDEV |
NL80211_FLAG_NEED_RTNL,
},
{
- .cmd = NL80211_CMD_SET_MESH_PARAMS,
- .doit = nl80211_update_mesh_params,
+ .cmd = NL80211_CMD_SET_MESH_CONFIG,
+ .doit = nl80211_update_mesh_config,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
@@ -5368,6 +5536,22 @@ void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
NL80211_CMD_DISASSOCIATE, gfp);
}
+void nl80211_send_unprot_deauth(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *buf,
+ size_t len, gfp_t gfp)
+{
+ nl80211_send_mlme_event(rdev, netdev, buf, len,
+ NL80211_CMD_UNPROT_DEAUTHENTICATE, gfp);
+}
+
+void nl80211_send_unprot_disassoc(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *buf,
+ size_t len, gfp_t gfp)
+{
+ nl80211_send_mlme_event(rdev, netdev, buf, len,
+ NL80211_CMD_UNPROT_DISASSOCIATE, gfp);
+}
+
static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
struct net_device *netdev, int cmd,
const u8 *addr, gfp_t gfp)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 16c2f7190768..e3f7fa886966 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -25,6 +25,12 @@ void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
const u8 *buf, size_t len, gfp_t gfp);
+void nl80211_send_unprot_deauth(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len, gfp_t gfp);
+void nl80211_send_unprot_disassoc(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len, gfp_t gfp);
void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
const u8 *addr, gfp_t gfp);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 5ed615f94e0c..37693b6ef23a 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -661,7 +661,8 @@ static int freq_reg_info_regd(struct wiphy *wiphy,
* Follow the driver's regulatory domain, if present, unless a country
* IE has been processed or a user wants to help complaince further
*/
- if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ if (!custom_regd &&
+ last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
last_request->initiator != NL80211_REGDOM_SET_BY_USER &&
wiphy->regd)
regd = wiphy->regd;
@@ -751,7 +752,7 @@ static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain);
REG_DBG_PRINT("Updating information on frequency %d MHz "
- "for %d a MHz width channel with regulatory rule:\n",
+ "for a %d MHz width channel with regulatory rule:\n",
chan->center_freq,
KHZ_TO_MHZ(desired_bw_khz));
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 503ebb86ba18..ea427f418f64 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -464,6 +464,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
if (res->pub.beacon_ies) {
size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
size_t ielen = res->pub.len_beacon_ies;
+ bool information_elements_is_beacon_ies =
+ (found->pub.information_elements ==
+ found->pub.beacon_ies);
if (found->pub.beacon_ies &&
!found->beacon_ies_allocated &&
@@ -487,6 +490,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
found->pub.len_beacon_ies = ielen;
}
}
+
+ /* Override IEs if they were from a beacon before */
+ if (information_elements_is_beacon_ies) {
+ found->pub.information_elements =
+ found->pub.beacon_ies;
+ found->pub.len_information_elements =
+ found->pub.len_beacon_ies;
+ }
}
kref_put(&res->ref, bss_release);
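
The scan.c change above deals with pointer aliasing: information_elements may simply point at beacon_ies, so once beacon_ies is replaced the alias has to be re-pointed or it keeps referencing the old buffer. A reduced illustration of that bookkeeping (plain C, invented structure, no reference counting):

#include <stdio.h>

struct bss {
        const char *beacon_ies;            /* latest beacon payload */
        size_t len_beacon_ies;
        const char *information_elements;  /* may alias beacon_ies */
        size_t len_information_elements;
};

static void update_beacon(struct bss *found, const char *ies, size_t len)
{
        /* remember whether the generic IE pointer was the beacon alias */
        int was_beacon_alias =
                (found->information_elements == found->beacon_ies);

        found->beacon_ies = ies;
        found->len_beacon_ies = len;

        /* re-point the alias, otherwise it keeps referencing old data */
        if (was_beacon_alias) {
                found->information_elements = found->beacon_ies;
                found->len_information_elements = found->len_beacon_ies;
        }
}

int main(void)
{
        static const char old_ies[] = "old", new_ies[] = "new";
        struct bss b = {
                .beacon_ies = old_ies,
                .len_beacon_ies = sizeof(old_ies) - 1,
                .information_elements = old_ies,
                .len_information_elements = sizeof(old_ies) - 1,
        };

        update_beacon(&b, new_ies, sizeof(new_ies) - 1);
        printf("ies now: %.*s\n",
               (int)b.len_information_elements, b.information_elements);
        return 0;
}
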
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 4de624ca4c63..7620ae2fcf18 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -689,7 +689,8 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
continue;
}
if (wdev->connect_keys->def == i)
- if (rdev->ops->set_default_key(wdev->wiphy, dev, i)) {
+ if (rdev->ops->set_default_key(wdev->wiphy, dev,
+ i, true, true)) {
netdev_err(dev, "failed to set defkey %d\n", i);
continue;
}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 12222ee6ebf2..3e5dbd4e4cd5 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -548,8 +548,8 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
__cfg80211_leave_ibss(rdev, wdev->netdev, true);
rejoin = true;
}
- err = rdev->ops->set_default_key(&rdev->wiphy,
- dev, idx);
+ err = rdev->ops->set_default_key(&rdev->wiphy, dev,
+ idx, true, true);
}
if (!err) {
wdev->wext.default_key = idx;
@@ -627,8 +627,8 @@ int cfg80211_wext_siwencode(struct net_device *dev,
err = 0;
wdev_lock(wdev);
if (wdev->current_bss)
- err = rdev->ops->set_default_key(&rdev->wiphy,
- dev, idx);
+ err = rdev->ops->set_default_key(&rdev->wiphy, dev,
+ idx, true, true);
if (!err)
wdev->wext.default_key = idx;
wdev_unlock(wdev);
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 55187c8f6420..406207515b5e 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -27,9 +27,19 @@
#include <net/sock.h>
#include <net/x25.h>
-/*
- * Parse a set of facilities into the facilities structures. Unrecognised
- * facilities are written to the debug log file.
+/**
+ * x25_parse_facilities - Parse facilities from skb into the facilities structs
+ *
+ * @skb: sk_buff to parse
+ * @facilities: Regular facilities, updated as facilities are found
+ * @dte_facs: ITU DTE facilities, updated as DTE facilities are found
+ * @vc_fac_mask: mask is updated with all facilities found
+ *
+ * Return codes:
+ * -1 - Parsing error, caller should drop call and clean up
+ * 0 - Parse OK, this skb has no facilities
+ * >0 - Parse OK, returns the length of the facilities header
+ *
*/
int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
@@ -62,7 +72,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
switch (*p & X25_FAC_CLASS_MASK) {
case X25_FAC_CLASS_A:
if (len < 2)
- return 0;
+ return -1;
switch (*p) {
case X25_FAC_REVERSE:
if((p[1] & 0x81) == 0x81) {
@@ -107,7 +117,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
break;
case X25_FAC_CLASS_B:
if (len < 3)
- return 0;
+ return -1;
switch (*p) {
case X25_FAC_PACKET_SIZE:
facilities->pacsize_in = p[1];
@@ -130,7 +140,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
break;
case X25_FAC_CLASS_C:
if (len < 4)
- return 0;
+ return -1;
printk(KERN_DEBUG "X.25: unknown facility %02X, "
"values %02X, %02X, %02X\n",
p[0], p[1], p[2], p[3]);
@@ -139,18 +149,18 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
break;
case X25_FAC_CLASS_D:
if (len < p[1] + 2)
- return 0;
+ return -1;
switch (*p) {
case X25_FAC_CALLING_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
- return 0;
+ return -1;
dte_facs->calling_len = p[2];
memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLING_AE;
break;
case X25_FAC_CALLED_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
- return 0;
+ return -1;
dte_facs->called_len = p[2];
memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLED_AE;
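
The kerneldoc added above defines a three-way return: negative for a malformed facilities block, zero when there is nothing to parse, positive for the number of bytes consumed, and the x25_in.c hunks that follow are the caller-side half of that contract. A generic sketch of consuming such a return (an invented toy parser, not the X.25 format):

#include <stdio.h>

/* toy parser: <0 malformed, 0 nothing to consume, >0 bytes consumed */
static int parse_block(const unsigned char *buf, int len)
{
        if (len < 1)
                return 0;
        if (buf[0] == 0xff)
                return -1;      /* malformed marker, caller must bail out */
        return (1 + buf[0] <= len) ? 1 + buf[0] : -1;
}

static int handle(const unsigned char *buf, int len)
{
        int used = parse_block(buf, len);

        if (used > 0) {
                buf += used;    /* analogous to skb_pull(skb, len) */
                len -= used;
        } else if (used < 0) {
                /* analogous to the new out_clear path: abandon the call */
                fprintf(stderr, "malformed block, clearing call\n");
                return -1;
        }
        /* used == 0: nothing to strip, carry on */
        printf("%d bytes left\n", len);
        return 0;
}

int main(void)
{
        const unsigned char good[] = { 2, 0xaa, 0xbb, 0x10 };
        const unsigned char bad[]  = { 0xff };

        handle(good, sizeof(good));
        handle(bad, sizeof(bad));
        return 0;
}
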
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index f729f022be69..15de65f04719 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -91,10 +91,10 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
{
struct x25_address source_addr, dest_addr;
int len;
+ struct x25_sock *x25 = x25_sk(sk);
switch (frametype) {
case X25_CALL_ACCEPTED: {
- struct x25_sock *x25 = x25_sk(sk);
x25_stop_timer(sk);
x25->condition = 0x00;
@@ -113,14 +113,16 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
&dest_addr);
if (len > 0)
skb_pull(skb, len);
+ else if (len < 0)
+ goto out_clear;
len = x25_parse_facilities(skb, &x25->facilities,
&x25->dte_facilities,
&x25->vc_facil_mask);
if (len > 0)
skb_pull(skb, len);
- else
- return -1;
+ else if (len < 0)
+ goto out_clear;
/*
* Copy any Call User Data.
*/
@@ -144,6 +146,12 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
}
return 0;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ return 0;
}
/*
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8eb889510916..61291965c5f6 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
+#include <net/ah.h>
#include <asm/uaccess.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/in6.h>
@@ -302,7 +303,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
- if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+ if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
+ ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
return -EINVAL;
*props = algo->desc.sadb_alg_id;