Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/icmp.c               |  22
-rw-r--r--  net/ipv4/route.c              |  24
-rw-r--r--  net/ipv4/tcp_output.c         |   6
-rw-r--r--  net/ipv6/addrconf.c           |   1
-rw-r--r--  net/ipv6/icmp.c               |  23
-rw-r--r--  net/ipv6/raw.c                |   6
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c    |   2
-rw-r--r--  net/mac80211/debugfs_netdev.c |  24
-rw-r--r--  net/mac80211/ieee80211_i.h    |   6
-rw-r--r--  net/mac80211/mesh.c           |   2
-rw-r--r--  net/mac80211/mlme.c           |  52
-rw-r--r--  net/sched/cls_api.c           |   2
-rw-r--r--  net/sched/cls_route.c         |   2
-rw-r--r--  net/sched/sch_api.c           |  70
-rw-r--r--  net/sched/sch_cbq.c           |   6
-rw-r--r--  net/sched/sch_generic.c       |   9
-rw-r--r--  net/sched/sch_hfsc.c          |   4
-rw-r--r--  net/sched/sch_htb.c           |   8
-rw-r--r--  net/sched/sch_netem.c         |   2
-rw-r--r--  net/sched/sch_teql.c          |   2
-rw-r--r--  net/sctp/auth.c               |   7
-rw-r--r--  net/sctp/socket.c             |  11
22 files changed, 174 insertions, 117 deletions
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 860558633b2c..55c355e63234 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
	return net->ipv4.icmp_sk[smp_processor_id()];
 }

-static inline int icmp_xmit_lock(struct sock *sk)
+static inline struct sock *icmp_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
	local_bh_disable();

+	sk = icmp_sk(net);
+
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path signals a
		 * dst_link_failure() for an outgoing ICMP packet.
		 */
		local_bh_enable();
-		return 1;
+		return NULL;
	}
-	return 0;
+	return sk;
 }

 static inline void icmp_xmit_unlock(struct sock *sk)
@@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
	struct ipcm_cookie ipc;
	struct rtable *rt = skb->rtable;
	struct net *net = dev_net(rt->u.dst.dev);
-	struct sock *sk = icmp_sk(net);
-	struct inet_sock *inet = inet_sk(sk);
+	struct sock *sk;
+	struct inet_sock *inet;
	__be32 daddr;

	if (ip_options_echo(&icmp_param->replyopts, skb))
		return;

-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
		return;
+	inet = inet_sk(sk);

	icmp_param->data.icmph.checksum = 0;

@@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
	if (!rt)
		goto out;
	net = dev_net(rt->u.dst.dev);
-	sk = icmp_sk(net);

	/*
	 *	Find the original header. It is expected to be valid, of course.
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
		}
	}

-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
		return;

	/*
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 71598f64c113..f62187bb6d08 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3122,14 +3122,23 @@ static ctl_table ipv4_route_table[] = {
	{ .ctl_name = 0 }
 };

-static __net_initdata struct ctl_path ipv4_route_path[] = {
+static struct ctl_table empty[1];
+
+static struct ctl_table ipv4_skeleton[] =
+{
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE,
+	  .mode = 0555, .child = ipv4_route_table},
+	{ .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
+	  .mode = 0555, .child = empty},
+	{ }
+};
+
+static __net_initdata struct ctl_path ipv4_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
-	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
	{ },
 };

-
 static struct ctl_table ipv4_route_flush_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
@@ -3142,6 +3151,13 @@ static struct ctl_table ipv4_route_flush_table[] = {
	{ .ctl_name = 0 },
 };

+static __net_initdata struct ctl_path ipv4_route_path[] = {
+	{ .procname = "net", .ctl_name = CTL_NET, },
+	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
+	{ },
+};
+
 static __net_init int sysctl_route_net_init(struct net *net)
 {
	struct ctl_table *tbl;
@@ -3293,7 +3309,7 @@ int __init ip_rt_init(void)
  */
 void __init ip_static_sysctl_init(void)
 {
-	register_sysctl_paths(ipv4_route_path, ipv4_route_table);
+	register_sysctl_paths(ipv4_path, ipv4_skeleton);
 }
 #endif
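Aside on the icmp.c hunks above: icmp_xmit_lock() no longer takes a socket and returns an error flag; it now looks up the per-CPU ICMP socket itself (with BHs disabled) and returns it locked, or NULL when the trylock fails. The short userspace sketch below shows the same acquire-and-return-the-resource shape with pthreads; xmit_ctx and xmit_lock are made-up illustrative names, not kernel APIs, and the example is not part of the patch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical per-context resource, standing in for the per-CPU ICMP socket. */
struct xmit_ctx {
	pthread_mutex_t lock;
	long packets;
};

/* Return the locked context, or NULL if it is already busy (trylock failed),
 * mirroring how icmp_xmit_lock() now returns the socket or NULL. */
static struct xmit_ctx *xmit_lock(struct xmit_ctx *ctx)
{
	if (pthread_mutex_trylock(&ctx->lock) != 0)
		return NULL;
	return ctx;
}

static void xmit_unlock(struct xmit_ctx *ctx)
{
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	static struct xmit_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.packets = 0,
	};
	struct xmit_ctx *locked = xmit_lock(&ctx);

	if (locked == NULL)	/* caller simply gives up, as icmp_reply() does */
		return EXIT_FAILURE;
	locked->packets++;
	xmit_unlock(locked);
	printf("sent %ld packet(s)\n", ctx.packets);
	return EXIT_SUCCESS;
}

Compile with -pthread; the point is only the shape of the helper, which lets callers drop the separate "which socket" lookup entirely.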
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a00532de2a8c..8165f5aa8c71 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -468,7 +468,8 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
-		size += TCPOLEN_WSCALE_ALIGNED;
+		if(likely(opts->ws))
+			size += TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
@@ -509,7 +510,8 @@ static unsigned tcp_synack_options(struct sock *sk,
	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
-		size += TCPOLEN_WSCALE_ALIGNED;
+		if(likely(opts->ws))
+			size += TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(doing_ts)) {
		opts->options |= OPTION_TS;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e2d3b7580b76..7b6a584b62dd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1688,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
		.fc_dst_len = plen,
		.fc_flags = RTF_UP | flags,
		.fc_nlinfo.nl_net = dev_net(dev),
+		.fc_protocol = RTPROT_KERNEL,
	};

	ipv6_addr_copy(&cfg.fc_dst, pfx);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index abedf95fdf2d..b3157a0cc15d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = {
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };

-static __inline__ int icmpv6_xmit_lock(struct sock *sk)
+static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
	local_bh_disable();
+	sk = icmpv6_sk(net);

	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path (f.e. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		local_bh_enable();
-		return 1;
+		return NULL;
	}
-	return 0;
+	return sk;
 }

 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
@@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
	fl.fl_icmp_code = code;
	security_skb_classify_flow(skb, &fl);

-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
		return;
+	np = inet6_sk(sk);

	if (!icmpv6_xrlim_allow(sk, type, &fl))
		goto out;
@@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
	security_skb_classify_flow(skb, &fl);

-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
		return;
+	np = inet6_sk(sk);

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 01d47674f7e5..e53e493606c5 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -377,14 +377,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
	    skb_checksum_complete(skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
-		return 0;
+		return NET_RX_DROP;
	}

	/* Charge it to the socket. */
	if (sock_queue_rcv_skb(sk,skb)<0) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
-		return 0;
+		return NET_RX_DROP;
	}

	return 0;
@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
		if (skb_checksum_complete(skb)) {
			atomic_inc(&sk->sk_drops);
			kfree_skb(skb);
-			return 0;
+			return NET_RX_DROP;
		}
	}
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index e6dfaeac6be3..587f8f60c489 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -156,7 +156,7 @@ static struct ctl_table_header *ip6_base;
 int ipv6_static_sysctl_register(void)
 {
	static struct ctl_table empty[1];
-	ip6_base = register_net_sysctl_rotable(net_ipv6_ctl_path, empty);
+	ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
	if (ip6_base == NULL)
		return -ENOMEM;
	return 0;
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 475f89a8aee1..8165df578c92 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -248,8 +248,8 @@ IEEE80211_IF_WFILE(min_discovery_timeout,
 static void add_sta_files(struct ieee80211_sub_if_data *sdata)
 {
	DEBUGFS_ADD(drop_unencrypted, sta);
-	DEBUGFS_ADD(force_unicast_rateidx, ap);
-	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+	DEBUGFS_ADD(force_unicast_rateidx, sta);
+	DEBUGFS_ADD(max_ratectrl_rateidx, sta);

	DEBUGFS_ADD(state, sta);
	DEBUGFS_ADD(bssid, sta);
@@ -283,8 +283,8 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
 static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 {
	DEBUGFS_ADD(drop_unencrypted, wds);
-	DEBUGFS_ADD(force_unicast_rateidx, ap);
-	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+	DEBUGFS_ADD(force_unicast_rateidx, wds);
+	DEBUGFS_ADD(max_ratectrl_rateidx, wds);

	DEBUGFS_ADD(peer, wds);
 }
@@ -292,8 +292,8 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
 {
	DEBUGFS_ADD(drop_unencrypted, vlan);
-	DEBUGFS_ADD(force_unicast_rateidx, ap);
-	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+	DEBUGFS_ADD(force_unicast_rateidx, vlan);
+	DEBUGFS_ADD(max_ratectrl_rateidx, vlan);
 }

 static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -381,8 +381,8 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
 static void del_sta_files(struct ieee80211_sub_if_data *sdata)
 {
	DEBUGFS_DEL(drop_unencrypted, sta);
-	DEBUGFS_DEL(force_unicast_rateidx, ap);
-	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+	DEBUGFS_DEL(force_unicast_rateidx, sta);
+	DEBUGFS_DEL(max_ratectrl_rateidx, sta);

	DEBUGFS_DEL(state, sta);
	DEBUGFS_DEL(bssid, sta);
@@ -416,8 +416,8 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata)
 static void del_wds_files(struct ieee80211_sub_if_data *sdata)
 {
	DEBUGFS_DEL(drop_unencrypted, wds);
-	DEBUGFS_DEL(force_unicast_rateidx, ap);
-	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+	DEBUGFS_DEL(force_unicast_rateidx, wds);
+	DEBUGFS_DEL(max_ratectrl_rateidx, wds);

	DEBUGFS_DEL(peer, wds);
 }
@@ -425,8 +425,8 @@ static void del_wds_files(struct ieee80211_sub_if_data *sdata)
 static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
 {
	DEBUGFS_DEL(drop_unencrypted, vlan);
-	DEBUGFS_DEL(force_unicast_rateidx, ap);
-	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+	DEBUGFS_DEL(force_unicast_rateidx, vlan);
+	DEBUGFS_DEL(max_ratectrl_rateidx, vlan);
 }

 static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2bb546744b94..570ae83c71b1 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -472,6 +472,8 @@ struct ieee80211_sub_if_data {
			struct dentry *auth_transaction;
			struct dentry *flags;
			struct dentry *num_beacons_sta;
+			struct dentry *force_unicast_rateidx;
+			struct dentry *max_ratectrl_rateidx;
		} sta;
		struct {
			struct dentry *drop_unencrypted;
@@ -485,9 +487,13 @@ struct ieee80211_sub_if_data {
		struct {
			struct dentry *drop_unencrypted;
			struct dentry *peer;
+			struct dentry *force_unicast_rateidx;
+			struct dentry *max_ratectrl_rateidx;
		} wds;
		struct {
			struct dentry *drop_unencrypted;
+			struct dentry *force_unicast_rateidx;
+			struct dentry *max_ratectrl_rateidx;
		} vlan;
		struct {
			struct dentry *mode;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index b631703bcc82..3ccb3599c04f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -376,7 +376,7 @@ errcopy:
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			tbl->free_node(p, 0);
	}
-	__mesh_table_free(tbl);
+	__mesh_table_free(newtbl);
 endgrow:
	return NULL;
 }
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e088b440aafa..7d53382f1a5b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -500,51 +500,21 @@ int ieee80211_ht_addt_info_ie_to_ht_bss_info(
 static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata,
					 struct ieee80211_if_sta *ifsta)
 {
-	char *buf;
-	size_t len;
-	int i;
	union iwreq_data wrqu;

-	if (!ifsta->assocreq_ies && !ifsta->assocresp_ies)
-		return;
-
-	buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len +
-				ifsta->assocresp_ies_len), GFP_KERNEL);
-	if (!buf)
-		return;
-
-	len = sprintf(buf, "ASSOCINFO(");
	if (ifsta->assocreq_ies) {
-		len += sprintf(buf + len, "ReqIEs=");
-		for (i = 0; i < ifsta->assocreq_ies_len; i++) {
-			len += sprintf(buf + len, "%02x",
-				       ifsta->assocreq_ies[i]);
-		}
+		memset(&wrqu, 0, sizeof(wrqu));
+		wrqu.data.length = ifsta->assocreq_ies_len;
+		wireless_send_event(sdata->dev, IWEVASSOCREQIE, &wrqu,
+				    ifsta->assocreq_ies);
	}
-	if (ifsta->assocresp_ies) {
-		if (ifsta->assocreq_ies)
-			len += sprintf(buf + len, " ");
-		len += sprintf(buf + len, "RespIEs=");
-		for (i = 0; i < ifsta->assocresp_ies_len; i++) {
-			len += sprintf(buf + len, "%02x",
-				       ifsta->assocresp_ies[i]);
-		}
-	}
-	len += sprintf(buf + len, ")");
-	if (len > IW_CUSTOM_MAX) {
-		len = sprintf(buf, "ASSOCRESPIE=");
-		for (i = 0; i < ifsta->assocresp_ies_len; i++) {
-			len += sprintf(buf + len, "%02x",
-				       ifsta->assocresp_ies[i]);
-		}
+
+	if (ifsta->assocresp_ies) {
+		memset(&wrqu, 0, sizeof(wrqu));
+		wrqu.data.length = ifsta->assocresp_ies_len;
+		wireless_send_event(sdata->dev, IWEVASSOCRESPIE, &wrqu,
+				    ifsta->assocresp_ies);
	}
-
-	memset(&wrqu, 0, sizeof(wrqu));
-	wrqu.data.length = len;
-	wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf);
-
-	kfree(buf);
 }
@@ -864,7 +834,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
		}
	}

-	if (count == 8) {
+	if (rates_len > count) {
		pos = skb_put(skb, rates_len - count + 2);
		*pos++ = WLAN_EID_EXT_SUPP_RATES;
		*pos++ = rates_len - count;
@@ -2788,7 +2758,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
			  jiffies);
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
	if (beacon_timestamp > rx_timestamp) {
-#ifndef CONFIG_MAC80211_IBSS_DEBUG
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
		printk(KERN_DEBUG "%s: beacon TSF higher than "
		       "local TSF - IBSS merge with BSSID %s\n",
		       sdata->dev->name, print_mac(mac, mgmt->bssid));
qdisc_root_lock(q); + root_lock = qdisc_root_sleeping_lock(q); if (tp == NULL) { /* Proto-tcf does not exist, create new one */ diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 481260a4f10f..e3d8455eebc2 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -75,7 +75,7 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif) static inline void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) { - spinlock_t *root_lock = qdisc_root_lock(q); + spinlock_t *root_lock = qdisc_root_sleeping_lock(q); spin_lock_bh(root_lock); memset(head->fastmap, 0, sizeof(head->fastmap)); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 45f442d7de47..1122c952aa99 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -199,19 +199,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) return NULL; } +/* + * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen() + * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue() + */ +static DEFINE_SPINLOCK(qdisc_list_lock); + +static void qdisc_list_add(struct Qdisc *q) +{ + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { + spin_lock_bh(&qdisc_list_lock); + list_add_tail(&q->list, &qdisc_root_sleeping(q)->list); + spin_unlock_bh(&qdisc_list_lock); + } +} + +void qdisc_list_del(struct Qdisc *q) +{ + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { + spin_lock_bh(&qdisc_list_lock); + list_del(&q->list); + spin_unlock_bh(&qdisc_list_lock); + } +} +EXPORT_SYMBOL(qdisc_list_del); + struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) { unsigned int i; + struct Qdisc *q; + + spin_lock_bh(&qdisc_list_lock); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - struct Qdisc *q, *txq_root = txq->qdisc_sleeping; + struct Qdisc *txq_root = txq->qdisc_sleeping; q = qdisc_match_from_root(txq_root, handle); if (q) - return q; + goto unlock; } - return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); + + q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); + +unlock: + spin_unlock_bh(&qdisc_list_lock); + + return q; } static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) @@ -590,7 +624,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; spinlock_t *root_lock; - root_lock = qdisc_root_lock(oqdisc); + root_lock = qdisc_lock(oqdisc); spin_lock_bh(root_lock); /* Prune old scheduler */ @@ -601,7 +635,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, if (qdisc == NULL) qdisc = &noop_qdisc; dev_queue->qdisc_sleeping = qdisc; - dev_queue->qdisc = &noop_qdisc; + rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); spin_unlock_bh(root_lock); @@ -796,9 +830,16 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, sch->stab = stab; } if (tca[TCA_RATE]) { + spinlock_t *root_lock; + + if ((sch->parent != TC_H_ROOT) && + !(sch->flags & TCQ_F_INGRESS)) + root_lock = qdisc_root_sleeping_lock(sch); + else + root_lock = qdisc_lock(sch); + err = gen_new_estimator(&sch->bstats, &sch->rate_est, - qdisc_root_lock(sch), - tca[TCA_RATE]); + root_lock, tca[TCA_RATE]); if (err) { /* * Any broken qdiscs that would require @@ -810,8 +851,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, goto err_out3; } } - if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS)) - list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list); + 
+	qdisc_list_add(sch);

	return sch;
 }
@@ -850,7 +891,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)

	if (tca[TCA_RATE])
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
-				      qdisc_root_lock(sch), tca[TCA_RATE]);
+				      qdisc_root_sleeping_lock(sch),
+				      tca[TCA_RATE]);
	return 0;
 }
@@ -1127,8 +1169,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
		goto nla_put_failure;

-	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1419,8 +1461,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

-	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 8fa90d68ec6d..8b06fa900482 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1754,7 +1754,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
	if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
-		spinlock_t *root_lock = qdisc_root_lock(sch);
+		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
@@ -1839,7 +1839,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
		if (tca[TCA_RATE])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_lock(sch),
+					      qdisc_root_sleeping_lock(sch),
					      tca[TCA_RATE]);
		return 0;
	}
@@ -1930,7 +1930,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
	if (tca[TCA_RATE])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_lock(sch), tca[TCA_RATE]);
+				  qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);

	*arg = (unsigned long)cl;
	return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c3ed4d44fc14..9634091ee2f0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -526,10 +526,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

-	if (qdisc->parent)
-		list_del(&qdisc->list);
-
 #ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
	qdisc_put_stab(qdisc->stab);
 #endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
@@ -635,7 +634,7 @@ static void dev_deactivate_queue(struct net_device *dev,
	if (!(qdisc->flags & TCQ_F_BUILTIN))
		set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

-	dev_queue->qdisc = qdisc_default;
+	rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
	qdisc_reset(qdisc);

	spin_unlock_bh(qdisc_lock(qdisc));
@@ -710,7 +709,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
-		dev_queue->qdisc = qdisc_default;
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
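Aside on the sch_api.c/sch_generic.c hunks above: plain stores to dev_queue->qdisc become rcu_assign_pointer() so that lockless readers never observe a partially initialised qdisc. The sketch below is only a rough userspace analogy using C11 release/acquire atomics; it is not the kernel RCU API and ignores grace periods entirely, but it shows the publication ordering rcu_assign_pointer() provides on the store side. All names here are made up for illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct qdisc_like {
	int limit;		/* imagine more state here */
};

/* Shared pointer that readers dereference without taking a lock. */
static _Atomic(struct qdisc_like *) active_qdisc;

/* Publisher: fully initialise the object, then publish it with release
 * semantics -- the role rcu_assign_pointer() plays in the patch. */
static void publish(struct qdisc_like *q, int limit)
{
	q->limit = limit;
	atomic_store_explicit(&active_qdisc, q, memory_order_release);
}

/* Reader: the acquire load pairs with the release store, so q->limit is
 * guaranteed to be the initialised value. */
static int read_limit(void)
{
	struct qdisc_like *q =
		atomic_load_explicit(&active_qdisc, memory_order_acquire);
	return q ? q->limit : -1;
}

int main(void)
{
	struct qdisc_like *q = malloc(sizeof(*q));

	if (!q)
		return EXIT_FAILURE;
	publish(q, 1000);
	printf("limit = %d\n", read_limit());
	free(q);
	return EXIT_SUCCESS;
}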
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index c2b8d9cce3d2..c1e77da8cd09 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		if (tca[TCA_RATE])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_lock(sch),
+					      qdisc_root_sleeping_lock(sch),
					      tca[TCA_RATE]);
		return 0;
	}
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
	if (tca[TCA_RATE])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_lock(sch), tca[TCA_RATE]);
+				  qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);

	*arg = (unsigned long)cl;
	return 0;
 }
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0df0df202ed0..d14f02056ae6 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1043,7 +1043,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
-	spinlock_t *root_lock = qdisc_root_lock(sch);
+	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;
@@ -1075,7 +1075,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
 {
	struct htb_class *cl = (struct htb_class *)arg;
-	spinlock_t *root_lock = qdisc_root_lock(sch);
+	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct nlattr *nest;
	struct tc_htb_opt opt;
@@ -1372,7 +1372,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
			goto failure;

		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_lock(sch),
+				  qdisc_root_sleeping_lock(sch),
				  tca[TCA_RATE] ? : &est.nla);
		cl->refcnt = 1;
		cl->children = 0;
@@ -1427,7 +1427,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
	} else {
		if (tca[TCA_RATE])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_lock(sch),
+					      qdisc_root_sleeping_lock(sch),
					      tca[TCA_RATE]);
		sch_tree_lock(sch);
	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fb0294d0b55e..3781e55046d0 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -341,7 +341,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

-	root_lock = qdisc_root_lock(sch);
+	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	d = xchg(&q->delay_dist, d);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 2c35c678563b..d35ef059abb1 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -161,7 +161,7 @@ teql_destroy(struct Qdisc* sch)
				txq = netdev_get_tx_queue(master->dev, 0);
				master->slaves = NULL;

-				root_lock = qdisc_root_lock(txq->qdisc);
+				root_lock = qdisc_root_sleeping_lock(txq->qdisc);
				spin_lock_bh(root_lock);
				qdisc_reset(txq->qdisc);
				spin_unlock_bh(root_lock);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 675a5c3e68a6..52db5f60daa0 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -80,6 +80,10 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
 {
	struct sctp_auth_bytes *key;

+	/* Verify that we are not going to overflow INT_MAX */
+	if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+		return NULL;
+
	/* Allocate the shared key */
	key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
	if (!key)
@@ -782,6 +786,9 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
	for (i = 0; i < hmacs->shmac_num_idents; i++) {
		id = hmacs->shmac_idents[i];

+		if (id > SCTP_AUTH_HMAC_ID_MAX)
+			return -EOPNOTSUPP;
+
		if (SCTP_AUTH_HMAC_ID_SHA1 == id)
			has_sha1 = 1;
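Aside on the sctp_auth_create_key() hunk above: key_len comes from userspace and feeds the allocation size sizeof(struct sctp_auth_bytes) + key_len, so the patch rejects lengths for which that sum would exceed INT_MAX before calling kmalloc(). Below is a minimal userspace illustration of the same bound check; struct auth_bytes is a stand-in for the kernel structure, not the real SCTP layout, and the whole snippet is illustrative rather than part of the patch.

#include <inttypes.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct sctp_auth_bytes: a length header plus key material. */
struct auth_bytes {
	uint32_t len;
	uint8_t  data[];
};

/* Same shape of check as the patch: refuse key lengths for which
 * sizeof(struct auth_bytes) + key_len would exceed INT_MAX, so the later
 * allocation size cannot end up as a misleadingly small value. */
static int key_len_ok(uint32_t key_len)
{
	return (INT_MAX - key_len) >= sizeof(struct auth_bytes);
}

int main(void)
{
	uint32_t samples[] = { 32, 1024, INT_MAX - 2, INT_MAX };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("key_len=%" PRIu32 " -> %s\n", samples[i],
		       key_len_ok(samples[i]) ? "accepted" : "rejected");
	return 0;
}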
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index bb5c9ef13046..5ffb9dec1c3f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3086,6 +3086,7 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
				      int optlen)
 {
	struct sctp_hmacalgo *hmacs;
+	u32 idents;
	int err;

	if (!sctp_auth_enable)
@@ -3103,8 +3104,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
		goto out;
	}

-	if (hmacs->shmac_num_idents == 0 ||
-	    hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) {
+	idents = hmacs->shmac_num_idents;
+	if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
+	    (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
		err = -EINVAL;
		goto out;
	}
@@ -3144,6 +3146,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
		goto out;
	}

+	if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
	asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
	if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
		ret = -EINVAL;
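Aside on the two sctp/socket.c hunks above: both setsockopt() handlers now check that the count or key length embedded in the user-supplied structure actually fits inside the optlen that was copied in, instead of trusting the embedded value. A compact userspace sketch of that validation pattern follows; hmac_opt and MAX_IDENTS are illustrative names, not the real SCTP ABI.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative option layout: a count followed by that many 16-bit idents. */
struct hmac_opt {
	uint32_t num_idents;
	uint16_t idents[];
};

#define MAX_IDENTS 4	/* stand-in for SCTP_AUTH_NUM_HMACS */

/* Reject a count of zero, a count above the supported maximum, or a count
 * that claims more idents than the caller's optlen can actually hold --
 * the same three conditions the patched kernel code checks. */
static int hmac_opt_valid(const struct hmac_opt *opt, size_t optlen)
{
	if (optlen < sizeof(*opt))
		return 0;
	if (opt->num_idents == 0 || opt->num_idents > MAX_IDENTS)
		return 0;
	if (opt->num_idents * sizeof(uint16_t) > optlen - sizeof(*opt))
		return 0;
	return 1;
}

int main(void)
{
	size_t optlen = sizeof(struct hmac_opt) + 2 * sizeof(uint16_t);
	struct hmac_opt *opt = malloc(optlen);

	if (!opt)
		return EXIT_FAILURE;
	opt->num_idents = 2;
	opt->idents[0] = 1;
	opt->idents[1] = 3;

	/* An honest count is accepted; an inflated count is rejected. */
	printf("ok=%d\n", hmac_opt_valid(opt, optlen));
	opt->num_idents = 100;
	printf("ok=%d\n", hmac_opt_valid(opt, optlen));
	free(opt);
	return EXIT_SUCCESS;
}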