Diffstat (limited to 'net/dccp')
-rw-r--r-- | net/dccp/ackvec.c                    |  2
-rw-r--r-- | net/dccp/ackvec.h                    |  4
-rw-r--r-- | net/dccp/ccid.h                      |  4
-rw-r--r-- | net/dccp/ccids/lib/loss_interval.h   |  2
-rw-r--r-- | net/dccp/ccids/lib/packet_history.h  |  4
-rw-r--r-- | net/dccp/dccp.h                      |  1
-rw-r--r-- | net/dccp/input.c                     |  6
-rw-r--r-- | net/dccp/ipv4.c                      | 52
-rw-r--r-- | net/dccp/output.c                    | 42
-rw-r--r-- | net/dccp/proto.c                     |  6
10 files changed, 54 insertions, 69 deletions
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 6530283eafca..c9a62cca22fc 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -91,7 +91,7 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
 }
 
 struct dccp_ackvec *dccp_ackvec_alloc(const unsigned int len,
-                                      const unsigned int __nocast priority)
+                                      const gfp_t priority)
 {
         struct dccp_ackvec *av = kmalloc(sizeof(*av) + len, priority);
 
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index 8ca51c9191f7..d0fd6c60c574 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -74,7 +74,7 @@ struct sk_buff;
 
 #ifdef CONFIG_IP_DCCP_ACKVEC
 extern struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len,
-                                             const unsigned int __nocast priority);
+                                             const gfp_t priority);
 extern void dccp_ackvec_free(struct dccp_ackvec *av);
 
 extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
@@ -93,7 +93,7 @@ static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
 }
 #else /* CONFIG_IP_DCCP_ACKVEC */
 static inline struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len,
-                                                    const unsigned int __nocast priority)
+                                                    const gfp_t priority)
 {
         return NULL;
 }
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 21e55142dcd3..c37eeeaf5c6e 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -110,14 +110,14 @@ static inline int ccid_hc_tx_init(struct ccid *ccid, struct sock *sk)
 
 static inline void ccid_hc_rx_exit(struct ccid *ccid, struct sock *sk)
 {
-        if (ccid->ccid_hc_rx_exit != NULL &&
+        if (ccid != NULL && ccid->ccid_hc_rx_exit != NULL &&
             dccp_sk(sk)->dccps_hc_rx_ccid_private != NULL)
                 ccid->ccid_hc_rx_exit(sk);
 }
 
 static inline void ccid_hc_tx_exit(struct ccid *ccid, struct sock *sk)
 {
-        if (ccid->ccid_hc_tx_exit != NULL &&
+        if (ccid != NULL && ccid->ccid_hc_tx_exit != NULL &&
             dccp_sk(sk)->dccps_hc_tx_ccid_private != NULL)
                 ccid->ccid_hc_tx_exit(sk);
 }
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 13ad47ba1420..417d9d82df3e 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -36,7 +36,7 @@ struct dccp_li_hist_entry {
 
 static inline struct dccp_li_hist_entry *
                 dccp_li_hist_entry_new(struct dccp_li_hist *hist,
-                                       const unsigned int __nocast prio)
+                                       const gfp_t prio)
 {
         return kmem_cache_alloc(hist->dccplih_slab, prio);
 }
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index b375ebdb7dcf..122e96737ff6 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -86,7 +86,7 @@ extern struct dccp_rx_hist_entry *
 
 static inline struct dccp_tx_hist_entry *
                 dccp_tx_hist_entry_new(struct dccp_tx_hist *hist,
-                                       const unsigned int __nocast prio)
+                                       const gfp_t prio)
 {
         struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab,
                                                             prio);
@@ -137,7 +137,7 @@ static inline struct dccp_rx_hist_entry *
                                        const struct sock *sk,
                                        const u32 ndp,
                                        const struct sk_buff *skb,
-                                       const unsigned int __nocast prio)
+                                       const gfp_t prio)
 {
         struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab,
                                                             prio);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5871c027f9dc..f97b85d55ad8 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -118,7 +118,6 @@ DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
 #define DCCP_ADD_STATS_USER(field, val) \
                         SNMP_ADD_STATS_USER(dccp_statistics, field, val)
 
-extern int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb);
 extern int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb);
 
 extern int dccp_send_response(struct sock *sk);
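The ackvec, loss_interval and packet_history hunks above are a mechanical type conversion: the allocation-priority argument moves from the loosely annotated `unsigned int __nocast` to the dedicated gfp_t type, so sparse can warn when a caller passes something that is not a GFP flag. A minimal sketch of the call pattern this covers, assuming only the dccp_ackvec_alloc() prototype shown above (the caller and its error handling are illustrative, not taken from the tree):

        /* Hypothetical caller with a length 'len' already in hand: the
         * priority is now typed as gfp_t, i.e. GFP_KERNEL in process
         * context or GFP_ATOMIC where sleeping is not allowed; passing a
         * plain integer here now draws a sparse warning. */
        struct dccp_ackvec *av = dccp_ackvec_alloc(len, GFP_KERNEL);

        if (av == NULL)
                return -ENOMEM;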
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 1b6b2cb12376..3454d5941900 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -375,6 +375,9 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
         case DCCP_PKT_RESET:
                 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
                 break;
+        case DCCP_PKT_DATA:
+                if (sk->sk_state == DCCP_RESPOND)
+                        break;
         case DCCP_PKT_DATAACK:
         case DCCP_PKT_ACK:
                 /*
@@ -393,7 +396,8 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
                         dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
                 dccp_set_state(sk, DCCP_OPEN);
 
-                if (dh->dccph_type == DCCP_PKT_DATAACK) {
+                if (dh->dccph_type == DCCP_PKT_DATAACK ||
+                    dh->dccph_type == DCCP_PKT_DATA) {
                         dccp_rcv_established(sk, skb, dh, len);
                         queued = 1; /* packet was queued
                                        (by dccp_rcv_established) */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 40fe6afacde6..ca03521112c5 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -31,8 +31,6 @@ struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
         .lhash_lock     = RW_LOCK_UNLOCKED,
         .lhash_users    = ATOMIC_INIT(0),
         .lhash_wait     = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
-        .portalloc_lock = SPIN_LOCK_UNLOCKED,
-        .port_rover     = 1024 - 1,
 };
 
 EXPORT_SYMBOL_GPL(dccp_hashinfo);
@@ -62,27 +60,27 @@ static int __dccp_v4_check_established(struct sock *sk, const __u16 lport,
         const int dif = sk->sk_bound_dev_if;
         INET_ADDR_COOKIE(acookie, saddr, daddr)
         const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
-        const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport,
-                                      dccp_hashinfo.ehash_size);
-        struct inet_ehash_bucket *head = &dccp_hashinfo.ehash[hash];
+        unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
+        struct inet_ehash_bucket *head = inet_ehash_bucket(&dccp_hashinfo, hash);
         const struct sock *sk2;
         const struct hlist_node *node;
         struct inet_timewait_sock *tw;
 
+        prefetch(head->chain.first);
         write_lock(&head->lock);
 
         /* Check TIME-WAIT sockets first. */
         sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) {
                 tw = inet_twsk(sk2);
 
-                if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif))
+                if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
                         goto not_unique;
         }
         tw = NULL;
 
         /* And established part... */
         sk_for_each(sk2, node, &head->chain) {
-                if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif))
+                if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
                         goto not_unique;
         }
 
@@ -90,7 +88,7 @@ static int __dccp_v4_check_established(struct sock *sk, const __u16 lport,
          * in hash table socket with a funny identity. */
         inet->num = lport;
         inet->sport = htons(lport);
-        sk->sk_hashent = hash;
+        sk->sk_hash = hash;
         BUG_TRAP(sk_unhashed(sk));
         __sk_add_node(sk, &head->chain);
         sock_prot_inc_use(sk->sk_prot);
@@ -125,36 +123,15 @@ static int dccp_v4_hash_connect(struct sock *sk)
         int ret;
 
         if (snum == 0) {
-                int rover;
                 int low = sysctl_local_port_range[0];
                 int high = sysctl_local_port_range[1];
                 int remaining = (high - low) + 1;
+                int rover = net_random() % (high - low) + low;
                 struct hlist_node *node;
                 struct inet_timewait_sock *tw = NULL;
 
                 local_bh_disable();
-
-                /* TODO. Actually it is not so bad idea to remove
-                 * dccp_hashinfo.portalloc_lock before next submission to
-                 * Linus.
-                 * As soon as we touch this place at all it is time to think.
-                 *
-                 * Now it protects single _advisory_ variable
-                 * dccp_hashinfo.port_rover, hence it is mostly useless.
-                 * Code will work nicely if we just delete it, but
-                 * I am afraid in contented case it will work not better or
-                 * even worse: another cpu just will hit the same bucket
-                 * and spin there.
-                 * So some cpu salt could remove both contention and
-                 * memory pingpong. Any ideas how to do this in a nice way?
-                 */
-                spin_lock(&dccp_hashinfo.portalloc_lock);
-                rover = dccp_hashinfo.port_rover;
-
                 do {
-                        rover++;
-                        if ((rover < low) || (rover > high))
-                                rover = low;
                         head = &dccp_hashinfo.bhash[inet_bhashfn(rover,
                                                        dccp_hashinfo.bhash_size)];
                         spin_lock(&head->lock);
@@ -187,9 +164,9 @@ static int dccp_v4_hash_connect(struct sock *sk)
 
                 next_port:
                         spin_unlock(&head->lock);
+                        if (++rover > high)
+                                rover = low;
                 } while (--remaining > 0);
-                dccp_hashinfo.port_rover = rover;
-                spin_unlock(&dccp_hashinfo.portalloc_lock);
 
                 local_bh_enable();
 
@@ -197,9 +174,6 @@ static int dccp_v4_hash_connect(struct sock *sk)
 
 ok:
                 /* All locks still held and bhs disabled */
-                dccp_hashinfo.port_rover = rover;
-                spin_unlock(&dccp_hashinfo.portalloc_lock);
-
                 inet_bind_hash(sk, tb, rover);
                 if (sk_unhashed(sk)) {
                         inet_sk(sk)->sport = htons(rover);
@@ -463,6 +437,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
         if (skb != NULL) {
                 const struct inet_request_sock *ireq = inet_rsk(req);
 
+                memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
                                             ireq->rmt_addr,
                                             ireq->opt);
@@ -647,6 +622,7 @@ int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code)
         if (skb != NULL) {
                 const struct inet_sock *inet = inet_sk(sk);
 
+                memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                 err = ip_build_and_send_pkt(skb, sk, inet->saddr,
                                             inet->daddr, NULL);
                 if (err == NET_XMIT_CN)
@@ -1287,10 +1263,8 @@ static int dccp_v4_destroy_sock(struct sock *sk)
         if (inet_csk(sk)->icsk_bind_hash != NULL)
                 inet_put_port(&dccp_hashinfo, sk);
 
-        if (dp->dccps_service_list != NULL) {
-                kfree(dp->dccps_service_list);
-                dp->dccps_service_list = NULL;
-        }
+        kfree(dp->dccps_service_list);
+        dp->dccps_service_list = NULL;
 
         ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
         ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 4786bdcddcc9..74ff87025878 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -12,6 +12,7 @@
 
 #include <linux/config.h>
 #include <linux/dccp.h>
+#include <linux/kernel.h>
 #include <linux/skbuff.h>
 
 #include <net/sock.h>
@@ -25,13 +26,20 @@ static inline void dccp_event_ack_sent(struct sock *sk)
         inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
 
+static inline void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
+{
+        skb_set_owner_w(skb, sk);
+        WARN_ON(sk->sk_send_head);
+        sk->sk_send_head = skb;
+}
+
 /*
  * All SKB's seen here are completely headerless. It is our
  * job to build the DCCP header, and pass the packet down to
  * IP so it can do the same plus pass the packet off to the
  * device.
  */
-int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
+static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 {
         if (likely(skb != NULL)) {
                 const struct inet_sock *inet = inet_sk(sk);
@@ -50,10 +58,21 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
                 switch (dcb->dccpd_type) {
                 case DCCP_PKT_DATA:
                         set_ack = 0;
+                        /* fall through */
+                case DCCP_PKT_DATAACK:
                         break;
+                case DCCP_PKT_SYNC:
                 case DCCP_PKT_SYNCACK:
                         ackno = dcb->dccpd_seq;
+                        /* fall through */
+                default:
+                        /*
+                         * Only data packets should come through with skb->sk
+                         * set.
+                         */
+                        WARN_ON(skb->sk);
+                        skb_set_owner_w(skb, sk);
                         break;
                 }
@@ -62,11 +81,6 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 
                 skb->h.raw = skb_push(skb, dccp_header_size);
                 dh = dccp_hdr(skb);
-                /*
-                 * Data packets are not cloned as they are never retransmitted
-                 */
-                if (skb_cloned(skb))
-                        skb_set_owner_w(skb, sk);
 
                 /* Build DCCP header and checksum it. */
                 memset(dh, 0, dccp_header_size);
@@ -102,6 +116,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 
                 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 
+                memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                 err = ip_queue_xmit(skb, 0);
                 if (err <= 0)
                         return err;
@@ -243,7 +258,8 @@ int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
 
                 err = dccp_transmit_skb(sk, skb);
                 ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
-        }
+        } else
+                kfree_skb(skb);
 
         return err;
 }
@@ -393,10 +409,8 @@ int dccp_connect(struct sock *sk)
         DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
         skb->csum = 0;
-        skb_set_owner_w(skb, sk);
-
-        BUG_TRAP(sk->sk_send_head == NULL);
-        sk->sk_send_head = skb;
+        dccp_skb_entail(sk, skb);
         dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
         DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);
@@ -425,7 +439,6 @@ void dccp_send_ack(struct sock *sk)
                 skb_reserve(skb, MAX_DCCP_HEADER);
                 skb->csum = 0;
                 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
-                skb_set_owner_w(skb, sk);
                 dccp_transmit_skb(sk, skb);
         }
 }
@@ -482,7 +495,6 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
         DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
         DCCP_SKB_CB(skb)->dccpd_seq = seq;
-        skb_set_owner_w(skb, sk);
 
         dccp_transmit_skb(sk, skb);
 }
@@ -495,7 +507,7 @@ void dccp_send_close(struct sock *sk, const int active)
 {
         struct dccp_sock *dp = dccp_sk(sk);
         struct sk_buff *skb;
-        const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;
+        const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;
 
         skb = alloc_skb(sk->sk_prot->max_header, prio);
         if (skb == NULL)
@@ -507,10 +519,8 @@ void dccp_send_close(struct sock *sk, const int active)
         DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
                                         DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
 
-        skb_set_owner_w(skb, sk);
         if (active) {
-                BUG_TRAP(sk->sk_send_head == NULL);
-                sk->sk_send_head = skb;
+                dccp_skb_entail(sk, skb);
                 dccp_transmit_skb(sk, skb_clone(skb, prio));
         } else
                 dccp_transmit_skb(sk, skb);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a1cfd0e9e3bc..8a6b2a9e4581 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -46,6 +46,7 @@ atomic_t dccp_orphan_count = ATOMIC_INIT(0);
 static struct net_protocol dccp_protocol = {
         .handler        = dccp_v4_rcv,
         .err_handler    = dccp_v4_err,
+        .no_policy      = 1,
 };
 
 const char *dccp_packet_name(const int type)
@@ -238,8 +239,7 @@ static int dccp_setsockopt_service(struct sock *sk, const u32 service,
         lock_sock(sk);
         dp->dccps_service = service;
 
-        if (dp->dccps_service_list != NULL)
-                kfree(dp->dccps_service_list);
+        kfree(dp->dccps_service_list);
 
         dp->dccps_service_list = sl;
         release_sock(sk);
@@ -402,8 +402,6 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
          * This bug was _quickly_ found & fixed by just looking at an OSTRA
          * generated callgraph 8) -acme
          */
-        if (rc != 0)
-                goto out_discard;
 out_release:
         release_sock(sk);
         return rc ? : len;
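The dccp_v4_hash_connect() change above replaces the globally shared, lock-protected port_rover with a per-connection random starting point: pick a random port in the local range, probe ports sequentially, wrap at the top of the range, and give up once the whole range has been tried. A stand-alone sketch of that search order, using rand() as a user-space stand-in for net_random() and a hypothetical port_in_use() check in place of the bind-hash lookup:

        #include <stdlib.h>
        #include <stdbool.h>

        /* Hypothetical stand-in for the kernel's bind-hash lookup. */
        static bool port_in_use(int port)
        {
                (void)port;
                return false;           /* stub: pretend every port is free */
        }

        /* Simplified model of the randomized search loop added in the patch. */
        static int pick_local_port(int low, int high)
        {
                int remaining = (high - low) + 1;
                int rover = rand() % (high - low) + low;  /* random starting point */

                do {
                        if (!port_in_use(rover))
                                return rover;             /* usable port found */
                        if (++rover > high)               /* wrap at the top of the range */
                                rover = low;
                } while (--remaining > 0);

                return -1;                                /* entire range exhausted */
        }

Starting each search at a random offset avoids the single shared rover and its lock, the contention point the deleted TODO comment worried about, and it also makes the chosen ephemeral ports harder to predict.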