author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-27 22:03:20 +0300 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-27 22:03:20 +0300 |
commit | 468fc7ed5537615efe671d94248446ac24679773 (patch) | |
tree | 27bc9de792e863d6ec1630927b77ac9e7dabb38a /net/iucv/af_iucv.c | |
parent | 08fd8c17686c6b09fa410a26d516548dd80ff147 (diff) | |
parent | 36232012344b8db67052432742deaf17f82e70e6 (diff) | |
download | linux-468fc7ed5537615efe671d94248446ac24679773.tar.xz | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) Unified UDP encapsulation offload methods for drivers, from
Alexander Duyck.
2) Make DSA binding more sane, from Andrew Lunn.
3) Support QCA9888 chips in ath10k, from Anilkumar Kolli.
4) Several workqueue usage cleanups, from Bhaktipriya Shridhar.
5) Add XDP (eXpress Data Path), essentially running BPF programs on RX
packets as soon as the device sees them, with the option to mirror
the packet on TX via the same interface. From Brenden Blanco and
others. (A minimal program shape is sketched after this list.)
6) Allow qdisc/class stats dumps to run lockless, from Eric Dumazet.
7) Add VLAN support to b53 and bcm_sf2, from Florian Fainelli.
8) Simplify netlink conntrack entry layout, from Florian Westphal.
9) Add ipv4 forwarding support to mlxsw spectrum driver, from Ido
Schimmel, Yotam Gigi, and Jiri Pirko.
10) Add SKB array infrastructure and convert tun and macvtap over to it.
From Michael S Tsirkin and Jason Wang.
11) Support qdisc packet injection in pktgen, from John Fastabend.
12) Add neighbour monitoring framework to TIPC, from Jon Paul Maloy.
13) Add NV congestion control support to TCP, from Lawrence Brakmo.
14) Add GSO support to SCTP, from Marcelo Ricardo Leitner.
15) Allow GRO and RPS to function on macsec devices, from Paolo Abeni.
16) Support MPLS over IPV4, from Simon Horman.
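For context on item 5: an XDP program is an eBPF program attached at driver RX that returns a verdict (XDP_PASS, XDP_DROP, XDP_TX for the TX-mirror case) before any skb is allocated. Below is a minimal sketch, assuming clang's BPF target and this kernel's uapi headers; the program name, section name, and build command are illustrative assumptions, not part of this merge:

/* Minimal XDP verdict program (sketch, not from this merge).
 * Assumed build: clang -O2 -target bpf -c xdp_pass.c -o xdp_pass.o
 */
#include <linux/bpf.h>

#define SEC(NAME) __attribute__((section(NAME), used))

SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
	/* ctx->data .. ctx->data_end bound the raw frame; a real
	 * program would parse headers here before deciding.
	 */
	return XDP_PASS;	/* hand the packet on to the normal stack */
}

char _license[] SEC("license") = "GPL";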
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1622 commits)
xgene: Fix build warning with ACPI disabled.
be2net: perform temperature query in adapter regardless of its interface state
l2tp: Correctly return -EBADF from pppol2tp_getname.
net/mlx5_core/health: Remove deprecated create_singlethread_workqueue
net: ipmr/ip6mr: update lastuse on entry change
macsec: ensure rx_sa is set when validation is disabled
tipc: dump monitor attributes
tipc: add a function to get the bearer name
tipc: get monitor threshold for the cluster
tipc: make cluster size threshold for monitoring configurable
tipc: introduce constants for tipc address validation
net: neigh: disallow transition to NUD_STALE if lladdr is unchanged in neigh_update()
MAINTAINERS: xgene: Add driver and documentation path
Documentation: dtb: xgene: Add MDIO node
dtb: xgene: Add MDIO node
drivers: net: xgene: ethtool: Use phy_ethtool_gset and sset
drivers: net: xgene: Use exported functions
drivers: net: xgene: Enable MDIO driver
drivers: net: xgene: Add backward compatibility
drivers: net: phy: xgene: Add MDIO driver
...
Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r-- | net/iucv/af_iucv.c | 223 |
1 file changed, 122 insertions(+), 101 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index fc3598a922b0..37d674e6f8a9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1033,6 +1033,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 {
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv = iucv_sk(sk);
+	size_t headroom, linear;
 	struct sk_buff *skb;
 	struct iucv_message txmsg = {0};
 	struct cmsghdr *cmsg;
@@ -1110,20 +1111,31 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	 * this is fine for SOCK_SEQPACKET (unless we want to support
 	 * segmented records using the MSG_EOR flag), but
 	 * for SOCK_STREAM we might want to improve it in future */
-	if (iucv->transport == AF_IUCV_TRANS_HIPER)
-		skb = sock_alloc_send_skb(sk,
-			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
-			noblock, &err);
-	else
-		skb = sock_alloc_send_skb(sk, len, noblock, &err);
+	headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
+		   ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
+	if (headroom + len < PAGE_SIZE) {
+		linear = len;
+	} else {
+		/* In nonlinear "classic" iucv skb,
+		 * reserve space for iucv_array
+		 */
+		if (iucv->transport != AF_IUCV_TRANS_HIPER)
+			headroom += sizeof(struct iucv_array) *
+				    (MAX_SKB_FRAGS + 1);
+		linear = PAGE_SIZE - headroom;
+	}
+	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
+				   noblock, &err, 0);
 	if (!skb)
 		goto out;
-	if (iucv->transport == AF_IUCV_TRANS_HIPER)
-		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
-	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
-		err = -EFAULT;
+	if (headroom)
+		skb_reserve(skb, headroom);
+	skb_put(skb, linear);
+	skb->len = len;
+	skb->data_len = len - linear;
+	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
+	if (err)
 		goto fail;
-	}
 
 	/* wait if outstanding messages for iucv path has reached */
 	timeo = sock_sndtimeo(sk, noblock);
@@ -1148,49 +1160,67 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			atomic_dec(&iucv->msg_sent);
 			goto fail;
 		}
-		goto release;
-	}
-	skb_queue_tail(&iucv->send_skb_q, skb);
-
-	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
-	      && skb->len <= 7) {
-		err = iucv_send_iprm(iucv->path, &txmsg, skb);
+	} else { /* Classic VM IUCV transport */
+		skb_queue_tail(&iucv->send_skb_q, skb);
+
+		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
+		    skb->len <= 7) {
+			err = iucv_send_iprm(iucv->path, &txmsg, skb);
 
-		/* on success: there is no message_complete callback
-		 * for an IPRMDATA msg; remove skb from send queue */
-		if (err == 0) {
-			skb_unlink(skb, &iucv->send_skb_q);
-			kfree_skb(skb);
+			/* on success: there is no message_complete callback */
+			/* for an IPRMDATA msg; remove skb from send queue   */
+			if (err == 0) {
+				skb_unlink(skb, &iucv->send_skb_q);
+				kfree_skb(skb);
+			}
 
-		/* this error should never happen since the
-		 * IUCV_IPRMDATA path flag is set... sever path */
-		if (err == 0x15) {
-			pr_iucv->path_sever(iucv->path, NULL);
+			/* this error should never happen since the	*/
+			/* IUCV_IPRMDATA path flag is set... sever path */
+			if (err == 0x15) {
+				pr_iucv->path_sever(iucv->path, NULL);
+				skb_unlink(skb, &iucv->send_skb_q);
+				err = -EPIPE;
+				goto fail;
+			}
+		} else if (skb_is_nonlinear(skb)) {
+			struct iucv_array *iba = (struct iucv_array *)skb->head;
+			int i;
+
+			/* skip iucv_array lying in the headroom */
+			iba[0].address = (u32)(addr_t)skb->data;
+			iba[0].length = (u32)skb_headlen(skb);
+			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+				iba[i + 1].address =
+					(u32)(addr_t)skb_frag_address(frag);
+				iba[i + 1].length = (u32)skb_frag_size(frag);
+			}
+			err = pr_iucv->message_send(iucv->path, &txmsg,
+						    IUCV_IPBUFLST, 0,
+						    (void *)iba, skb->len);
+		} else { /* non-IPRM Linear skb */
+			err = pr_iucv->message_send(iucv->path, &txmsg,
+					0, 0, (void *)skb->data, skb->len);
+		}
+		if (err) {
+			if (err == 3) {
+				user_id[8] = 0;
+				memcpy(user_id, iucv->dst_user_id, 8);
+				appl_id[8] = 0;
+				memcpy(appl_id, iucv->dst_name, 8);
+				pr_err(
+		"Application %s on z/VM guest %s exceeds message limit\n",
+				       appl_id, user_id);
+				err = -EAGAIN;
+			} else {
+				err = -EPIPE;
+			}
 			skb_unlink(skb, &iucv->send_skb_q);
-			err = -EPIPE;
 			goto fail;
 		}
-	} else
-		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
-				(void *) skb->data, skb->len);
-	if (err) {
-		if (err == 3) {
-			user_id[8] = 0;
-			memcpy(user_id, iucv->dst_user_id, 8);
-			appl_id[8] = 0;
-			memcpy(appl_id, iucv->dst_name, 8);
-			pr_err("Application %s on z/VM guest %s"
-			       " exceeds message limit\n",
-			       appl_id, user_id);
-			err = -EAGAIN;
-		} else
-			err = -EPIPE;
-		skb_unlink(skb, &iucv->send_skb_q);
-		goto fail;
 	}
 
-release:
 	release_sock(sk);
 	return len;
@@ -1201,42 +1231,32 @@ out:
 	return err;
 }
 
-/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
- *
- * Locking: must be called with message_q.lock held
- */
-static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
+static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
 {
-	int dataleft, size, copied = 0;
-	struct sk_buff *nskb;
-
-	dataleft = len;
-	while (dataleft) {
-		if (dataleft >= sk->sk_rcvbuf / 4)
-			size = sk->sk_rcvbuf / 4;
-		else
-			size = dataleft;
-
-		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
-		if (!nskb)
-			return -ENOMEM;
-
-		/* copy target class to control buffer of new skb */
-		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
-
-		/* copy data fragment */
-		memcpy(nskb->data, skb->data + copied, size);
-		copied += size;
-		dataleft -= size;
-
-		skb_reset_transport_header(nskb);
-		skb_reset_network_header(nskb);
-		nskb->len = size;
+	size_t headroom, linear;
+	struct sk_buff *skb;
+	int err;
 
-		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
+	if (len < PAGE_SIZE) {
+		headroom = 0;
+		linear = len;
+	} else {
+		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
+		linear = PAGE_SIZE - headroom;
+	}
+	skb = alloc_skb_with_frags(headroom + linear, len - linear,
+				   0, &err, GFP_ATOMIC | GFP_DMA);
+	WARN_ONCE(!skb,
+		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
+		  len, err);
+	if (skb) {
+		if (headroom)
+			skb_reserve(skb, headroom);
+		skb_put(skb, linear);
+		skb->len = len;
+		skb->data_len = len - linear;
 	}
-
-	return 0;
+	return skb;
 }
 
 /* iucv_process_message() - Receive a single outstanding IUCV message
@@ -1263,31 +1283,32 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 			skb->len = 0;
 		}
 	} else {
-		rc = pr_iucv->message_receive(path, msg,
+		if (skb_is_nonlinear(skb)) {
+			struct iucv_array *iba = (struct iucv_array *)skb->head;
+			int i;
+
+			iba[0].address = (u32)(addr_t)skb->data;
+			iba[0].length = (u32)skb_headlen(skb);
+			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+				iba[i + 1].address =
+					(u32)(addr_t)skb_frag_address(frag);
+				iba[i + 1].length = (u32)skb_frag_size(frag);
+			}
+			rc = pr_iucv->message_receive(path, msg,
+					      IUCV_IPBUFLST,
+					      (void *)iba, len, NULL);
+		} else {
+			rc = pr_iucv->message_receive(path, msg,
 					      msg->flags & IUCV_IPRMDATA,
 					      skb->data, len, NULL);
+		}
 		if (rc) {
 			kfree_skb(skb);
 			return;
 		}
-		/* we need to fragment iucv messages for SOCK_STREAM only;
-		 * for SOCK_SEQPACKET, it is only relevant if we support
-		 * record segmentation using MSG_EOR (see also recvmsg()) */
-		if (sk->sk_type == SOCK_STREAM &&
-		    skb->truesize >= sk->sk_rcvbuf / 4) {
-			rc = iucv_fragment_skb(sk, skb, len);
-			kfree_skb(skb);
-			skb = NULL;
-			if (rc) {
-				pr_iucv->path_sever(path, NULL);
-				return;
-			}
-			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
-		} else {
-			skb_reset_transport_header(skb);
-			skb_reset_network_header(skb);
-			skb->len = len;
-		}
+		WARN_ON_ONCE(skb->len != len);
 	}
 
 	IUCV_SKB_CB(skb)->offset = 0;
@@ -1306,7 +1327,7 @@ static void iucv_process_message_q(struct sock *sk)
 	struct sock_msg_q *p, *n;
 
 	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
-		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
+		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
 		if (!skb)
 			break;
 		iucv_process_message(sk, skb, p->path, &p->msg);
@@ -1801,7 +1822,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 	if (len > sk->sk_rcvbuf)
 		goto save_message;
 
-	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
+	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
 	if (!skb)
 		goto save_message;
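The heart of the af_iucv change above is splitting each big message into a linear head (at most one page minus headroom) plus page fragments, with the classic transport reserving extra headroom for the struct iucv_array descriptor list it later hands to message_send()/message_receive() via IUCV_IPBUFLST. The standalone sketch below reproduces only that arithmetic; PAGE_SIZE, MAX_SKB_FRAGS, the descriptor and header sizes, and split_msg() itself are illustrative assumptions, not values or helpers from the patch:

/* Sketch of the headroom/linear split in the new sendmsg path.
 * All constants are illustrative stand-ins for the kernel's values.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL	/* assumption: 4K pages */
#define MAX_SKB_FRAGS	17	/* assumption: typical value */
#define IUCV_ARRAY_SZ	8	/* assumption: sizeof(struct iucv_array) */
#define HIPER_HDR	60	/* assumption: trans hdr + ETH_HLEN */

static void split_msg(size_t len, int hiper)
{
	size_t headroom = hiper ? HIPER_HDR : 0;
	size_t linear;

	if (headroom + len < PAGE_SIZE) {
		linear = len;		/* small message: fully linear skb */
	} else {
		/* classic transport: keep room for the iucv_array
		 * descriptors (one per fragment plus the linear part)
		 */
		if (!hiper)
			headroom += IUCV_ARRAY_SZ * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;	/* rest goes to page frags */
	}
	printf("len=%zu headroom=%zu linear=%zu in_frags=%zu\n",
	       len, headroom, linear, len - linear);
}

int main(void)
{
	split_msg(1000, 0);	/* stays linear */
	split_msg(60000, 0);	/* linear head + paged tail */
	return 0;
}

The same split drives alloc_iucv_recv_skb() on the receive side; because the descriptor list lives in the skb headroom, the IUCV layer can see the whole message without the old iucv_fragment_skb() copy loop or a large contiguous GFP_DMA allocation.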