author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-17 23:08:18 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-17 23:08:18 +0300
commit     16382e17c0ff583df2d5eed56ca7c771d637e9d1
tree       568c5a69b55d08cda20b8e1367df3135dbafeeb6 /drivers/staging
parent     93f30c73ecd0281cf3685ef0e4e384980a176176
parent     cfe057f7db1ff026c8db75469a3f9ba9736e1975
download   linux-16382e17c0ff583df2d5eed56ca7c771d637e9d1.tar.xz
Merge branch 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull iov_iter updates from Al Viro:
- bio_{map,copy}_user_iov() series; those are cleanups - fixes from the
same pile went into mainline (and stable) in late September.
- fs/iomap.c iov_iter-related fixes
- new primitive - iov_iter_for_each_range(), which applies a function
to kernel-mapped segments of an iov_iter.
Usable for kvec and bvec ones; for the latter it does kmap()/kunmap()
around the callback. _Not_ usable for iovec- or pipe-backed iov_iter;
the latter is not hard to fix if the need ever appears, while the
former is excluded by design.
Another related primitive will have to wait for the next cycle - it
passes page + offset + size instead of pointer + size, and that one
will be usable for everything _except_ kvec. Unfortunately, that one
didn't get exposure in -next yet, so...
- a bit more lustre iov_iter work, including a use case for
iov_iter_for_each_range() (checksum calculation; a short sketch of
that usage follows this list)
- vhost/scsi leak fix in failure exit
- misc cleanups and detritectomy...
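The lustre use case mentioned above (checksum calculation) gives a feel
for the new primitive. Below is a minimal, illustrative sketch of the
same calling pattern; it is not code from this pull. The helper names
crc_step() and crc_of_iter() are invented for the example, and the
READ | ITER_KVEC argument to iov_iter_kvec() simply follows the calling
convention this release (and the staging diff further down) uses.

	#include <linux/uio.h>
	#include <linux/crc32.h>

	/* Callback invoked once per kernel-mapped segment; for bvec-backed
	 * iterators the primitive kmap()s/kunmap()s around this call.
	 * Return 0 on success, as the socklnd callback in the diff does. */
	static int crc_step(struct kvec *v, void *context)
	{
		u32 *crc = context;

		*crc = crc32_le(*crc, v->iov_base, v->iov_len);
		return 0;
	}

	/* Checksum the first 'len' bytes described by a kvec array. */
	static u32 crc_of_iter(struct kvec *kv, unsigned int nsegs, size_t len)
	{
		struct iov_iter iter;
		u32 crc = ~0;

		iov_iter_kvec(&iter, READ | ITER_KVEC, kv, nsegs, len);
		iov_iter_for_each_range(&iter, len, crc_step, &crc);
		return crc;
	}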
* 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (21 commits)
iomap_dio_actor(): fix iov_iter bugs
switch ksocknal_lib_recv_...() to use of iov_iter_for_each_range()
lustre: switch struct ksock_conn to iov_iter
vhost/scsi: switch to iov_iter_get_pages()
fix a page leak in vhost_scsi_iov_to_sgl() error recovery
new primitive: iov_iter_for_each_range()
lnet_return_rx_credits_locked: don't abuse list_entry
xen: don't open-code iov_iter_kvec()
orangefs: remove detritus from struct orangefs_kiocb_s
kill iov_shorten()
bio_alloc_map_data(): do bmd->iter setup right there
bio_copy_user_iov(): saner bio size calculation
bio_map_user_iov(): get rid of copying iov_iter
bio_copy_from_iter(): get rid of copying iov_iter
move more stuff down into bio_copy_user_iov()
blk_rq_map_user_iov(): move iov_iter_advance() down
bio_map_user_iov(): get rid of the iov_for_each()
bio_map_user_iov(): move alignment check into the main loop
don't rely upon subsequent bio_add_pc_page() calls failing
... and with iov_iter_get_pages_alloc() it becomes even simpler
...
Diffstat (limited to 'drivers/staging')
5 files changed, 59 insertions, 212 deletions
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index f8ea523863ba..986c2a40d978 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -1683,10 +1683,10 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
 	case SOCKNAL_RX_LNET_PAYLOAD:
 		last_rcv = conn->ksnc_rx_deadline -
 			   cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
-		CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
+		CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %zd, left: %d, last alive is %ld secs ago\n",
 		       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
 		       &conn->ksnc_ipaddr, conn->ksnc_port,
-		       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
+		       iov_iter_count(&conn->ksnc_rx_to), conn->ksnc_rx_nob_left,
 		       cfs_duration_sec(cfs_time_sub(cfs_time_current(),
						     last_rcv)));
 		lnet_finalize(conn->ksnc_peer->ksnp_ni,
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index 35a7b396def4..d50ebdf863fa 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -358,11 +358,7 @@ struct ksock_conn {
 	__u8 ksnc_rx_scheduled;		/* being progressed */
 	__u8 ksnc_rx_state;		/* what is being read */
 	int ksnc_rx_nob_left;		/* # bytes to next hdr/body */
-	int ksnc_rx_nob_wanted;		/* bytes actually wanted */
-	int ksnc_rx_niov;		/* # iovec frags */
-	struct kvec *ksnc_rx_iov;	/* the iovec frags */
-	int ksnc_rx_nkiov;		/* # page frags */
-	struct bio_vec *ksnc_rx_kiov;	/* the page frags */
+	struct iov_iter ksnc_rx_to;	/* copy destination */
 	union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
 	__u32 ksnc_rx_csum;		/* partial checksum for incoming
 					 * data
@@ -701,8 +697,7 @@ int ksocknal_lib_setup_sock(struct socket *so);
 int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx);
 int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx);
 void ksocknal_lib_eager_ack(struct ksock_conn *conn);
-int ksocknal_lib_recv_iov(struct ksock_conn *conn);
-int ksocknal_lib_recv_kiov(struct ksock_conn *conn);
+int ksocknal_lib_recv(struct ksock_conn *conn);
 int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
 				   int *rxmem, int *nagle);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index a5f2ecb966fa..27c56d5ae4e5 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -250,66 +250,16 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
 }
 
 static int
-ksocknal_recv_iov(struct ksock_conn *conn)
+ksocknal_recv_iter(struct ksock_conn *conn)
 {
-	struct kvec *iov = conn->ksnc_rx_iov;
 	int nob;
 	int rc;
 
-	LASSERT(conn->ksnc_rx_niov > 0);
-
-	/*
-	 * Never touch conn->ksnc_rx_iov or change connection
-	 * status inside ksocknal_lib_recv_iov
-	 */
-	rc = ksocknal_lib_recv_iov(conn);
-
-	if (rc <= 0)
-		return rc;
-
-	/* received something... */
-	nob = rc;
-
-	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
-	conn->ksnc_rx_deadline =
-		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-	mb();	/* order with setting rx_started */
-	conn->ksnc_rx_started = 1;
-
-	conn->ksnc_rx_nob_wanted -= nob;
-	conn->ksnc_rx_nob_left -= nob;
-
-	do {
-		LASSERT(conn->ksnc_rx_niov > 0);
-
-		if (nob < (int)iov->iov_len) {
-			iov->iov_len -= nob;
-			iov->iov_base += nob;
-			return -EAGAIN;
-		}
-
-		nob -= iov->iov_len;
-		conn->ksnc_rx_iov = ++iov;
-		conn->ksnc_rx_niov--;
-	} while (nob);
-
-	return rc;
-}
-
-static int
-ksocknal_recv_kiov(struct ksock_conn *conn)
-{
-	struct bio_vec *kiov = conn->ksnc_rx_kiov;
-	int nob;
-	int rc;
-
-	LASSERT(conn->ksnc_rx_nkiov > 0);
-
 	/*
-	 * Never touch conn->ksnc_rx_kiov or change connection
-	 * status inside ksocknal_lib_recv_iov
+	 * Never touch conn->ksnc_rx_to or change connection
+	 * status inside ksocknal_lib_recv
 	 */
-	rc = ksocknal_lib_recv_kiov(conn);
+	rc = ksocknal_lib_recv(conn);
 
 	if (rc <= 0)
 		return rc;
@@ -323,22 +273,11 @@ ksocknal_recv_kiov(struct ksock_conn *conn)
 	mb();	/* order with setting rx_started */
 	conn->ksnc_rx_started = 1;
 
-	conn->ksnc_rx_nob_wanted -= nob;
 	conn->ksnc_rx_nob_left -= nob;
 
-	do {
-		LASSERT(conn->ksnc_rx_nkiov > 0);
-
-		if (nob < (int)kiov->bv_len) {
-			kiov->bv_offset += nob;
-			kiov->bv_len -= nob;
-			return -EAGAIN;
-		}
-
-		nob -= kiov->bv_len;
-		conn->ksnc_rx_kiov = ++kiov;
-		conn->ksnc_rx_nkiov--;
-	} while (nob);
+	iov_iter_advance(&conn->ksnc_rx_to, nob);
+	if (iov_iter_count(&conn->ksnc_rx_to))
+		return -EAGAIN;
 
 	return 1;
 }
@@ -348,7 +287,7 @@ ksocknal_receive(struct ksock_conn *conn)
 {
 	/*
 	 * Return 1 on success, 0 on EOF, < 0 on error.
-	 * Caller checks ksnc_rx_nob_wanted to determine
+	 * Caller checks ksnc_rx_to to determine
 	 * progress/completion.
 	 */
 	int rc;
@@ -365,11 +304,7 @@ ksocknal_receive(struct ksock_conn *conn)
 	}
 
 	for (;;) {
-		if (conn->ksnc_rx_niov)
-			rc = ksocknal_recv_iov(conn);
-		else
-			rc = ksocknal_recv_kiov(conn);
-
+		rc = ksocknal_recv_iter(conn);
 		if (rc <= 0) {
 			/* error/EOF or partial receive */
 			if (rc == -EAGAIN) {
@@ -383,7 +318,7 @@
 
 		/* Completed a fragment */
 
-		if (!conn->ksnc_rx_nob_wanted) {
+		if (!iov_iter_count(&conn->ksnc_rx_to)) {
 			rc = 1;
 			break;
 		}
@@ -1051,6 +986,7 @@ int
 ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
 {
 	static char ksocknal_slop_buffer[4096];
+	struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;
 	int nob;
 	unsigned int niov;
	int skipped;
@@ -1071,32 +1007,26 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
 		case KSOCK_PROTO_V2:
 		case KSOCK_PROTO_V3:
 			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
-			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
-			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;
-
-			conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
+			kvec->iov_base = &conn->ksnc_msg;
+			kvec->iov_len = offsetof(struct ksock_msg, ksm_u);
 			conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
-			conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
+			iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
+				      1, offsetof(struct ksock_msg, ksm_u));
 			break;
 
 		case KSOCK_PROTO_V1:
 			/* Receiving bare struct lnet_hdr */
 			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
-			conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
+			kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
+			kvec->iov_len = sizeof(struct lnet_hdr);
 			conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);
-
-			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
-			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
-			conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
+			iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
+				      1, sizeof(struct lnet_hdr));
 			break;
 
 		default:
 			LBUG();
 		}
-		conn->ksnc_rx_niov = 1;
-
-		conn->ksnc_rx_kiov = NULL;
-		conn->ksnc_rx_nkiov = 0;
 		conn->ksnc_rx_csum = ~0;
 		return 1;
 	}
@@ -1107,15 +1037,14 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
 	 */
 	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
 	conn->ksnc_rx_nob_left = nob_to_skip;
-	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
 	skipped = 0;
 	niov = 0;
 
 	do {
 		nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
 
-		conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
-		conn->ksnc_rx_iov[niov].iov_len = nob;
+		kvec[niov].iov_base = ksocknal_slop_buffer;
+		kvec[niov].iov_len = nob;
 		niov++;
 		skipped += nob;
 		nob_to_skip -= nob;
@@ -1123,16 +1052,14 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
 	} while (nob_to_skip &&	/* mustn't overflow conn's rx iov */
 		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));
 
-	conn->ksnc_rx_niov = niov;
-	conn->ksnc_rx_kiov = NULL;
-	conn->ksnc_rx_nkiov = 0;
-	conn->ksnc_rx_nob_wanted = skipped;
+	iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, niov, skipped);
 	return 0;
 }
 
 static int
 ksocknal_process_receive(struct ksock_conn *conn)
 {
+	struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;
 	struct lnet_hdr *lhdr;
 	struct lnet_process_id *id;
 	int rc;
@@ -1146,7 +1073,7 @@ ksocknal_process_receive(struct ksock_conn *conn)
 		conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
 		conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
  again:
-	if (conn->ksnc_rx_nob_wanted) {
+	if (iov_iter_count(&conn->ksnc_rx_to)) {
 		rc = ksocknal_receive(conn);
 
 		if (rc <= 0) {
@@ -1171,7 +1098,7 @@ ksocknal_process_receive(struct ksock_conn *conn)
 			return (!rc ? -ESHUTDOWN : rc);
 		}
 
-		if (conn->ksnc_rx_nob_wanted) {
+		if (iov_iter_count(&conn->ksnc_rx_to)) {
 			/* short read */
 			return -EAGAIN;
 		}
@@ -1234,16 +1161,13 @@ ksocknal_process_receive(struct ksock_conn *conn)
 		}
 
 		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
-		conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
 		conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);
 
-		conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
-		conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
-		conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);
+		kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
+		kvec->iov_len = sizeof(struct ksock_lnet_msg);
 
-		conn->ksnc_rx_niov = 1;
-		conn->ksnc_rx_kiov = NULL;
-		conn->ksnc_rx_nkiov = 0;
+		iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
+			      1, sizeof(struct ksock_lnet_msg));
 
 		goto again;	/* read lnet header now */
@@ -1345,26 +1269,9 @@ ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
 	LASSERT(to->nr_segs <= LNET_MAX_IOV);
 
 	conn->ksnc_cookie = msg;
-	conn->ksnc_rx_nob_wanted = iov_iter_count(to);
 	conn->ksnc_rx_nob_left = rlen;
 
-	if (to->type & ITER_KVEC) {
-		conn->ksnc_rx_nkiov = 0;
-		conn->ksnc_rx_kiov = NULL;
-		conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
-		conn->ksnc_rx_niov =
-			lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
-					 to->nr_segs, to->kvec,
-					 to->iov_offset, iov_iter_count(to));
-	} else {
-		conn->ksnc_rx_niov = 0;
-		conn->ksnc_rx_iov = NULL;
-		conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
-		conn->ksnc_rx_nkiov =
-			lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
-					  to->nr_segs, to->bvec,
-					  to->iov_offset, iov_iter_count(to));
-	}
+	conn->ksnc_rx_to = *to;
 
 	LASSERT(conn->ksnc_rx_scheduled);
@@ -2329,12 +2236,12 @@ ksocknal_find_timed_out_conn(struct ksock_peer *peer)
 				     conn->ksnc_rx_deadline)) {
 			/* Timed out incomplete incoming message */
 			ksocknal_conn_addref(conn);
-			CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
+			CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %zd left %d\n",
 				libcfs_id2str(peer->ksnp_id),
 				&conn->ksnc_ipaddr,
 				conn->ksnc_port,
 				conn->ksnc_rx_state,
-				conn->ksnc_rx_nob_wanted,
+				iov_iter_count(&conn->ksnc_rx_to),
 				conn->ksnc_rx_nob_left);
 			return conn;
 		}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 970140f09258..cb28dd2baf2f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -162,94 +162,39 @@ ksocknal_lib_eager_ack(struct ksock_conn *conn)
 		   sizeof(opt));
 }
 
-int
-ksocknal_lib_recv_iov(struct ksock_conn *conn)
+static int lustre_csum(struct kvec *v, void *context)
 {
-	unsigned int niov = conn->ksnc_rx_niov;
-	struct kvec *iov = conn->ksnc_rx_iov;
-	struct msghdr msg = {
-		.msg_flags = 0
-	};
-	int nob;
-	int i;
-	int rc;
-	int fragnob;
-	int sum;
-	__u32 saved_csum;
-
-	LASSERT(niov > 0);
-
-	for (nob = i = 0; i < niov; i++)
-		nob += iov[i].iov_len;
-
-	LASSERT(nob <= conn->ksnc_rx_nob_wanted);
-
-	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, niov, nob);
-	rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
-
-	saved_csum = 0;
-	if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
-		saved_csum = conn->ksnc_msg.ksm_csum;
-		conn->ksnc_msg.ksm_csum = 0;
-	}
-
-	if (saved_csum) {
-		/* accumulate checksum */
-		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
-			LASSERT(i < niov);
-
-			fragnob = iov[i].iov_len;
-			if (fragnob > sum)
-				fragnob = sum;
-
-			conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
-						      iov[i].iov_base,
-						      fragnob);
-		}
-		conn->ksnc_msg.ksm_csum = saved_csum;
-	}
-
-	return rc;
+	struct ksock_conn *conn = context;
+	conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
+				      v->iov_base, v->iov_len);
+	return 0;
 }
 
 int
-ksocknal_lib_recv_kiov(struct ksock_conn *conn)
+ksocknal_lib_recv(struct ksock_conn *conn)
 {
-	unsigned int niov = conn->ksnc_rx_nkiov;
-	struct bio_vec *kiov = conn->ksnc_rx_kiov;
-	struct msghdr msg = {
-		.msg_flags = 0
-	};
-	int nob;
-	int i;
+	struct msghdr msg = { .msg_iter = conn->ksnc_rx_to };
+	__u32 saved_csum;
 	int rc;
-	void *base;
-	int sum;
-	int fragnob;
 
-	for (nob = i = 0; i < niov; i++)
-		nob += kiov[i].bv_len;
-
-	LASSERT(nob <= conn->ksnc_rx_nob_wanted);
-
-	iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, kiov, niov, nob);
 	rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
+	if (rc <= 0)
+		return rc;
 
-	if (conn->ksnc_msg.ksm_csum) {
-		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
-			LASSERT(i < niov);
-
-			base = kmap(kiov[i].bv_page) + kiov[i].bv_offset;
-			fragnob = kiov[i].bv_len;
-			if (fragnob > sum)
-				fragnob = sum;
+	saved_csum = conn->ksnc_msg.ksm_csum;
+	if (!saved_csum)
+		return rc;
 
-			conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
-						      base, fragnob);
+	/* header is included only in V2 - V3 checksums only the bulk data */
+	if (!(conn->ksnc_rx_to.type & ITER_BVEC) &&
+	    conn->ksnc_proto != &ksocknal_protocol_v2x)
+		return rc;
+
+	/* accumulate checksum */
+	conn->ksnc_msg.ksm_csum = 0;
+	iov_iter_for_each_range(&conn->ksnc_rx_to, rc, lustre_csum, conn);
+	conn->ksnc_msg.ksm_csum = saved_csum;
 
-			kunmap(kiov[i].bv_page);
-		}
-	}
 	return rc;
 }
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 27848cd69564..68d16ffec980 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -890,7 +890,7 @@ lnet_return_rx_credits_locked(struct lnet_msg *msg)
 	 */
 	LASSERT(msg->msg_kiov);
 
-	rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
+	rb = container_of(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
 	rbp = rb->rb_pool;
 	msg->msg_kiov = NULL;
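The bulk of the socklnd_cb.c changes above follow one pattern: the
per-fragment bookkeeping (the ksnc_rx_niov/ksnc_rx_nkiov counters plus
hand-rolled pointer arithmetic) is replaced by iov_iter_advance() and
iov_iter_count() on ksnc_rx_to. A minimal sketch of that consumer-side
pattern follows; the helper name consume_some() is invented for the
example and is not one of the driver's functions.

	#include <linux/uio.h>
	#include <linux/errno.h>

	/*
	 * After 'nob_received' bytes have been copied toward the iterator's
	 * destination, advance it and report whether the fragment is done.
	 * Returns 1 when everything wanted has arrived, -EAGAIN otherwise.
	 */
	static int consume_some(struct iov_iter *to, size_t nob_received)
	{
		iov_iter_advance(to, nob_received);
		if (iov_iter_count(to))		/* bytes still wanted */
			return -EAGAIN;
		return 1;
	}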