| field | value | date |
|---|---|---|
| author | Jani Nikula <jani.nikula@intel.com> | 2025-06-09 12:40:46 +0300 |
| committer | Jani Nikula <jani.nikula@intel.com> | 2025-06-09 12:40:46 +0300 |
| commit | 34c55367af96f62e89221444f04487440ebc6487 | |
| tree | fdb36ba67d7dea09455b55037e26043b7e051ef9 /io_uring/net.c | |
| parent | 7247efca0dcbc8ac6147db9200ed1549c0662465 | |
| parent | 19272b37aa4f83ca52bdf9c16d5d81bdd1354494 | |
| download | linux-34c55367af96f62e89221444f04487440ebc6487.tar.xz | |
Merge drm/drm-next into drm-intel-next
Sync to v6.16-rc1, among other things to get the fixed-size GENMASK_U*()
and BIT_U*() macros.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
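For context, the fixed-width variants differ from plain BIT()/GENMASK() in that they evaluate to a fixed type (u8/u16/u32/u64) rather than unsigned long, with compile-time checks that the requested bits fit the width. Below is a minimal sketch of how a driver might use them; the register layout and names are hypothetical, and only BIT_U16()/GENMASK_U16() (v6.16 include/linux/bits.h) and FIELD_PREP() come from the kernel headers:

```c
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/types.h>

/* Hypothetical 16-bit control register layout, for illustration only. */
#define CTL_ENABLE	BIT_U16(0)		/* typed as u16, unlike BIT(0) */
#define CTL_MODE_MASK	GENMASK_U16(3, 1)	/* bits 3..1, also typed u16 */

static u16 ctl_encode(u16 mode)
{
	/* FIELD_PREP() shifts mode into the CTL_MODE_MASK field position. */
	return CTL_ENABLE | FIELD_PREP(CTL_MODE_MASK, mode);
}
```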
Diffstat (limited to 'io_uring/net.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | io_uring/net.c | 78 |

1 file changed, 36 insertions(+), 42 deletions(-)
```diff
diff --git a/io_uring/net.c b/io_uring/net.c
index 24040bc3916a..e16633fd6630 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -18,7 +18,6 @@
 #include "rsrc.h"
 #include "zcrx.h"
 
-#if defined(CONFIG_NET)
 struct io_shutdown {
 	struct file			*file;
 	int				how;
@@ -129,7 +128,7 @@ int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
 
 	ret = __sys_shutdown_sock(sock, shutdown->how);
 	io_req_set_res(req, ret, 0);
-	return IOU_OK;
+	return IOU_COMPLETE;
 }
 
 static bool io_net_retry(struct socket *sock, int flags)
@@ -190,7 +189,6 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req,
 	sr->done_io = 0;
 	sr->retry = false;
 	sr->len = 0; /* get from the provided buffer */
-	req->buf_index = sr->buf_group;
 }
 
 static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg,
@@ -359,15 +357,13 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		kmsg->msg.msg_name = &kmsg->addr;
 		kmsg->msg.msg_namelen = addr_len;
 	}
-	if (sr->flags & IORING_RECVSEND_FIXED_BUF)
+	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
+		req->flags |= REQ_F_IMPORT_BUFFER;
 		return 0;
-	if (!io_do_buffer_select(req)) {
-		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
-				  &kmsg->msg.msg_iter);
-		if (unlikely(ret < 0))
-			return ret;
 	}
-	return 0;
+	if (req->flags & REQ_F_BUFFER_SELECT)
+		return 0;
+	return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
 }
 
 static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -409,13 +405,12 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
 	if (sr->msg_flags & MSG_DONTWAIT)
 		req->flags |= REQ_F_NOWAIT;
+	if (req->flags & REQ_F_BUFFER_SELECT)
+		sr->buf_group = req->buf_index;
 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
 		if (req->opcode == IORING_OP_SENDMSG)
 			return -EINVAL;
-		if (!(req->flags & REQ_F_BUFFER_SELECT))
-			return -EINVAL;
 		sr->msg_flags |= MSG_WAITALL;
-		sr->buf_group = req->buf_index;
 		req->buf_list = NULL;
 		req->flags |= REQ_F_MULTISHOT;
 	}
@@ -507,7 +502,7 @@ static inline bool io_send_finish(struct io_kiocb *req, int *ret,
 	/* Otherwise stop bundle and use the current result. */
finish:
 	io_req_set_res(req, *ret, cflags);
-	*ret = IOU_OK;
+	*ret = IOU_COMPLETE;
 	return true;
 }
 
@@ -558,7 +553,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	else if (sr->done_io)
 		ret = sr->done_io;
 	io_req_set_res(req, ret, 0);
-	return IOU_OK;
+	return IOU_COMPLETE;
 }
 
 static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
@@ -571,6 +566,7 @@ static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
 		.iovs = &kmsg->fast_iov,
 		.max_len = min_not_zero(sr->len, INT_MAX),
 		.nr_iovs = 1,
+		.buf_group = sr->buf_group,
 	};
 
 	if (kmsg->vec.iovec) {
@@ -723,7 +719,6 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct io_async_msghdr *kmsg;
-	int ret;
 
 	kmsg = io_msg_alloc_async(req);
 	if (unlikely(!kmsg))
@@ -739,13 +734,10 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
 		kmsg->msg.msg_iocb = NULL;
 		kmsg->msg.msg_ubuf = NULL;
 
-		if (!io_do_buffer_select(req)) {
-			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
-					  &kmsg->msg.msg_iter);
-			if (unlikely(ret))
-				return ret;
-		}
-		return 0;
+		if (req->flags & REQ_F_BUFFER_SELECT)
+			return 0;
+		return import_ubuf(ITER_DEST, sr->buf, sr->len,
+				   &kmsg->msg.msg_iter);
 	}
 
 	return io_recvmsg_copy_hdr(req, kmsg);
@@ -827,18 +819,24 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
-		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
+		size_t this_ret = *ret - sr->done_io;
+
+		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, this_ret),
 				      issue_flags);
 		if (sr->retry)
 			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
 		/* bundle with no more immediate buffers, we're done */
 		if (req->flags & REQ_F_BL_EMPTY)
 			goto finish;
-		/* if more is available, retry and append to this one */
-		if (!sr->retry && kmsg->msg.msg_inq > 0 && *ret > 0) {
+		/*
+		 * If more is available AND it was a full transfer, retry and
+		 * append to this one
+		 */
+		if (!sr->retry && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
+		    !iov_iter_count(&kmsg->msg.msg_iter)) {
 			req->cqe.flags = cflags & ~CQE_F_MASK;
 			sr->len = kmsg->msg.msg_inq;
-			sr->done_io += *ret;
+			sr->done_io += this_ret;
 			sr->retry = true;
 			return false;
 		}
@@ -985,7 +983,7 @@ retry_multishot:
 		void __user *buf;
 		size_t len = sr->len;
 
-		buf = io_buffer_select(req, &len, issue_flags);
+		buf = io_buffer_select(req, &len, sr->buf_group, issue_flags);
 		if (!buf)
 			return -ENOBUFS;
 
@@ -1063,6 +1061,7 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 			.iovs = &kmsg->fast_iov,
 			.nr_iovs = 1,
 			.mode = KBUF_MODE_EXPAND,
+			.buf_group = sr->buf_group,
 		};
 
 		if (kmsg->vec.iovec) {
@@ -1071,7 +1070,7 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 			arg.mode |= KBUF_MODE_FREE;
 		}
 
-		if (kmsg->msg.msg_inq > 0)
+		if (kmsg->msg.msg_inq > 1)
 			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
 
 		ret = io_buffers_peek(req, &arg);
@@ -1095,7 +1094,7 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 		void __user *buf;
 
 		*len = sr->len;
-		buf = io_buffer_select(req, len, issue_flags);
+		buf = io_buffer_select(req, len, sr->buf_group, issue_flags);
 		if (!buf)
 			return -ENOBUFS;
 		sr->buf = buf;
@@ -1191,16 +1190,14 @@ int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
 	unsigned ifq_idx;
 
-	if (unlikely(sqe->file_index || sqe->addr2 || sqe->addr ||
-		     sqe->addr3))
+	if (unlikely(sqe->addr2 || sqe->addr || sqe->addr3))
 		return -EINVAL;
 
 	ifq_idx = READ_ONCE(sqe->zcrx_ifq_idx);
-	if (ifq_idx != 0)
-		return -EINVAL;
-	zc->ifq = req->ctx->ifq;
+	zc->ifq = xa_load(&req->ctx->zcrx_ctxs, ifq_idx);
 	if (!zc->ifq)
 		return -EINVAL;
+
 	zc->len = READ_ONCE(sqe->len);
 	zc->flags = READ_ONCE(sqe->ioprio);
 	zc->msg_flags = READ_ONCE(sqe->msg_flags);
@@ -1321,8 +1318,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return -ENOMEM;
 
 	if (req->opcode == IORING_OP_SEND_ZC) {
-		if (zc->flags & IORING_RECVSEND_FIXED_BUF)
-			req->flags |= REQ_F_IMPORT_BUFFER;
 		ret = io_send_setup(req, sqe);
 	} else {
 		if (unlikely(sqe->addr2 || sqe->file_index))
@@ -1470,7 +1465,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 		io_req_msg_cleanup(req, 0);
 	}
 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
-	return IOU_OK;
+	return IOU_COMPLETE;
 }
 
 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
@@ -1541,7 +1536,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 		io_req_msg_cleanup(req, 0);
 	}
 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
-	return IOU_OK;
+	return IOU_COMPLETE;
 }
 
 void io_sendrecv_fail(struct io_kiocb *req)
@@ -1705,7 +1700,7 @@ int io_socket(struct io_kiocb *req, unsigned int issue_flags)
 					    sock->file_slot);
 	}
 	io_req_set_res(req, ret, 0);
-	return IOU_OK;
+	return IOU_COMPLETE;
 }
 
 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -1772,7 +1767,7 @@ out:
 		req_set_fail(req);
 	io_req_msg_cleanup(req, issue_flags);
 	io_req_set_res(req, ret, 0);
-	return IOU_OK;
+	return IOU_COMPLETE;
 }
 
 int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -1846,4 +1841,3 @@ void io_netmsg_cache_free(const void *entry)
 	io_vec_free(&kmsg->vec);
 	kfree(kmsg);
 }
-#endif
```
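Two of the hunks above merit a note. In io_recv_finish(), a retried bundle recv carries the running byte total in *ret while sr->done_io records what earlier passes already accounted for, so io_bundle_nbufs() must be fed only this pass's delta; the retry condition is also tightened to full transfers via !iov_iter_count(). A toy model of that arithmetic, with assumed numbers that are not from the commit:

```c
#include <stdio.h>

/* Toy model of the io_recv_finish() accounting: the bytes consumed in
 * this pass are the running total minus what prior passes recorded. */
static int bytes_this_pass(int total_ret, int done_io)
{
	return total_ret - done_io;
}

int main(void)
{
	/* Assumed: first pass received 3000 bytes, the retry raised the
	 * running total to 4000, so this pass consumed 1000. */
	printf("%d\n", bytes_this_pass(4000, 3000));
	return 0;
}
```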
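And in io_recvzc_prep(), the single per-ring zcrx ifq gives way to an xa_load() lookup of the SQE-supplied index in the ring's zcrx_ctxs xarray, allowing multiple interface queues per ring. A minimal sketch of that lookup pattern; xa_load() is the real xarray API, while the surrounding names are made up:

```c
#include <linux/xarray.h>

struct my_ifq;	/* stand-in for the real zcrx ifq context */

/* Resolve a user-supplied queue index against an xarray; xa_load()
 * returns NULL when nothing is stored at that index, which the caller
 * maps to -EINVAL as the diff does. */
static struct my_ifq *my_ifq_lookup(struct xarray *ctxs, unsigned int idx)
{
	return xa_load(ctxs, idx);
}
```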
