mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
io_uring-7.0-20260320
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmm9O2oQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgprb2D/9EM91W+zo0M+6Xjs+A/YaHETDdSUUQwyuT
B8ZNHEHwkB3TdLHxPvFDopf8bC8IFy1T3GhGkHgq/Uvh7hx9oYTvVrV1xjXCqW/W
mpP5bgguJYlOTwFW2lYFWQTI5CZDletOeWbng00ecAZPoVVSjJx+KNVFm3DntYSR
WeO20Y/OWXZ6vFu4b7825lHFYgVVPdO1iy2X5HYx4XHlWOjj7LK7KW2UDai4lPIF
w1IjsOk8Cv7Wj38qYB2d3dMlgryER0dF1Hf2N+1awh1FFjlsiZ+YbLnrTiFyKwa0
zip6ejDXwpevS6/FmCa6u19/DHxQ0WgvEZOwQlMyggCrIdaqN01LMGhCkleTev72
demfEKA71avcG1gWmJLX9aNGfkY0rXT0Q9ArLk8FTPXnImnXQdJDfeLuql0ExMrL
2MWkhJS4Dvp4MDERq0WwOdPrAMiDrG9O6DnHQBZYZjJuuqljF2LWb6Dbj7V9gO+M
ePADlnKhR4o0MKvTcFqeM33kKIkdwSNzDvcLO+gnAeDxNFQsCyDSm8erGj+LEMfN
LA2668ZWF24plKmWbEq1IZaw8P9CM5AqD6+QCIB+XPge9JNxzpeUuAfTkwuNp+bz
V5vDw3RQ8p65cxhhD34rqor/KGN5WlSOYTrp9aU4BNIse9S/o8XAyPcR3ywZChYJ
OHvT3W4O3g==
=xEWR
-----END PGP SIGNATURE-----

Merge tag 'io_uring-7.0-20260320' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull io_uring fixes from Jens Axboe:

 - A bit of a work-around for AF_UNIX recv multishot, as the in-kernel
   implementation doesn't properly signal EOF. We'll likely rework this
   one going forward, but the fix is sufficient for now

 - Two fixes for incrementally consumed buffers, for non-pollable files
   and for 0 byte reads

* tag 'io_uring-7.0-20260320' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  io_uring/kbuf: propagate BUF_MORE through early buffer commit path
  io_uring/kbuf: fix missing BUF_MORE for incremental buffers at EOF
  io_uring/poll: fix multishot recv missing EOF on wakeup race
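The three fixes all concern how a multishot recv on a provided buffer ring reports EOF and remaining buffer space to userspace. The following minimal liburing loop is a hedged illustration of that usage, not part of the pulled patches; names such as sockfd, BGID and the buffer sizes are assumptions, and error handling is omitted.

/*
 * Hedged userspace sketch: multishot recv from a provided buffer ring,
 * watching IORING_CQE_F_MORE and IORING_CQE_F_BUF_MORE around EOF.
 */
#include <liburing.h>
#include <stdlib.h>

#define BGID    0
#define NBUFS   8
#define BUF_SZ  4096

static void recv_multishot_until_eof(int sockfd)
{
    struct io_uring ring;
    struct io_uring_buf_ring *br;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    char *mem;
    int i, err;

    io_uring_queue_init(64, &ring, 0);

    /*
     * Incremental consumption (the "incrementally consumed buffers" the
     * merge message mentions) is requested at buffer-ring registration
     * time with IOU_PBUF_RING_INC on kernels that support it; plain
     * registration is shown here.
     */
    br = io_uring_setup_buf_ring(&ring, NBUFS, BGID, 0, &err);
    mem = malloc((size_t)NBUFS * BUF_SZ);
    for (i = 0; i < NBUFS; i++)
        io_uring_buf_ring_add(br, mem + (size_t)i * BUF_SZ, BUF_SZ, i,
                              io_uring_buf_ring_mask(NBUFS), i);
    io_uring_buf_ring_advance(br, NBUFS);

    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
    sqe->flags |= IOSQE_BUFFER_SELECT;
    sqe->buf_group = BGID;
    io_uring_submit(&ring);

    for (;;) {
        unsigned int flags;
        int res;

        if (io_uring_wait_cqe(&ring, &cqe))
            break;
        res = cqe->res;
        flags = cqe->flags;
        io_uring_cqe_seen(&ring, cqe);

        /*
         * res == 0 is EOF for recv. The poll fix above is about making sure
         * this terminal completion is actually generated on a HUP wakeup,
         * and the kbuf fixes are about BUF_MORE being reported correctly
         * when it happens on an incremental buffer.
         */
        if (res <= 0)
            break;
        if (flags & IORING_CQE_F_BUFFER) {
            unsigned int bid = flags >> IORING_CQE_BUFFER_SHIFT;

            if (!(flags & IORING_CQE_F_BUF_MORE)) {
                /* buffer bid fully used, hand it back to the ring */
                io_uring_buf_ring_add(br, mem + (size_t)bid * BUF_SZ, BUF_SZ,
                                      bid, io_uring_buf_ring_mask(NBUFS), 0);
                io_uring_buf_ring_advance(br, 1);
            }
        }
        if (!(flags & IORING_CQE_F_MORE))
            break;  /* multishot stopped; a real app would re-arm */
    }

    free(mem);
    io_uring_queue_exit(&ring);
}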
This commit is contained in:
commit c612261bed
@@ -541,6 +541,7 @@ enum {
     REQ_F_BL_NO_RECYCLE_BIT,
     REQ_F_BUFFERS_COMMIT_BIT,
     REQ_F_BUF_NODE_BIT,
+    REQ_F_BUF_MORE_BIT,
     REQ_F_HAS_METADATA_BIT,
     REQ_F_IMPORT_BUFFER_BIT,
     REQ_F_SQE_COPIED_BIT,
@@ -626,6 +627,8 @@ enum {
     REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
     /* buf node is valid */
     REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
+    /* incremental buffer consumption, more space available */
+    REQ_F_BUF_MORE = IO_REQ_FLAG(REQ_F_BUF_MORE_BIT),
     /* request has read/write metadata assigned */
     REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
     /*
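The two hunks above follow the header's usual pairing: each REQ_F_*_BIT enumerator is a bit index, and the companion REQ_F_* entry is the mask built from it, which is why adding a flag touches both enums. A stand-alone model of that pattern, with illustrative EX_* names rather than the kernel's definitions:

#include <stdint.h>

enum {
    EX_BUF_NODE_BIT,
    EX_BUF_MORE_BIT,        /* new bit index, mirrors REQ_F_BUF_MORE_BIT */
};

#define EX_REQ_FLAG(bitno)  (1u << (bitno))

enum {
    EX_BUF_NODE = EX_REQ_FLAG(EX_BUF_NODE_BIT),
    EX_BUF_MORE = EX_REQ_FLAG(EX_BUF_MORE_BIT),  /* mirrors REQ_F_BUF_MORE */
};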
@@ -34,6 +34,10 @@ struct io_provide_buf {
 
 static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
 {
+    /* No data consumed, return false early to avoid consuming the buffer */
+    if (!len)
+        return false;
+
     while (len) {
         struct io_uring_buf *buf;
         u32 buf_len, this_len;
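As a rough model of what an incremental commit has to decide (a simplified user-space sketch under assumed semantics, not the kernel function): consume len bytes from the head ring entry, retire entries that are fully used, shrink a partially used head entry in place, and return whether the current buffer is fully consumed. The new !len check keeps a zero-byte completion from consuming the buffer, and a false return is what the callers below turn into REQ_F_BUF_MORE.

#include <stdbool.h>
#include <stdint.h>

struct ex_buf  { uint64_t addr; uint32_t len; };
struct ex_ring { struct ex_buf *bufs; uint32_t nr; uint32_t head; };

static bool ex_inc_commit(struct ex_ring *r, uint32_t len)
{
    if (!len)               /* nothing consumed: keep the buffer, signal "more" */
        return false;

    while (len) {
        struct ex_buf *buf = &r->bufs[r->head % r->nr];
        uint32_t this_len = buf->len < len ? buf->len : len;

        len -= this_len;
        if (this_len < buf->len) {
            /* partial use: shrink the entry in place, keep it at the head */
            buf->addr += this_len;
            buf->len -= this_len;
            return false;
        }
        /* entry fully used: retire it and move on */
        buf->len = 0;
        r->head++;
    }
    return true;
}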
@@ -212,7 +216,8 @@ static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
     sel.addr = u64_to_user_ptr(READ_ONCE(buf->addr));
 
     if (io_should_commit(req, issue_flags)) {
-        io_kbuf_commit(req, sel.buf_list, *len, 1);
+        if (!io_kbuf_commit(req, sel.buf_list, *len, 1))
+            req->flags |= REQ_F_BUF_MORE;
         sel.buf_list = NULL;
     }
     return sel;
@@ -345,7 +350,8 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
          */
         if (ret > 0) {
             req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
-            io_kbuf_commit(req, sel->buf_list, arg->out_len, ret);
+            if (!io_kbuf_commit(req, sel->buf_list, arg->out_len, ret))
+                req->flags |= REQ_F_BUF_MORE;
         }
     } else {
         ret = io_provided_buffers_select(req, &arg->out_len, sel->buf_list, arg->iovs);
@@ -391,8 +397,10 @@ static inline bool __io_put_kbuf_ring(struct io_kiocb *req,
 
     if (bl)
         ret = io_kbuf_commit(req, bl, len, nr);
+    if (ret && (req->flags & REQ_F_BUF_MORE))
+        ret = false;
 
-    req->flags &= ~REQ_F_BUFFER_RING;
+    req->flags &= ~(REQ_F_BUFFER_RING | REQ_F_BUF_MORE);
     return ret;
 }
 
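Why the return value matters: in recent trees a false return from __io_put_kbuf_ring() is what makes the posted CQE carry IORING_CQE_F_BUF_MORE, so forcing it false when REQ_F_BUF_MORE was recorded at commit time propagates the early result to userspace. A hedged, self-contained sketch of that translation, using a stand-in request type rather than struct io_kiocb:

/* Needs a uapi header new enough to define IORING_CQE_F_BUF_MORE. */
#include <linux/io_uring.h>
#include <stdbool.h>

struct ex_req {
    unsigned int buf_index;     /* buffer id of the selected ring buffer */
    bool buf_fully_used;        /* stand-in for what __io_put_kbuf_ring() reports */
};

static unsigned int ex_cqe_buffer_flags(const struct ex_req *req)
{
    unsigned int cflags;

    cflags = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
    /* buffer not fully consumed -> tell userspace more space remains */
    if (!req->buf_fully_used)
        cflags |= IORING_CQE_F_BUF_MORE;
    return cflags;
}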
@@ -272,6 +272,7 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
                 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
+                v &= ~IO_POLL_RETRY_FLAG;
             }
             v &= IO_POLL_REF_MASK;
         }
 
         /* the mask was stashed in __io_poll_execute */
@@ -304,8 +305,13 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
                 return IOU_POLL_REMOVE_POLL_USE_RES;
             }
         } else {
-            int ret = io_poll_issue(req, tw);
+            int ret;
+
+            /* multiple refs and HUP, ensure we loop once more */
+            if ((req->cqe.res & (POLLHUP | POLLRDHUP)) && v != 1)
+                v--;
 
+            ret = io_poll_issue(req, tw);
             if (ret == IOU_COMPLETE)
                 return IOU_POLL_REMOVE_POLL_USE_RES;
             else if (ret == IOU_REQUEUE)
@@ -321,7 +327,6 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
          * Release all references, retry if someone tried to restart
          * task_work while we were executing it.
          */
-        v &= IO_POLL_REF_MASK;
     } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
 
     io_napi_add(req);
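The poll change follows a common single-owner refcount pattern: wakeups take references, the task_work handler drops the references it observed and loops while any remain; dropping one reference fewer when HUP arrives with multiple refs pending forces one more pass so the EOF completion is not lost. A stand-alone C11 atomics model of that idea, an illustration under assumed semantics rather than the kernel's io_poll_refs code:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int ex_poll_refs;

/* wakeup side: whoever moves the count from 0 owns running the handler */
static bool ex_poll_wake(void)
{
    return atomic_fetch_add(&ex_poll_refs, 1) == 0;
}

/* handler side: returns once every reference it observed has been dropped */
static void ex_poll_handle(bool hup_seen, void (*issue_once)(void))
{
    int v;

    do {
        v = atomic_load(&ex_poll_refs);
        /* the HUP tweak: keep one ref back so we loop at least once more */
        if (hup_seen && v != 1)
            v--;
        issue_once();
    } while (atomic_fetch_sub(&ex_poll_refs, v) - v != 0);
}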