io_uring/bpf-ops: add kfunc helpers

Add two kfuncs that should cover most of the needs:

1. bpf_io_uring_submit_sqes(), which allows to submit io_uring requests.
   It mirrors the normal user space submission path and follows all
   related io_uring_enter(2) rules, i.e. SQEs are taken from the SQ
   according to head/tail values. In case of IORING_SETUP_SQ_REWIND,
   it'll submit the first N entries.

2. bpf_io_uring_get_region() returns a pointer to the specified region,
   where io_uring regions are kernel-userspace shared chunks of memory.
   It takes the size as an argument, which should be a load time
   constant. There are 3 types of regions:
   - IOU_REGION_SQ returns the submission queue.
   - IOU_REGION_CQ stores the CQ, SQ/CQ headers and the sqarray. In
     other words, it gives the same memory that would normally be
     mmap'ed at offset IORING_OFF_SQ_RING with IORING_FEAT_SINGLE_MMAP
     enabled.
   - IOU_REGION_MEM represents the memory / parameter region. It can be
     used to store request indirect parameters and for kernel - user
     communication.

It intentionally provides a thin but flexible API and expects BPF
programs to implement CQ/SQ header parsing, CQ walking, etc. That
mirrors how normal user space works with rings and should help to
minimise kernel / kfunc helper changes while introducing new generic
io_uring features.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://patch.msgid.link/967bcc10e94c796eb273998621551b2a21848cde.1772109579.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2026-02-26 12:48:40 +00:00 committed by Jens Axboe
parent d0e437b76b
commit 890819248a
2 changed files with 61 additions and 0 deletions

View File

@ -5,11 +5,58 @@
#include "io_uring.h"
#include "register.h"
#include "memmap.h"
#include "bpf-ops.h"
#include "loop.h"
/* BTF type of struct iou_loop_params, resolved once in bpf_io_init() */
static const struct btf_type *loop_params_type;

__bpf_kfunc_start_defs();
/**
 * bpf_io_uring_submit_sqes - submit pending SQEs from the ring's SQ
 * @ctx: ring to submit for; uring_lock is expected to be held
 * @nr: maximum number of SQEs to submit
 *
 * Mirrors the user space submission path of io_uring_enter(2): SQEs are
 * consumed from the SQ according to the current head/tail values.
 * Return value is that of io_submit_sqes() — presumably the number of
 * SQEs submitted; NOTE(review): confirm against io_submit_sqes().
 */
__bpf_kfunc int bpf_io_uring_submit_sqes(struct io_ring_ctx *ctx, u32 nr)
{
	return io_submit_sqes(ctx, nr);
}
/**
 * bpf_io_uring_get_region - look up a kernel/user shared ring region
 * @ctx: ring context; uring_lock must be held
 * @region_id: one of IOU_REGION_MEM, IOU_REGION_CQ, IOU_REGION_SQ
 * @rdwr_buf_size: size the caller intends to access, must be a load
 *                 time constant no larger than the region itself
 *
 * Returns a pointer to the start of the selected region, or NULL when
 * the id is unknown or the requested size exceeds the region's size.
 */
__bpf_kfunc
__u8 *bpf_io_uring_get_region(struct io_ring_ctx *ctx, __u32 region_id,
			      const size_t rdwr_buf_size)
{
	struct io_mapped_region *region = NULL;

	lockdep_assert_held(&ctx->uring_lock);

	switch (region_id) {
	case IOU_REGION_MEM:
		region = &ctx->param_region;
		break;
	case IOU_REGION_CQ:
		region = &ctx->ring_region;
		break;
	case IOU_REGION_SQ:
		region = &ctx->sq_region;
		break;
	}

	if (!region || unlikely(io_region_size(region) < rdwr_buf_size))
		return NULL;
	return io_region_get_ptr(region);
}
__bpf_kfunc_end_defs();

/* BTF ID set describing the kfuncs exported to BPF programs */
BTF_KFUNCS_START(io_uring_kfunc_set)
/* KF_SLEEPABLE: only callable from sleepable BPF program contexts */
BTF_ID_FLAGS(func, bpf_io_uring_submit_sqes, KF_SLEEPABLE);
/* KF_RET_NULL: the verifier forces callers to NULL-check the result */
BTF_ID_FLAGS(func, bpf_io_uring_get_region, KF_RET_NULL);
BTF_KFUNCS_END(io_uring_kfunc_set)

/* Registration descriptor handed to register_btf_kfunc_id_set() */
static const struct btf_kfunc_id_set bpf_io_uring_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &io_uring_kfunc_set,
};
static int io_bpf_ops__loop_step(struct io_ring_ctx *ctx,
struct iou_loop_params *lp)
{
@ -68,12 +115,20 @@ io_lookup_struct_type(struct btf *btf, const char *name)
static int bpf_io_init(struct btf *btf)
{
int ret;
loop_params_type = io_lookup_struct_type(btf, "iou_loop_params");
if (!loop_params_type) {
pr_err("io_uring: Failed to locate iou_loop_params\n");
return -EINVAL;
}
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
&bpf_io_uring_kfunc_set);
if (ret) {
pr_err("io_uring: Failed to register kfuncs (%d)\n", ret);
return ret;
}
return 0;
}

View File

@ -4,6 +4,12 @@
#include <linux/io_uring_types.h>
/* Region identifiers accepted by bpf_io_uring_get_region() */
enum {
	/* memory / parameter region: indirect request parameters and
	 * generic kernel - user communication
	 */
	IOU_REGION_MEM,
	/* CQ plus SQ/CQ headers and the sqarray, i.e. the memory that
	 * user space would mmap at IORING_OFF_SQ_RING with
	 * IORING_FEAT_SINGLE_MMAP
	 */
	IOU_REGION_CQ,
	/* the submission queue */
	IOU_REGION_SQ,
};
struct io_uring_bpf_ops {
int (*loop_step)(struct io_ring_ctx *ctx, struct iou_loop_params *lp);