Gentwo Git Trees - linux/.git/commitdiff
io_uring/zcrx: share an ifq between rings
authorDavid Wei <dw@davidwei.uk>
Thu, 13 Nov 2025 10:46:18 +0000 (10:46 +0000)
committerJens Axboe <axboe@kernel.dk>
Thu, 13 Nov 2025 18:19:37 +0000 (11:19 -0700)
Add a way to share an ifq from a src ring that is real (i.e. bound to a
HW RX queue) with other rings. This is done by passing a new flag
ZCRX_REG_IMPORT in the registration struct
io_uring_zcrx_ifq_reg, alongside the fd of an exported zcrx ifq.

Signed-off-by: David Wei <dw@davidwei.uk>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
include/uapi/linux/io_uring.h
io_uring/zcrx.c

index 4bedc0310a55550512d910400cb2d1252ab32674..deb772222b6dfe4a8cb7c42241cd4c78464c3dc8 100644 (file)
@@ -1063,6 +1063,10 @@ struct io_uring_zcrx_area_reg {
        __u64   __resv2[2];
 };
 
+/* Flags accepted in io_uring_zcrx_ifq_reg::flags */
+enum zcrx_reg_flags {
+       ZCRX_REG_IMPORT = 1,
+};
+
 /*
  * Argument for IORING_REGISTER_ZCRX_IFQ
  */
index da7e556c349ef8d4f00b2758f69402f9c6a88baf..b99cf2c6670aa884098f875f396cc2df6b63faf2 100644 (file)
@@ -660,6 +660,63 @@ static int zcrx_export(struct io_ring_ctx *ctx, struct io_zcrx_ifq *ifq,
        return 0;
 }
 
+/*
+ * Import an already-registered zcrx ifq (exported via zcrx_export()) into
+ * this ring: look up the ifq behind the fd passed in @reg->if_idx, take
+ * references on it, and publish it in @ctx's zcrx_ctxs xarray under a
+ * freshly allocated id reported back to userspace in @reg->zcrx_id.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int import_zcrx(struct io_ring_ctx *ctx,
+                      struct io_uring_zcrx_ifq_reg __user *arg,
+                      struct io_uring_zcrx_ifq_reg *reg)
+{
+       struct io_zcrx_ifq *ifq;
+       struct file *file;
+       int fd, ret;
+       u32 id;
+
+       /* importing ring must use deferred taskrun and 32-byte/mixed CQEs */
+       if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+               return -EINVAL;
+       if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
+               return -EINVAL;
+       /* queue/area/region setup belongs to the exporting ring only */
+       if (reg->if_rxq || reg->rq_entries || reg->area_ptr || reg->region_ptr)
+               return -EINVAL;
+
+       /* on import, if_idx carries the fd of the exported zcrx ifq */
+       fd = reg->if_idx;
+       CLASS(fd, f)(fd);
+       if (fd_empty(f))
+               return -EBADF;
+
+       /* only accept files created by zcrx export */
+       file = fd_file(f);
+       if (file->f_op != &zcrx_box_fops || !file->private_data)
+               return -EBADF;
+
+       ifq = file->private_data;
+       refcount_inc(&ifq->refs);
+       refcount_inc(&ifq->user_refs);
+
+       /*
+        * Reserve an id with a NULL placeholder first; the real pointer is
+        * stored only after registration info is copied out to userspace.
+        */
+       scoped_guard(mutex, &ctx->mmap_lock) {
+               ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
+               if (ret)
+                       goto err;
+       }
+
+       reg->zcrx_id = id;
+       io_fill_zcrx_offsets(&reg->offsets);
+       if (copy_to_user(arg, reg, sizeof(*reg))) {
+               ret = -EFAULT;
+               goto err_xa_erase;
+       }
+
+       /*
+        * Publish the ifq. The old entry is the NULL placeholder, so any
+        * nonzero return from xa_store() here indicates failure.
+        */
+       scoped_guard(mutex, &ctx->mmap_lock) {
+               ret = -ENOMEM;
+               if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
+                       goto err_xa_erase;
+       }
+
+       return 0;
+err_xa_erase:
+       scoped_guard(mutex, &ctx->mmap_lock)
+               xa_erase(&ctx->zcrx_ctxs, id);
+err:
+       /* NOTE(review): assumes zcrx_unregister() drops the two refs taken above — confirm */
+       zcrx_unregister(ifq);
+       return ret;
+}
+
 int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
                          struct io_uring_zcrx_ifq_reg __user *arg)
 {
@@ -685,11 +742,13 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
                return -EINVAL;
        if (copy_from_user(&reg, arg, sizeof(reg)))
                return -EFAULT;
-       if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
-               return -EFAULT;
        if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) ||
            reg.__resv2 || reg.zcrx_id)
                return -EINVAL;
+       if (reg.flags & ZCRX_REG_IMPORT)
+               return import_zcrx(ctx, arg, &reg);
+       if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
+               return -EFAULT;
        if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
                return -EINVAL;
        if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {