]> Gentwo Git Trees - linux/.git/commitdiff
blk-mq: fix potential uaf for 'queue_hw_ctx'
authorFengnan Chang <fengnanchang@gmail.com>
Fri, 28 Nov 2025 08:53:14 +0000 (16:53 +0800)
committerJens Axboe <axboe@kernel.dk>
Fri, 28 Nov 2025 16:09:19 +0000 (09:09 -0700)
This just applies Kuai's patch from [1] with minor changes.

blk_mq_realloc_hw_ctxs() will free the 'queue_hw_ctx' (e.g. update
submit_queues through configfs for null_blk), while it might still be
used from other context(e.g. switch elevator to none):

t1 t2
elevator_switch
 blk_mq_unquiesce_queue
  blk_mq_run_hw_queues
   queue_for_each_hw_ctx
    // assembly code for hctx = (q)->queue_hw_ctx[i]
    mov    0x48(%rbp),%rdx -> read old queue_hw_ctx

__blk_mq_update_nr_hw_queues
 blk_mq_realloc_hw_ctxs
  hctxs = q->queue_hw_ctx
  q->queue_hw_ctx = new_hctxs
  kfree(hctxs)
    movslq %ebx,%rax
    mov    (%rdx,%rax,8),%rdi ->uaf

This problem was found by code review, and I confirmed that the concurrent
scenario does exist (specifically 'q->queue_hw_ctx' can be changed during
blk_mq_run_hw_queues()); however, the uaf problem hasn't been reproduced yet
without hacking the kernel.

Since the queue is frozen in __blk_mq_update_nr_hw_queues(), fix the
problem by protecting 'queue_hw_ctx' through rcu where it can be accessed
without grabbing 'q_usage_counter'.

[1] https://lore.kernel.org/all/20220225072053.2472431-1-yukuai3@huawei.com/

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Fengnan Chang <changfengnan@bytedance.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c
include/linux/blk-mq.h
include/linux/blkdev.h

index 1ef81110eb8acd092271dd7ffd22379ffa9663ce..4e96bb2462475e0dac092066bbf5b976c1611673 100644 (file)
@@ -4535,7 +4535,12 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                if (hctxs)
                        memcpy(new_hctxs, hctxs, q->nr_hw_queues *
                               sizeof(*hctxs));
-               q->queue_hw_ctx = new_hctxs;
+               rcu_assign_pointer(q->queue_hw_ctx, new_hctxs);
+               /*
+                * Make sure reading the old queue_hw_ctx from other
+                * context concurrently won't trigger uaf.
+                */
+               synchronize_rcu_expedited();
                kfree(hctxs);
                hctxs = new_hctxs;
        }
index 9208ff90ae167882e0ceb0cbd0e109ea8d2bdc2e..eb7254b3ddddbb14f17b5dfca84e40bca7501293 100644 (file)
@@ -1015,9 +1015,20 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
        return rq + 1;
 }
 
+static inline struct blk_mq_hw_ctx *queue_hctx(struct request_queue *q, int id)
+{
+       struct blk_mq_hw_ctx *hctx;
+
+       rcu_read_lock();
+       hctx = rcu_dereference(q->queue_hw_ctx)[id];
+       rcu_read_unlock();
+
+       return hctx;
+}
+
 #define queue_for_each_hw_ctx(q, hctx, i)                              \
        for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
-            ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+            ({ hctx = queue_hctx((q), i); 1; }); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)                                        \
        for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
index 6195f89648dbce6c3d4d7e60b831591779877a36..72e34acd439c963f37d9a68f24db2c21f754e5cb 100644 (file)
@@ -503,7 +503,7 @@ struct request_queue {
 
        /* hw dispatch queues */
        unsigned int            nr_hw_queues;
-       struct blk_mq_hw_ctx    **queue_hw_ctx;
+       struct blk_mq_hw_ctx * __rcu *queue_hw_ctx;
 
        struct percpu_ref       q_usage_counter;
        struct lock_class_key   io_lock_cls_key;