RDMA/core: Reduce cond_resched() frequency in __ib_umem_release
author    Li RongQing <lirongqing@baidu.com>
          Wed, 26 Nov 2025 02:51:47 +0000 (10:51 +0800)
committer Leon Romanovsky <leon@kernel.org>
          Wed, 26 Nov 2025 08:15:36 +0000 (03:15 -0500)
The current implementation calls cond_resched() for every SG entry in
__ib_umem_release(), which adds needless scheduling overhead.

This patch introduces RESCHED_LOOP_CNT_THRESHOLD (0x1000) to limit how
often cond_resched() is called. The function now yields the CPU at most
once every 4096 iterations and never on the very first iteration, so the
common case of many small umems (fewer than 4096 SG entries) incurs no
rescheduling overhead at all.

Fixes: d056bc45b62b ("RDMA/core: Prevent soft lockup during large user memory region cleanup")
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Link: https://patch.msgid.link/20251126025147.2627-1-lirongqing@baidu.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/core/umem.c

index 8fd84aa37289bcf4768a9ebf29570abe15c64db9..8137031c2a65a4df6e2eab0d47447f66651c844d 100644
@@ -45,6 +45,8 @@
 
 #include "uverbs.h"
 
+#define RESCHED_LOOP_CNT_THRESHOLD 0x1000
+
 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 {
        bool make_dirty = umem->writable && dirty;
@@ -58,7 +60,9 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
        for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) {
                unpin_user_page_range_dirty_lock(sg_page(sg),
                        DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
-               cond_resched();
+
+               if (i && !(i % RESCHED_LOOP_CNT_THRESHOLD))
+                       cond_resched();
        }
 
        sg_free_append_table(&umem->sgt_append);
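
For reference, here is a minimal sketch of __ib_umem_release() as it reads
once the hunks above are applied. It is reconstructed from the diff context
only: code between the make_dirty assignment and the loop is not shown in
the diff and is elided here, and the local declarations of sg and i are
assumed rather than taken from the patch.

#define RESCHED_LOOP_CNT_THRESHOLD 0x1000

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	bool make_dirty = umem->writable && dirty;
	struct scatterlist *sg;		/* assumed local declaration */
	unsigned int i;			/* assumed local declaration */

	/* ... code between here and the loop (not shown in the diff) ... */

	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) {
		unpin_user_page_range_dirty_lock(sg_page(sg),
			DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

		/*
		 * Yield at most once every 4096 entries; i == 0 is skipped,
		 * so umems with fewer than 4096 SG entries never reschedule
		 * here.
		 */
		if (i && !(i % RESCHED_LOOP_CNT_THRESHOLD))
			cond_resched();
	}

	sg_free_append_table(&umem->sgt_append);
}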