Gentoo Git Trees - linux/.git/commitdiff
mm/shmem: use xas_try_split() in shmem_split_large_entry()
authorZi Yan <ziy@nvidia.com>
Tue, 18 Feb 2025 23:54:44 +0000 (18:54 -0500)
committerAndrew Morton <akpm@linux-foundation.org>
Sun, 23 Feb 2025 06:25:52 +0000 (22:25 -0800)
During shmem_split_large_entry(), large swap entries are covering n slots
and an order-0 folio needs to be inserted.

Instead of splitting all n slots, only the 1 slot covered by the folio
needs to be split and the remaining n-1 shadow entries can be retained with
orders ranging from 0 to n-1.  This method only requires
(n/XA_CHUNK_SHIFT) new xa_nodes instead of (n % XA_CHUNK_SHIFT) *
(n/XA_CHUNK_SHIFT) new xa_nodes, compared to the original
xas_split_alloc() + xas_split() one.

For example, to split an order-9 large swap entry (assuming XA_CHUNK_SHIFT
is 6), 1 xa_node is needed instead of 8.

xas_try_split_min_order() is used to reduce the number of calls to
xas_try_split() during split.

Link: https://lkml.kernel.org/r/20250218235444.1543173-3-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <yang@os.amperecomputing.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c

index 671f63063fd449c07d648449e708971d0b7a8647..b35ba250c53d86a4cdfad4da7f673e3029addd2e 100644 (file)
@@ -2162,14 +2162,14 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
 {
        struct address_space *mapping = inode->i_mapping;
        XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
-       void *alloced_shadow = NULL;
-       int alloced_order = 0, i;
+       int split_order = 0;
+       int i;
 
        /* Convert user data gfp flags to xarray node gfp flags */
        gfp &= GFP_RECLAIM_MASK;
 
        for (;;) {
-               int order = -1, split_order = 0;
+               int order = -1;
                void *old = NULL;
 
                xas_lock_irq(&xas);
@@ -2181,20 +2181,21 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
 
                order = xas_get_order(&xas);
 
-               /* Swap entry may have changed before we re-acquire the lock */
-               if (alloced_order &&
-                   (old != alloced_shadow || order != alloced_order)) {
-                       xas_destroy(&xas);
-                       alloced_order = 0;
-               }
-
                /* Try to split large swap entry in pagecache */
                if (order > 0) {
-                       if (!alloced_order) {
-                               split_order = order;
-                               goto unlock;
+                       int cur_order = order;
+
+                       split_order = xas_try_split_min_order(cur_order);
+
+                       while (cur_order > 0) {
+                               xas_set_order(&xas, index, split_order);
+                               xas_try_split(&xas, old, cur_order, GFP_NOWAIT);
+                               if (xas_error(&xas))
+                                       goto unlock;
+                               cur_order = split_order;
+                               split_order =
+                                       xas_try_split_min_order(split_order);
                        }
-                       xas_split(&xas, old, order);
 
                        /*
                         * Re-set the swap entry after splitting, and the swap
@@ -2213,26 +2214,14 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
 unlock:
                xas_unlock_irq(&xas);
 
-               /* split needed, alloc here and retry. */
-               if (split_order) {
-                       xas_split_alloc(&xas, old, split_order, gfp);
-                       if (xas_error(&xas))
-                               goto error;
-                       alloced_shadow = old;
-                       alloced_order = split_order;
-                       xas_reset(&xas);
-                       continue;
-               }
-
                if (!xas_nomem(&xas, gfp))
                        break;
        }
 
-error:
        if (xas_error(&xas))
                return xas_error(&xas);
 
-       return alloced_order;
+       return split_order;
 }
 
 /*