/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/interval_tree_generic.h>
#include <linux/slab.h>
-#include <linux/bpf_mem_alloc.h>
#include <linux/bpf.h>
#include "range_tree.h"
* in commit 6772fcc8890a ("xfs: convert xbitmap to interval tree").
*
* The implementation relies on an external lock to protect the rbtree-s.
- * The alloc/free of range_node-s is done via bpf_mem_alloc.
+ * The alloc/free of range_node-s is done via kmalloc_nolock()/kfree_nolock().
*
* bpf arena is using range_tree to represent unallocated slots.
* At init time:
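Every hunk below applies the substitution the comment describes: the migrate_disable()/migrate_enable() bracketing that bpf_mem_alloc()/bpf_mem_free() required goes away, and allocation/free become single calls. A minimal sketch of the resulting pattern, not part of the patch: the helper name is made up, only the rn_start/rn_last fields visible in this diff are touched, and it builds on the includes already at the top of this file.

/* Sketch only: record a new free range [start, last] in the tree. */
static int range_tree_add_sketch(struct range_tree *rt, u32 start, u32 last)
{
	struct range_node *rn;

	/* single call, no migrate_disable()/migrate_enable() around it */
	rn = kmalloc_nolock(sizeof(struct range_node), 0, NUMA_NO_NODE);
	if (!rn)
		return -ENOMEM;
	rn->rn_start = start;
	rn->rn_last = last;
	range_it_insert(rn, rt);
	return 0;
}

Freeing takes the mirrored form, kfree_nolock(rn) after range_it_remove(rn, rt), as the later hunks show.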
range_it_insert(rn, rt);
/* Add a range */
- migrate_disable();
- new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
- migrate_enable();
+ new_rn = kmalloc_nolock(sizeof(struct range_node), 0, NUMA_NO_NODE);
if (!new_rn)
return -ENOMEM;
new_rn->rn_start = last + 1;
} else {
/* in the middle of the clearing range */
range_it_remove(rn, rt);
- migrate_disable();
- bpf_mem_free(&bpf_global_ma, rn);
- migrate_enable();
+ kfree_nolock(rn);
}
}
return 0;
range_it_remove(right, rt);
left->rn_last = right->rn_last;
range_it_insert(left, rt);
- migrate_disable();
- bpf_mem_free(&bpf_global_ma, right);
- migrate_enable();
+ kfree_nolock(right);
} else if (left) {
/* Combine with the left range */
range_it_remove(left, rt);
right->rn_start = start;
range_it_insert(right, rt);
} else {
- migrate_disable();
- left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
- migrate_enable();
+ left = kmalloc_nolock(sizeof(struct range_node), 0, NUMA_NO_NODE);
if (!left)
return -ENOMEM;
left->rn_start = start;
while ((rn = range_it_iter_first(rt, 0, -1U))) {
range_it_remove(rn, rt);
- bpf_mem_free(&bpf_global_ma, rn);
+ kfree_nolock(rn);
}
}
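A caller-side note, not part of the patch: per the header comment the tree still relies on an external lock, and the nolock alloc/free calls above are what keep its operations usable with that lock held. A hypothetical sketch of that contract, assuming the range_tree_clear(rt, start, len) entry point declared in range_tree.h; the function name and the caller-owned spinlock are made up for illustration.

/* Hypothetical caller (sketch only): range_tree does no locking itself. */
static int reserve_slots(struct range_tree *rt, spinlock_t *lock,
			 u32 pgoff, u32 page_cnt)
{
	int err;

	spin_lock(lock);
	/* drop the pages from the set of unallocated slots */
	err = range_tree_clear(rt, pgoff, page_cnt);
	spin_unlock(lock);
	return err;
}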