mirror of
https://github.com/torvalds/linux
synced 2024-11-05 18:23:50 +00:00
radix-tree: replace preallocated node array with linked list
Currently we use per-cpu array to hold pointers to preallocated nodes. Let's replace it with linked list. On x86_64 it saves 256 bytes in per-cpu ELF section which may translate into freeing up 2MB of memory for NR_CPUS==8192. [akpm@linux-foundation.org: fix comment, coding style] Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
9cf79d115f
commit
9d2a8da006
1 changed file with 17 additions and 11 deletions
|
@ -65,7 +65,8 @@ static struct kmem_cache *radix_tree_node_cachep;
|
|||
*/
|
||||
struct radix_tree_preload {
|
||||
int nr;
|
||||
struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
|
||||
/* nodes->private_data points to next preallocated node */
|
||||
struct radix_tree_node *nodes;
|
||||
};
|
||||
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
|
||||
|
||||
|
@ -197,8 +198,9 @@ radix_tree_node_alloc(struct radix_tree_root *root)
|
|||
*/
|
||||
rtp = this_cpu_ptr(&radix_tree_preloads);
|
||||
if (rtp->nr) {
|
||||
ret = rtp->nodes[rtp->nr - 1];
|
||||
rtp->nodes[rtp->nr - 1] = NULL;
|
||||
ret = rtp->nodes;
|
||||
rtp->nodes = ret->private_data;
|
||||
ret->private_data = NULL;
|
||||
rtp->nr--;
|
||||
}
|
||||
/*
|
||||
|
@ -257,18 +259,21 @@ static int __radix_tree_preload(gfp_t gfp_mask)
|
|||
|
||||
preempt_disable();
|
||||
rtp = this_cpu_ptr(&radix_tree_preloads);
|
||||
while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
|
||||
while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
|
||||
preempt_enable();
|
||||
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
|
||||
if (node == NULL)
|
||||
goto out;
|
||||
preempt_disable();
|
||||
rtp = this_cpu_ptr(&radix_tree_preloads);
|
||||
if (rtp->nr < ARRAY_SIZE(rtp->nodes))
|
||||
rtp->nodes[rtp->nr++] = node;
|
||||
else
|
||||
if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
|
||||
node->private_data = rtp->nodes;
|
||||
rtp->nodes = node;
|
||||
rtp->nr++;
|
||||
} else {
|
||||
kmem_cache_free(radix_tree_node_cachep, node);
|
||||
}
|
||||
}
|
||||
ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
|
@ -1463,14 +1468,15 @@ static int radix_tree_callback(struct notifier_block *nfb,
|
|||
{
|
||||
int cpu = (long)hcpu;
|
||||
struct radix_tree_preload *rtp;
|
||||
struct radix_tree_node *node;
|
||||
|
||||
/* Free per-cpu pool of preloaded nodes */
|
||||
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
|
||||
rtp = &per_cpu(radix_tree_preloads, cpu);
|
||||
while (rtp->nr) {
|
||||
kmem_cache_free(radix_tree_node_cachep,
|
||||
rtp->nodes[rtp->nr-1]);
|
||||
rtp->nodes[rtp->nr-1] = NULL;
|
||||
node = rtp->nodes;
|
||||
rtp->nodes = node->private_data;
|
||||
kmem_cache_free(radix_tree_node_cachep, node);
|
||||
rtp->nr--;
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue