lib/stackdepot: rename next_pool_required to new_pool_required
Rename next_pool_required to new_pool_required. This is a purely code
readability change: the following patch will change stack depot to store
the pointer to the new pool in a separate variable, and "new" seems like
a more logical name.

Link: https://lkml.kernel.org/r/fd7cd6c6eb250c13ec5d2009d75bb4ddd1470db9.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
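[Editor's note] For readers skimming the diff: the renamed flag is read outside pool_lock with smp_load_acquire() and written with smp_store_release(), as the comments in the hunks below spell out. The following is a rough userspace analogue of that flag protocol, with C11 atomics standing in for the kernel helpers; the locking around the handover is elided, and every name except new_pool_required is invented for illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stands in for the kernel's new_pool_required flag. */
static atomic_int new_pool_required = 1;
static void *new_pool;

/* Reader side: preallocate only if a new pool may be needed.
 * The acquire load pairs with the release store below. */
static void *maybe_prealloc(void)
{
        if (atomic_load_explicit(&new_pool_required, memory_order_acquire))
                return malloc(4096); /* stand-in for alloc_pages() */
        return NULL;
}

/* Writer side: consume the preallocation and clear the flag.
 * The release store publishes new_pool before the flag flips to 0. */
static void keep_new_pool(void **prealloc)
{
        if (!new_pool) {
                new_pool = *prealloc;
                *prealloc = NULL;
        }
        atomic_store_explicit(&new_pool_required, 0, memory_order_release);
}

int main(void)
{
        void *prealloc = maybe_prealloc();

        if (prealloc)
                keep_new_pool(&prealloc);
        free(prealloc); /* no-op when the pool took ownership */
        printf("new_pool_required = %d\n", atomic_load(&new_pool_required));
        return 0;
}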
This commit is contained in:
parent 94b7d32870
commit b6a353d3eb

1 changed file with 24 additions and 25 deletions
lib/stackdepot.c

@@ -93,12 +93,11 @@ static size_t pool_offset;
 static DEFINE_RAW_SPINLOCK(pool_lock);
 /*
  * Stack depot tries to keep an extra pool allocated even before it runs out
- * of space in the currently used pool.
- * This flag marks that this next extra pool needs to be allocated and
- * initialized. It has the value 0 when either the next pool is not yet
- * initialized or the limit on the number of pools is reached.
+ * of space in the currently used pool. This flag marks whether this extra pool
+ * needs to be allocated. It has the value 0 when either an extra pool is not
+ * yet allocated or if the limit on the number of pools is reached.
  */
-static int next_pool_required = 1;
+static int new_pool_required = 1;

 static int __init disable_stack_depot(char *str)
 {
@@ -225,20 +224,20 @@ int stack_depot_init(void)
 }
 EXPORT_SYMBOL_GPL(stack_depot_init);

-/* Keeps the preallocated memory to be used for the next stack depot pool. */
-static void depot_keep_next_pool(void **prealloc)
+/* Keeps the preallocated memory to be used for a new stack depot pool. */
+static void depot_keep_new_pool(void **prealloc)
 {
         /*
-         * If the next pool is already saved or the maximum number of
+         * If a new pool is already saved or the maximum number of
          * pools is reached, do not use the preallocated memory.
-         * Access next_pool_required non-atomically, as there are no concurrent
+         * Access new_pool_required non-atomically, as there are no concurrent
          * write accesses to this variable.
          */
-        if (!next_pool_required)
+        if (!new_pool_required)
                 return;

         /*
-         * Use the preallocated memory for the next pool
+         * Use the preallocated memory for the new pool
          * as long as we do not exceed the maximum number of pools.
          */
         if (pool_index + 1 < DEPOT_MAX_POOLS) {
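[Editor's note] The void **prealloc signature above is an ownership-handoff idiom: the callee may consume the buffer and NULL out the caller's pointer, so the caller can unconditionally free whatever is left over. A minimal sketch of that convention; keep_buffer() and the buffer size are invented for illustration and are not kernel names.

#include <stdlib.h>

static void *kept; /* the buffer we decided to hold on to */

/* Take ownership of *prealloc if we still need a spare buffer;
 * otherwise leave it with the caller. */
static void keep_buffer(void **prealloc)
{
        if (kept)
                return; /* already have one: caller keeps ownership */
        kept = *prealloc;
        *prealloc = NULL; /* signal that ownership moved */
}

int main(void)
{
        void *prealloc = malloc(128);

        if (prealloc)
                keep_buffer(&prealloc);
        free(prealloc); /* safe either way: free(NULL) is a no-op */
        free(kept);
        return 0;
}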
@@ -247,13 +246,13 @@ static void depot_keep_next_pool(void **prealloc)
         }

         /*
-         * At this point, either the next pool is kept or the maximum
+         * At this point, either a new pool is kept or the maximum
          * number of pools is reached. In either case, take note that
          * keeping another pool is not required.
          * smp_store_release() pairs with smp_load_acquire() in
          * stack_depot_save().
          */
-        smp_store_release(&next_pool_required, 0);
+        smp_store_release(&new_pool_required, 0);
 }

 /* Updates references to the current and the next stack depot pools. */
@@ -268,7 +267,7 @@ static bool depot_update_pools(size_t required_size, void **prealloc)
         }

         /*
-         * Move on to the next pool.
+         * Move on to the new pool.
          * WRITE_ONCE() pairs with potential concurrent read in
          * stack_depot_fetch().
          */
@@ -277,12 +276,12 @@ static bool depot_update_pools(size_t required_size, void **prealloc)

         /*
          * If the maximum number of pools is not reached, take note
-         * that the next pool needs to be initialized.
+         * that yet another new pool needs to be allocated.
          * smp_store_release() pairs with smp_load_acquire() in
          * stack_depot_save().
          */
         if (pool_index + 1 < DEPOT_MAX_POOLS)
-                smp_store_release(&next_pool_required, 1);
+                smp_store_release(&new_pool_required, 1);
 }

 /* Check if the current pool is not yet allocated. */
@@ -293,9 +292,9 @@ static bool depot_update_pools(size_t required_size, void **prealloc)
                 return true;
         }

-        /* Otherwise, try using the preallocated memory for the next pool. */
+        /* Otherwise, try using the preallocated memory for a new pool. */
         if (*prealloc)
-                depot_keep_next_pool(prealloc);
+                depot_keep_new_pool(prealloc);
         return true;
 }

@@ -306,7 +305,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
         struct stack_record *stack;
         size_t required_size = DEPOT_STACK_RECORD_SIZE;

-        /* Update current and next pools if required and possible. */
+        /* Update current and new pools if required and possible. */
         if (!depot_update_pools(required_size, prealloc))
                 return NULL;

@@ -438,13 +437,13 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
                 goto exit;

         /*
-         * Check if another stack pool needs to be initialized. If so, allocate
-         * the memory now - we won't be able to do that under the lock.
+         * Check if another stack pool needs to be allocated. If so, allocate
+         * the memory now: we won't be able to do that under the lock.
          *
          * smp_load_acquire() pairs with smp_store_release() in
-         * depot_update_pools() and depot_keep_next_pool().
+         * depot_update_pools() and depot_keep_new_pool().
          */
-        if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
+        if (unlikely(can_alloc && smp_load_acquire(&new_pool_required))) {
                 /*
                  * Zero out zone modifiers, as we don't have specific zone
                  * requirements. Keep the flags related to allocation in atomic
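[Editor's note] The hunk above is the classic "allocate before taking the lock" shape: the flag is checked lock-free as a hint, memory is allocated while allocating is still legal, and the authoritative decision is retaken under pool_lock; the else-if branch in the next hunk keeps an unused preallocation for later. A userspace sketch of the same shape, with a pthread mutex and malloc standing in for the raw spinlock and alloc_pages(); all names are illustrative.

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int new_pool_required = 1;
static void *new_pool;

static void save_one(void)
{
        void *prealloc = NULL;

        /* Lock-free hint check: allocation must happen before the
         * critical section, where we could not block or allocate. */
        if (new_pool_required)
                prealloc = malloc(4096);

        pthread_mutex_lock(&lock);
        /* Authoritative recheck under the lock. */
        if (new_pool_required && prealloc) {
                new_pool = prealloc; /* hand over the preallocation */
                prealloc = NULL;
                new_pool_required = 0;
        }
        pthread_mutex_unlock(&lock);

        free(prealloc); /* return an unused preallocation */
}

int main(void)
{
        save_one();
        free(new_pool);
        return 0;
}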
@@ -477,9 +476,9 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
         } else if (prealloc) {
                 /*
                  * Stack depot already contains this stack trace, but let's
-                 * keep the preallocated memory for the future.
+                 * keep the preallocated memory for future.
                  */
-                depot_keep_next_pool(&prealloc);
+                depot_keep_new_pool(&prealloc);
         }

         raw_spin_unlock_irqrestore(&pool_lock, flags);