mirror of
https://github.com/torvalds/linux
synced 2024-07-21 10:41:44 +00:00
![Kent Overstreet](/assets/img/avatar_default.png)
It turns out the btree key cache shrinker wasn't actually reclaiming anything, prior to the previous patch. This adds instrumentation so that if we have further issues we can see what's going on. Specifically, sysfs internal/btree_key_cache is greatly expanded with new counters, and the SRCU sequence numbers of the first 10 entries on each pending freelist, and we also add trigger_btree_key_cache_shrink for testing without having to prune all the system caches. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
43 lines
961 B
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_KEY_CACHE_TYPES_H
#define _BCACHEFS_BTREE_KEY_CACHE_TYPES_H
/*
 * Small per-CPU stash of bkey_cached objects available for reuse, so that
 * alloc/free of cached keys doesn't always need to touch shared state.
 */
struct btree_key_cache_freelist {
	struct bkey_cached	*objs[16];	/* objects available for reuse */
	unsigned		nr;		/* number of valid entries in objs[] */
};

/*
 * Top-level state for the btree key cache: cached btree keys are looked up
 * via the rhashtable, freed entries are parked on freelists before reclaim,
 * and a shrinker reclaims memory under pressure.
 */
struct btree_key_cache {
	struct mutex		lock;
	struct rhashtable	table;
	bool			table_init_done;	/* true once @table has been initialized */

	/*
	 * Freed-but-not-yet-reclaimed entries, split by whether the entry used
	 * the per-CPU freelist path.
	 * NOTE(review): per the commit message these appear to be pending an
	 * SRCU grace period before actual reuse/free — confirm against
	 * btree_key_cache.c.
	 */
	struct list_head	freed_pcpu;
	size_t			nr_freed_pcpu;
	struct list_head	freed_nonpcpu;
	size_t			nr_freed_nonpcpu;

	struct shrinker		*shrink;
	unsigned		shrink_iter;	/* scan cursor so successive shrinker calls resume where they left off */
	struct btree_key_cache_freelist __percpu *pcpu_freed;

	atomic_long_t		nr_freed;
	atomic_long_t		nr_keys;
	atomic_long_t		nr_dirty;

	/* shrinker stats (reported via sysfs internal/btree_key_cache) */
	unsigned long		requested_to_free;
	unsigned long		freed;
	unsigned long		moved_to_freelist;
	unsigned long		skipped_dirty;
	unsigned long		skipped_accessed;
	unsigned long		skipped_lock_fail;
};

/*
 * Lookup key for the key cache hash table: a cached key is identified by
 * (btree ID, position). __packed eliminates padding between the fields;
 * 4-byte alignment keeps u32 accesses cheap despite the packing.
 */
struct bkey_cached_key {
	u32			btree_id;
	struct bpos		pos;
} __packed __aligned(4);

#endif /* _BCACHEFS_BTREE_KEY_CACHE_TYPES_H */