bcache: Convert to lock_cmp_fn

Replace one of bcache's lockdep_set_novalidate_class() usages with the
newly introduced custom lock nesting annotation.

[peterz: changelog]
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Coly Li <colyli@suse.de>
Link: https://lkml.kernel.org/r/20230509195847.1745548-2-kent.overstreet@linux.dev
commit 4c8a49244c (parent eb1cfd09f7)
Author: Kent Overstreet, 2023-05-09 15:58:47 -04:00; committed by Peter Zijlstra
2 files changed, 24 insertions(+), 3 deletions(-)
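
For context: lock_set_cmp_fn(), introduced in the parent commit, lets a
lock class register a comparison function that lockdep consults when two
locks of the same class are held at once; the acquisition is accepted
when the comparator orders the already-held lock before the newly taken
one (i.e. returns a negative value). A minimal sketch of the pattern,
using made-up names (my_node, my_node_cmp_fn, my_node_print_fn) rather
than anything from this patch; the bcache hunks below follow the same
shape:

#include <linux/lockdep.h>
#include <linux/printk.h>
#include <linux/rwsem.h>

#define cmp_int(l, r)	((l > r) - (l < r))	/* -1, 0 or 1 */

struct my_node {
	struct rw_semaphore	lock;
	unsigned		depth;
};

#ifdef CONFIG_PROVE_LOCKING
/* Return < 0 iff _a may legitimately be held while acquiring _b. */
static int my_node_cmp_fn(const struct lockdep_map *_a,
			  const struct lockdep_map *_b)
{
	const struct my_node *a = container_of(_a, struct my_node, lock.dep_map);
	const struct my_node *b = container_of(_b, struct my_node, lock.dep_map);

	return cmp_int(a->depth, b->depth);	/* shallower node locks first */
}

/* Called by lockdep when printing a held my_node lock in a report. */
static void my_node_print_fn(const struct lockdep_map *map)
{
	const struct my_node *n = container_of(map, struct my_node, lock.dep_map);

	printk(KERN_CONT " depth=%u", n->depth);
}
#endif

static void my_node_init(struct my_node *n, unsigned depth)
{
	n->depth = depth;
	init_rwsem(&n->lock);
	lock_set_cmp_fn(&n->lock, my_node_cmp_fn, my_node_print_fn);
}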

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -559,6 +559,27 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
 	}
 }
 
+#define cmp_int(l, r)		((l > r) - (l < r))
+
+#ifdef CONFIG_PROVE_LOCKING
+static int btree_lock_cmp_fn(const struct lockdep_map *_a,
+			     const struct lockdep_map *_b)
+{
+	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
+	const struct btree *b = container_of(_b, struct btree, lock.dep_map);
+
+	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
+}
+
+static void btree_lock_print_fn(const struct lockdep_map *map)
+{
+	const struct btree *b = container_of(map, struct btree, lock.dep_map);
+
+	printk(KERN_CONT " l=%u %llu:%llu", b->level,
+	       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
+}
+#endif
+
 static struct btree *mca_bucket_alloc(struct cache_set *c,
 				      struct bkey *k, gfp_t gfp)
 {
@@ -572,7 +593,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
 		return NULL;
 
 	init_rwsem(&b->lock);
-	lockdep_set_novalidate_class(&b->lock);
+	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
 	mutex_init(&b->write_lock);
 	lockdep_set_novalidate_class(&b->write_lock);
 	INIT_LIST_HEAD(&b->list);
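
The comparator orders btree locks by descending level first and then by
ascending key: bcache levels increase toward the root, so negating the
level comparison makes parents sort (and therefore lock) before their
children, and the GNU "?:" falls through to the key comparison only when
the levels are equal. A standalone illustration of that idiom with
made-up values (plain userspace C, not kernel code):

#include <assert.h>

#define cmp_int(l, r)	((l > r) - (l < r))

struct node { int level; long key; };

static int node_cmp(struct node a, struct node b)
{
	/* Same idiom as btree_lock_cmp_fn(): level outranks key. */
	return -cmp_int(a.level, b.level) ?: cmp_int(a.key, b.key);
}

int main(void)
{
	struct node parent  = { .level = 1, .key = 100 };
	struct node child   = { .level = 0, .key =  50 };
	struct node sibling = { .level = 0, .key =  75 };

	assert(node_cmp(parent, child) < 0);	/* parents lock first    */
	assert(node_cmp(child, sibling) < 0);	/* siblings order by key */
	assert(node_cmp(child, child) == 0);
	return 0;
}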

diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -247,8 +247,8 @@ static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
-	w ? down_write_nested(&b->lock, level + 1)
-	  : down_read_nested(&b->lock, level + 1);
+	w ? down_write(&b->lock)
+	  : down_read(&b->lock);
 	if (w)
 		b->seq++;
 }
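
With the comparator registered on b->lock, the down_read_nested() /
down_write_nested() calls can drop back to plain down_read() /
down_write(): the level-based subclass annotation only told lockdep to
tolerate same-class nesting, whereas the cmp_fn now validates the actual
per-instance order. A sketch of the acquisition order this permits
(walk_pair() is hypothetical, not part of the patch):

static void walk_pair(struct btree *parent, struct btree *child)
{
	down_read(&parent->lock);	/* parent->level > child->level,   */
	down_read(&child->lock);	/* so cmp_fn(parent, child) < 0:   */
					/* root-to-leaf order is accepted */

	/* ... inspect the two nodes ... */

	up_read(&child->lock);
	up_read(&parent->lock);
}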