Mirror of https://github.com/torvalds/linux, last synced 2024-10-08 12:22:38 +00:00.
bcachefs: New in-memory array for bucket gens
The main in-memory bucket array is going away, but we'll still need to keep bucket generations in memory, at least for now — ptr_stale() needs to be an efficient operation.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
This commit is contained in:
parent
47ac34ec98
commit
a786087744
|
@ -354,6 +354,7 @@ static int bch2_alloc_read_fn(struct btree_trans *trans, struct bkey_s_c k)
|
|||
g = bucket(ca, k.k->p.offset);
|
||||
u = bch2_alloc_unpack(k);
|
||||
|
||||
*bucket_gen(ca, k.k->p.offset) = u.gen;
|
||||
g->_mark.gen = u.gen;
|
||||
g->_mark.data_type = u.data_type;
|
||||
g->_mark.dirty_sectors = u.dirty_sectors;
|
||||
|
@ -748,6 +749,7 @@ static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
|
|||
!bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
|
||||
BUG_ON(m.data_type);
|
||||
bucket_cmpxchg(g, m, m.gen++);
|
||||
*bucket_gen(ca, b) = m.gen;
|
||||
percpu_up_read(&c->mark_lock);
|
||||
goto out;
|
||||
}
|
||||
|
|
|
@ -161,7 +161,7 @@ static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
|
|||
open_bucket_for_each(c, obs, ob, i) {
|
||||
struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
|
||||
|
||||
BUG_ON(bucket(ca, ob->bucket)->mark.gen != ob->gen);
|
||||
BUG_ON(*bucket_gen(ca, ob->bucket) != ob->gen);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
#endif
|
||||
|
@ -273,7 +273,7 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
|
|||
ob->sectors_free = ca->mi.bucket_size;
|
||||
ob->alloc_reserve = reserve;
|
||||
ob->dev = ca->dev_idx;
|
||||
ob->gen = bucket(ca, b)->mark.gen;
|
||||
ob->gen = *bucket_gen(ca, b);
|
||||
ob->bucket = b;
|
||||
spin_unlock(&ob->lock);
|
||||
|
||||
|
|
|
@ -445,6 +445,7 @@ struct bch_dev {
|
|||
* Or rcu_read_lock(), but only for ptr_stale():
|
||||
*/
|
||||
struct bucket_array __rcu *buckets[2];
|
||||
struct bucket_gens *bucket_gens;
|
||||
unsigned long *buckets_nouse;
|
||||
struct rw_semaphore bucket_lock;
|
||||
|
||||
|
|
|
@ -535,6 +535,20 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
|
|||
BUG_ON(owned_by_allocator == old.owned_by_allocator);
|
||||
}
|
||||
|
||||
static inline u8 bkey_alloc_gen(struct bkey_s_c k)
|
||||
{
|
||||
switch (k.k->type) {
|
||||
case KEY_TYPE_alloc:
|
||||
return bkey_s_c_to_alloc(k).v->gen;
|
||||
case KEY_TYPE_alloc_v2:
|
||||
return bkey_s_c_to_alloc_v2(k).v->gen;
|
||||
case KEY_TYPE_alloc_v3:
|
||||
return bkey_s_c_to_alloc_v3(k).v->gen;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int bch2_mark_alloc(struct btree_trans *trans,
|
||||
struct bkey_s_c old, struct bkey_s_c new,
|
||||
unsigned flags)
|
||||
|
@ -573,10 +587,14 @@ static int bch2_mark_alloc(struct btree_trans *trans,
|
|||
if (new.k->p.offset >= ca->mi.nbuckets)
|
||||
return 0;
|
||||
|
||||
percpu_down_read(&c->mark_lock);
|
||||
g = __bucket(ca, new.k->p.offset, gc);
|
||||
u = bch2_alloc_unpack(new);
|
||||
|
||||
percpu_down_read(&c->mark_lock);
|
||||
if (!gc && u.gen != bkey_alloc_gen(old))
|
||||
*bucket_gen(ca, new.k->p.offset) = u.gen;
|
||||
|
||||
g = __bucket(ca, new.k->p.offset, gc);
|
||||
|
||||
old_m = bucket_cmpxchg(g, m, ({
|
||||
m.gen = u.gen;
|
||||
m.data_type = u.data_type;
|
||||
|
@ -2131,9 +2149,18 @@ static void buckets_free_rcu(struct rcu_head *rcu)
|
|||
buckets->nbuckets * sizeof(struct bucket));
|
||||
}
|
||||
|
||||
static void bucket_gens_free_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct bucket_gens *buckets =
|
||||
container_of(rcu, struct bucket_gens, rcu);
|
||||
|
||||
kvpfree(buckets, sizeof(struct bucket_array) + buckets->nbuckets);
|
||||
}
|
||||
|
||||
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
|
||||
{
|
||||
struct bucket_array *buckets = NULL, *old_buckets = NULL;
|
||||
struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
|
||||
unsigned long *buckets_nouse = NULL;
|
||||
alloc_fifo free[RESERVE_NR];
|
||||
alloc_fifo free_inc;
|
||||
|
@ -2157,6 +2184,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
|
|||
if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
|
||||
nbuckets * sizeof(struct bucket),
|
||||
GFP_KERNEL|__GFP_ZERO)) ||
|
||||
!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
|
||||
GFP_KERNEL|__GFP_ZERO)) ||
|
||||
!(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
|
||||
sizeof(unsigned long),
|
||||
GFP_KERNEL|__GFP_ZERO)) ||
|
||||
|
@ -2169,6 +2198,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
|
|||
|
||||
buckets->first_bucket = ca->mi.first_bucket;
|
||||
buckets->nbuckets = nbuckets;
|
||||
bucket_gens->first_bucket = ca->mi.first_bucket;
|
||||
bucket_gens->nbuckets = nbuckets;
|
||||
|
||||
bch2_copygc_stop(c);
|
||||
|
||||
|
@ -2179,6 +2210,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
|
|||
}
|
||||
|
||||
old_buckets = bucket_array(ca);
|
||||
old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
|
||||
|
||||
if (resize) {
|
||||
size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
|
||||
|
@ -2186,13 +2218,18 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
|
|||
memcpy(buckets->b,
|
||||
old_buckets->b,
|
||||
n * sizeof(struct bucket));
|
||||
memcpy(bucket_gens->b,
|
||||
old_bucket_gens->b,
|
||||
n);
|
||||
memcpy(buckets_nouse,
|
||||
ca->buckets_nouse,
|
||||
BITS_TO_LONGS(n) * sizeof(unsigned long));
|
||||
}
|
||||
|
||||
rcu_assign_pointer(ca->buckets[0], buckets);
|
||||
buckets = old_buckets;
|
||||
rcu_assign_pointer(ca->bucket_gens, bucket_gens);
|
||||
buckets = old_buckets;
|
||||
bucket_gens = old_bucket_gens;
|
||||
|
||||
swap(ca->buckets_nouse, buckets_nouse);
|
||||
|
||||
|
@ -2226,6 +2263,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
|
|||
free_fifo(&free[i]);
|
||||
kvpfree(buckets_nouse,
|
||||
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
|
||||
if (bucket_gens)
|
||||
call_rcu(&old_buckets->rcu, bucket_gens_free_rcu);
|
||||
if (buckets)
|
||||
call_rcu(&old_buckets->rcu, buckets_free_rcu);
|
||||
|
||||
|
|
|
@ -63,6 +63,24 @@ static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
|
|||
return __bucket(ca, b, false);
|
||||
}
|
||||
|
||||
static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
|
||||
{
|
||||
return rcu_dereference_check(ca->bucket_gens,
|
||||
!ca->fs ||
|
||||
percpu_rwsem_is_held(&ca->fs->mark_lock) ||
|
||||
lockdep_is_held(&ca->fs->gc_lock) ||
|
||||
lockdep_is_held(&ca->bucket_lock));
|
||||
|
||||
}
|
||||
|
||||
/*
 * Return a pointer to the in-memory generation byte for bucket @b.
 *
 * Caller must satisfy one of the locking conditions checked by
 * bucket_gens() (or be inside rcu_read_lock()).  An out-of-range
 * bucket index is a programming error, hence BUG_ON rather than an
 * error return.
 */
static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *g = bucket_gens(ca);

	BUG_ON(b < g->first_bucket || b >= g->nbuckets);
	return &g->b[b];
}
|
||||
|
||||
/*
|
||||
* bucket_gc_gen() returns the difference between the bucket's current gen and
|
||||
* the oldest gen of any pointer into that bucket in the btree.
|
||||
|
@ -123,7 +141,7 @@ static inline u8 ptr_stale(struct bch_dev *ca,
|
|||
u8 ret;
|
||||
|
||||
rcu_read_lock();
|
||||
ret = gen_after(PTR_BUCKET(ca, ptr)->mark.gen, ptr->gen);
|
||||
ret = gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -52,6 +52,13 @@ struct bucket_array {
|
|||
struct bucket b[];
|
||||
};
|
||||
|
||||
/*
 * Compact per-device array of bucket generation numbers — one byte per
 * bucket — so the hot ptr_stale() path can read a generation without
 * going through the full struct bucket array.  Readers access it under
 * RCU (see bucket_gens()/bucket_gen()); it is replaced wholesale on
 * device resize and freed via bucket_gens_free_rcu().
 */
struct bucket_gens {
	struct rcu_head	rcu;		/* for deferred freeing after resize */
	u16		first_bucket;	/* first valid bucket index */
	size_t		nbuckets;	/* number of entries in b[] */
	u8		b[];		/* flexible array: generation per bucket */
};
|
||||
|
||||
struct bch_dev_usage {
|
||||
u64 buckets_ec;
|
||||
u64 buckets_unavailable;
|
||||
|
|
Loading…
Reference in a new issue