bcachefs: Optimize btree_path_alloc()

 - move slowpath code to a separate function, btree_path_overflow()
   (see the first sketch below)
 - no need to use hweight64
 - copy nr_max_paths from btree_transaction_stats to btree_trans,
   avoiding a data dependency in the fast path (see the second sketch,
   after the diff)

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 3f3bc66ef0
parent 14d8f26ad0
Author: Kent Overstreet
Date:   2022-09-26 16:19:56 -04:00
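
For readers outside the kernel tree, here is a minimal userspace sketch of
the pattern in the second hunk of the diff below. It is not the bcachefs
code: the struct and the slot_alloc()/slot_overflow() names are invented,
the full 64-bit bitmap check is a simplification of the BTREE_ITER_MAX
mask, and __builtin_ctzll() stands in for the kernel's __ffs64(). The point
it illustrates is that the unlikely overflow case is pushed into a noinline
helper, so the allocation fast path stays a hinted branch plus one bit scan
and is small enough to inline.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for the transaction's path bitmap. */
    struct slot_allocator {
            uint64_t allocated;     /* bit i set => slot i in use */
    };

    /* Rare case kept out of line: the fast path only pays for a call here. */
    static void __attribute__((noinline)) slot_overflow(struct slot_allocator *a)
    {
            (void)a;        /* real code would dump allocator state here */
            fprintf(stderr, "slot allocator overflow\n");
            abort();
    }

    /* Fast path: one hinted branch plus one bit scan, cheap enough to inline. */
    static inline unsigned slot_alloc(struct slot_allocator *a)
    {
            unsigned idx;

            if (__builtin_expect(a->allocated == ~0ULL, 0))
                    slot_overflow(a);

            /* Lowest clear bit of the bitmap = first free slot (cf. __ffs64(~...)). */
            idx = __builtin_ctzll(~a->allocated);
            a->allocated |= 1ULL << idx;
            return idx;
    }

    int main(void)
    {
            struct slot_allocator a = { 0 };
            unsigned first = slot_alloc(&a);
            unsigned second = slot_alloc(&a);

            printf("allocated slots %u and %u\n", first, second);
            return 0;
    }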

@@ -1408,7 +1408,8 @@ static void bch2_trans_update_max_paths(struct btree_trans *trans)
         if (!buf.allocation_failure) {
                 mutex_lock(&s->lock);
                 if (s->nr_max_paths < hweight64(trans->paths_allocated)) {
-                        s->nr_max_paths = hweight64(trans->paths_allocated);
+                        s->nr_max_paths = trans->nr_max_paths =
+                                hweight64(trans->paths_allocated);
                         swap(s->max_paths_text, buf.buf);
                 }
                 mutex_unlock(&s->lock);
@@ -1419,17 +1420,21 @@ static void bch2_trans_update_max_paths(struct btree_trans *trans)
         trans->nr_max_paths = hweight64(trans->paths_allocated);
 }
 
-static struct btree_path *btree_path_alloc(struct btree_trans *trans,
-                                           struct btree_path *pos)
+static noinline void btree_path_overflow(struct btree_trans *trans)
+{
+        bch2_dump_trans_paths_updates(trans);
+        panic("trans path oveflow\n");
+}
+
+static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
+                                                  struct btree_path *pos)
 {
         struct btree_path *path;
         unsigned idx;
 
         if (unlikely(trans->paths_allocated ==
-                     ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
-                bch2_dump_trans_paths_updates(trans);
-                panic("trans path oveflow\n");
-        }
+                     ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
+                btree_path_overflow(trans);
 
         idx = __ffs64(~trans->paths_allocated);
         trans->paths_allocated |= 1ULL << idx;
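
And a hedged sketch of the third point of the commit message, why copying
nr_max_paths into the transaction itself helps. The names below (struct
stats, struct txn, hit_new_max_*) are invented for illustration and the
read side is not part of this diff; the principle is that the fast path
reads a field of a structure it already holds instead of chasing a pointer
into the shared stats object, removing one dependent load from the hot
path.

    #include <stdint.h>

    /* Shared stats object, updated rarely under a lock
     * (think btree_transaction_stats). */
    struct stats {
            unsigned nr_max;
    };

    /* Hot per-transaction object (think btree_trans). */
    struct txn {
            uint64_t allocated;
            unsigned nr_max;        /* local copy of stats->nr_max */
            struct stats *stats;    /* only dereferenced off the fast path */
    };

    /* Before: two dependent loads -- the stats pointer, then the field behind it. */
    static inline int hit_new_max_indirect(const struct txn *t, unsigned n)
    {
            return n > t->stats->nr_max;
    }

    /* After: a single load from a structure that is already hot in cache. */
    static inline int hit_new_max_cached(const struct txn *t, unsigned n)
    {
            return n > t->nr_max;
    }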