bcachefs: bch2_btree_path_upgrade() now emits transaction restart

Centralizing the transaction restart/tracepoint in
bch2_btree_path_upgrade() lets us improve the tracepoint - now it emits
old and new locks_want.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2022-09-17 14:36:24 -04:00
parent b8eec67591
commit 367d72dd5f
4 changed files with 68 additions and 30 deletions

View file

@@ -345,15 +345,22 @@ bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
bool __bch2_btree_path_upgrade(struct btree_trans *,
struct btree_path *, unsigned);
static inline bool bch2_btree_path_upgrade(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want)
static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want)
{
unsigned old_locks_want = path->locks_want;
new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
return path->locks_want < new_locks_want
? __bch2_btree_path_upgrade(trans, path, new_locks_want)
: path->uptodate == BTREE_ITER_UPTODATE;
if (path->locks_want < new_locks_want
? __bch2_btree_path_upgrade(trans, path, new_locks_want)
: path->uptodate == BTREE_ITER_UPTODATE)
return 0;
trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
old_locks_want, new_locks_want);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}
/* misc: */

View file

@@ -1003,11 +1003,9 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
if (update_level < BTREE_MAX_DEPTH)
nr_nodes[1] += 1;
if (!bch2_btree_path_upgrade(trans, path, U8_MAX)) {
trace_and_count(c, trans_restart_iter_upgrade, trans, _RET_IP_, path);
ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
ret = bch2_btree_path_upgrade(trans, path, U8_MAX);
if (ret)
return ERR_PTR(ret);
}
if (flags & BTREE_INSERT_GC_LOCK_HELD)
lockdep_assert_held(&c->gc_lock);
@@ -2035,9 +2033,9 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
struct closure cl;
int ret = 0;
if (!btree_node_intent_locked(path, b->c.level) &&
!bch2_btree_path_upgrade(trans, path, b->c.level + 1))
return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
ret = bch2_btree_path_upgrade(trans, path, b->c.level + 1);
if (ret)
return ret;
closure_init_stack(&cl);

View file

@@ -806,7 +806,7 @@ static inline bool have_conflicting_read_lock(struct btree_trans *trans, struct
// break;
if (btree_node_read_locked(path, path->level) &&
!bch2_btree_path_upgrade(trans, path, path->level + 1))
!bch2_btree_path_upgrade_noupgrade_sibs(trans, path, path->level + 1))
return true;
}
@@ -1131,11 +1131,9 @@ int __bch2_trans_commit(struct btree_trans *trans)
trans_for_each_update(trans, i) {
BUG_ON(!i->path->should_be_locked);
if (unlikely(!bch2_btree_path_upgrade(trans, i->path, i->level + 1))) {
trace_and_count(c, trans_restart_upgrade, trans, _RET_IP_, i->path);
ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
ret = bch2_btree_path_upgrade(trans, i->path, i->level + 1);
if (unlikely(ret))
goto out;
}
BUG_ON(!btree_node_intent_locked(i->path, i->level));

View file

@@ -401,6 +401,7 @@ TRACE_EVENT(btree_path_relock_fail,
__array(char, trans_fn, 32 )
__field(unsigned long, caller_ip )
__field(u8, btree_id )
__field(u8, level )
TRACE_BPOS_entries(pos)
__array(char, node, 24 )
__field(u32, iter_lock_seq )
@@ -413,6 +414,7 @@ TRACE_EVENT(btree_path_relock_fail,
strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->level = path->level;
TRACE_BPOS_assign(pos, path->pos);
if (IS_ERR(b))
strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
@@ -422,13 +424,14 @@ TRACE_EVENT(btree_path_relock_fail,
__entry->node_lock_seq = is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
),
TP_printk("%s %pS btree %s pos %llu:%llu:%u, node %s iter seq %u lock seq %u",
TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s iter seq %u lock seq %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
bch2_btree_ids[__entry->btree_id],
__entry->pos_inode,
__entry->pos_offset,
__entry->pos_snapshot,
__entry->level,
__entry->node,
__entry->iter_lock_seq,
__entry->node_lock_seq)
@@ -445,12 +448,15 @@ TRACE_EVENT(btree_path_upgrade_fail,
__array(char, trans_fn, 32 )
__field(unsigned long, caller_ip )
__field(u8, btree_id )
__field(u8, level )
TRACE_BPOS_entries(pos)
__field(u8, locked )
__field(u8, self_read_count )
__field(u8, self_intent_count)
__field(u8, read_count )
__field(u8, intent_count )
__field(u32, iter_lock_seq )
__field(u32, node_lock_seq )
),
TP_fast_assign(
@@ -459,6 +465,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->level = level;
TRACE_BPOS_assign(pos, path->pos);
__entry->locked = btree_node_locked(path, level);
@@ -468,20 +475,25 @@ TRACE_EVENT(btree_path_upgrade_fail,
c = six_lock_counts(&path->l[level].b->c.lock);
__entry->read_count = c.n[SIX_LOCK_read];
__entry->intent_count = c.n[SIX_LOCK_read];
__entry->iter_lock_seq = path->l[level].lock_seq;
__entry->node_lock_seq = is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
),
TP_printk("%s %pS btree %s pos %llu:%llu:%u, locked %u held %u:%u lock count %u:%u",
TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
bch2_btree_ids[__entry->btree_id],
__entry->pos_inode,
__entry->pos_offset,
__entry->pos_snapshot,
__entry->level,
__entry->locked,
__entry->self_read_count,
__entry->self_intent_count,
__entry->read_count,
__entry->intent_count)
__entry->intent_count,
__entry->iter_lock_seq,
__entry->node_lock_seq)
);
/* Garbage collection */
@@ -894,18 +906,41 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
TP_ARGS(trans, caller_ip, path)
);
DEFINE_EVENT(transaction_restart_iter, trans_restart_upgrade,
TRACE_EVENT(trans_restart_upgrade,
TP_PROTO(struct btree_trans *trans,
unsigned long caller_ip,
struct btree_path *path),
TP_ARGS(trans, caller_ip, path)
);
struct btree_path *path,
unsigned old_locks_want,
unsigned new_locks_want),
TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),
DEFINE_EVENT(transaction_restart_iter, trans_restart_iter_upgrade,
TP_PROTO(struct btree_trans *trans,
unsigned long caller_ip,
struct btree_path *path),
TP_ARGS(trans, caller_ip, path)
TP_STRUCT__entry(
__array(char, trans_fn, 32 )
__field(unsigned long, caller_ip )
__field(u8, btree_id )
__field(u8, old_locks_want )
__field(u8, new_locks_want )
TRACE_BPOS_entries(pos)
),
TP_fast_assign(
strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->old_locks_want = old_locks_want;
__entry->new_locks_want = new_locks_want;
TRACE_BPOS_assign(pos, path->pos)
),
TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
bch2_btree_ids[__entry->btree_id],
__entry->pos_inode,
__entry->pos_offset,
__entry->pos_snapshot,
__entry->old_locks_want,
__entry->new_locks_want)
);
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,