linux/lib/sbitmap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
	}
	return 0;
}
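
/*
 * Per-cpu allocation hint helpers: fetch the cached hint for this CPU,
 * re-randomizing it if it has fallen outside the (possibly resized) depth,
 * and push the next likely-free bit back after a successful allocation.
 */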
static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? get_random_u32_below(depth) : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

/*
 * See if we have deferred clears that we can batch move
 */
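/*
 * The check of ->cleared and the fold back into ->word are done under
 * ->swap_lock: without it, one allocator could consume ->cleared while a
 * second one observes ->cleared == 0 and gives up, even though free bits
 * were about to reappear in ->word, leaving that task waiting for a full
 * wake_batch of completions that may never come.
 */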
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
					  unsigned int depth, unsigned int alloc_hint, bool wrap)
{
	unsigned long mask, word_mask;

	guard(spinlock_irqsave)(&map->swap_lock);

	if (!map->cleared) {
		if (depth == 0)
			return false;

		word_mask = (~0UL) >> (BITS_PER_LONG - depth);
		/*
		 * The current behavior is to always retry after moving
		 * ->cleared to word, and we change it to retry in case
		 * of any free bits. To avoid an infinite loop, we need
		 * to take wrap & alloc_hint into account, otherwise a
		 * soft lockup may occur.
		 */
		if (!wrap && alloc_hint)
			word_mask &= ~((1UL << alloc_hint) - 1);

		return (READ_ONCE(map->word) & word_mask) != word_mask;
	}

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;
	int i;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}
	for (i = 0; i < sb->map_nr; i++)
		spin_lock_init(&sb->map[i].swap_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i], 0, 0, 0);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
				    unsigned int depth,
				    unsigned int alloc_hint,
				    bool wrap)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, depth,
					alloc_hint, wrap);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap))
			break;
	} while (1);

	return nr;
}
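
/*
 * Walk up to sb->map_nr words, starting at @index and wrapping around,
 * until a free bit is found or every word has been tried. Only the first
 * word searched honours @alloc_hint; later words are scanned from bit 0.
 */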
static int sbitmap_find_bit(struct sbitmap *sb,
			    unsigned int depth,
			    unsigned int index,
			    unsigned int alloc_hint,
			    bool wrap)
{
	unsigned int i;
	int nr = -1;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_word(&sb->map[index],
					      min_t(unsigned int,
						    __map_depth(sb, index),
						    depth),
					      alloc_hint, wrap);

		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int index;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	return sbitmap_find_bit(sb, UINT_MAX, index, alloc_hint,
				!sb->round_robin);
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int index;

	index = SB_NR_TO_INDEX(sb, alloc_hint);
	alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);

	return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true);
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}
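
/*
 * Render the bitmap (set bits minus deferred clears) as a hex dump,
 * sixteen bytes per line in the "offset: bytes" layout emit_byte() produces.
 */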
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
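	/*
	 * For example, with shift = 6 (64 bits per word), depth = 256 and
	 * min_shallow_depth = 16, and assuming SBQ_WAIT_QUEUES == 8 and
	 * SBQ_WAKE_BATCH == 8: each of the four words contributes at most
	 * 16 usable bits, the effective depth becomes 4 * 16 = 64, and
	 * wake_batch = clamp(64 / 8, 1, 8) = 8.
	 */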
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);
	atomic_set(&sbq->completion_cnt, 0);
	atomic_set(&sbq->wakeup_cnt, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}
	for (i = 0; i < SBQ_WAIT_QUEUES; i++)
		init_waitqueue_head(&sbq->ws[i].wait);

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	if (sbq->wake_batch != wake_batch)
		WRITE_ONCE(sbq->wake_batch, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					  unsigned int users)
{
	unsigned int wake_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			       1, SBQ_WAKE_BATCH);

	WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
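
/*
 * Try to grab up to @nr_tags free bits from a single word. On success,
 * *offset is the index of the first bit that was acquired and the return
 * value is a mask, relative to *offset, of the bits actually grabbed;
 * 0 means no tags could be allocated. Round-robin bitmaps never batch.
 */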
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);
		unsigned long val;
		sbitmap_deferred_clear(map, 0, 0, 0);
		val = READ_ONCE(map->word);
		if (val == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&val, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;

			get_mask = ((1UL << nr_tags) - 1) << nr;
			while (!atomic_long_try_cmpxchg(ptr, &val,
							get_mask | val))
				;
			get_mask = (get_mask & ~val) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + nr_tags - 1);
				return get_mask;
			}
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}

int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	int i, wake_index, woken;

	if (!atomic_read(&sbq->ws_active))
		return;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		/*
		 * Advance the index before checking the current queue.
		 * It improves fairness, by ensuring the queue doesn't
		 * need to be fully emptied before trying to wake up
		 * from the next one.
		 */
		wake_index = sbq_index_inc(wake_index);

		if (waitqueue_active(&ws->wait)) {
			woken = wake_up_nr(&ws->wait, nr);
			if (woken == nr)
				break;
			nr -= woken;
		}
	}

	if (wake_index != atomic_read(&sbq->wake_index))
		atomic_set(&sbq->wake_index, wake_index);
}
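
/*
 * Wake-ups are batched: completions are accumulated in sbq->completion_cnt
 * and compared against sbq->wakeup_cnt, and a batch of waiters is only woken
 * once at least wake_batch completions have not yet been matched by wake-ups.
 */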
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
	unsigned int wakeups;

	if (!atomic_read(&sbq->ws_active))
		return;
sbitmap: Use single per-bitmap counting to wake up queued tags sbitmap suffers from code complexity, as demonstrated by recent fixes, and eventual lost wake ups on nested I/O completion. The later happens, from what I understand, due to the non-atomic nature of the updates to wait_cnt, which needs to be subtracted and eventually reset when equal to zero. This two step process can eventually miss an update when a nested completion happens to interrupt the CPU in between the wait_cnt updates. This is very hard to fix, as shown by the recent changes to this code. The code complexity arises mostly from the corner cases to avoid missed wakes in this scenario. In addition, the handling of wake_batch recalculation plus the synchronization with sbq_queue_wake_up is non-trivial. This patchset implements the idea originally proposed by Jan [1], which removes the need for the two-step updates of wait_cnt. This is done by tracking the number of completions and wakeups in always increasing, per-bitmap counters. Instead of having to reset the wait_cnt when it reaches zero, we simply keep counting, and attempt to wake up N threads in a single wait queue whenever there is enough space for a batch. Waking up less than batch_wake shouldn't be a problem, because we haven't changed the conditions for wake up, and the existing batch calculation guarantees at least enough remaining completions to wake up a batch for each queue at any time. Performance-wise, one should expect very similar performance to the original algorithm for the case where there is no queueing. In both the old algorithm and this implementation, the first thing is to check ws_active, which bails out if there is no queueing to be managed. In the new code, we took care to avoid accounting completions and wakeups when there is no queueing, to not pay the cost of atomic operations unnecessarily, since it doesn't skew the numbers. For more interesting cases, where there is queueing, we need to take into account the cross-communication of the atomic operations. I've been benchmarking by running parallel fio jobs against a single hctx nullb in different hardware queue depth scenarios, and verifying both IOPS and queueing. Each experiment was repeated 5 times on a 20-CPU box, with 20 parallel jobs. fio was issuing fixed-size randwrites with qd=64 against nullb, varying only the hardware queue length per test. queue size 2 4 8 16 32 64 6.1-rc2 1681.1K (1.6K) 2633.0K (12.7K) 6940.8K (16.3K) 8172.3K (617.5K) 8391.7K (367.1K) 8606.1K (351.2K) patched 1721.8K (15.1K) 3016.7K (3.8K) 7543.0K (89.4K) 8132.5K (303.4K) 8324.2K (230.6K) 8401.8K (284.7K) The following is a similar experiment, ran against a nullb with a single bitmap shared by 20 hctx spread across 2 NUMA nodes. This has 40 parallel fio jobs operating on the same device queue size 2 4 8 16 32 64 6.1-rc2 1081.0K (2.3K) 957.2K (1.5K) 1699.1K (5.7K) 6178.2K (124.6K) 12227.9K (37.7K) 13286.6K (92.9K) patched 1081.8K (2.8K) 1316.5K (5.4K) 2364.4K (1.8K) 6151.4K (20.0K) 11893.6K (17.5K) 12385.6K (18.4K) It has also survived blktests and a 12h-stress run against nullb. I also ran the code against nvme and a scsi SSD, and I didn't observe performance regression in those. If there are other tests you think I should run, please let me know and I will follow up with results. 
[1] https://lore.kernel.org/all/aef9de29-e9f5-259a-f8be-12d1b734e72@google.com/ Cc: Hugh Dickins <hughd@google.com> Cc: Keith Busch <kbusch@kernel.org> Cc: Liu Song <liusong@linux.alibaba.com> Suggested-by: Jan Kara <jack@suse.cz> Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de> Link: https://lore.kernel.org/r/20221105231055.25953-1-krisman@suse.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-11-05 23:10:55 +00:00
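	/*
	 * completion_cnt and wakeup_cnt only ever increase. Account the new
	 * completions, then try to claim a whole batch of them by advancing
	 * wakeup_cnt with a cmpxchg; if a concurrent waker advances it first,
	 * atomic_try_cmpxchg() reloads 'wakeups' and the distance between the
	 * two counters is re-checked before another attempt.
	 */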
atomic_add(nr, &sbq->completion_cnt);
wakeups = atomic_read(&sbq->wakeup_cnt);
do {
if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
return;
} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
&wakeups, wakeups + wake_batch));
__sbitmap_queue_wake_up(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
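	/*
	 * The per-cpu hint is best-effort: concurrent updates may race and a
	 * stale value is harmless, since readers never rely on the hint being
	 * valid. data_race() below only silences KCSAN for this plain write.
	 */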
if (likely(!sb->round_robin && tag < sb->depth))
sbitmap: silence data race warning

KCSAN complains about the sbitmap hint update:

==================================================================
BUG: KCSAN: data-race in sbitmap_queue_clear / sbitmap_queue_clear

write to 0xffffe8ffffd145b8 of 4 bytes by interrupt on cpu 1:
 sbitmap_queue_clear+0xca/0xf0 lib/sbitmap.c:606
 blk_mq_put_tag+0x82/0x90
 __blk_mq_free_request+0x114/0x180 block/blk-mq.c:507
 blk_mq_free_request+0x2c8/0x340 block/blk-mq.c:541
 __blk_mq_end_request+0x214/0x230 block/blk-mq.c:565
 blk_mq_end_request+0x37/0x50 block/blk-mq.c:574
 lo_complete_rq+0xca/0x170 drivers/block/loop.c:541
 blk_complete_reqs block/blk-mq.c:584 [inline]
 blk_done_softirq+0x69/0x90 block/blk-mq.c:589
 __do_softirq+0x12c/0x26e kernel/softirq.c:558
 run_ksoftirqd+0x13/0x20 kernel/softirq.c:920
 smpboot_thread_fn+0x22f/0x330 kernel/smpboot.c:164
 kthread+0x262/0x280 kernel/kthread.c:319
 ret_from_fork+0x1f/0x30

write to 0xffffe8ffffd145b8 of 4 bytes by interrupt on cpu 0:
 sbitmap_queue_clear+0xca/0xf0 lib/sbitmap.c:606
 blk_mq_put_tag+0x82/0x90
 __blk_mq_free_request+0x114/0x180 block/blk-mq.c:507
 blk_mq_free_request+0x2c8/0x340 block/blk-mq.c:541
 __blk_mq_end_request+0x214/0x230 block/blk-mq.c:565
 blk_mq_end_request+0x37/0x50 block/blk-mq.c:574
 lo_complete_rq+0xca/0x170 drivers/block/loop.c:541
 blk_complete_reqs block/blk-mq.c:584 [inline]
 blk_done_softirq+0x69/0x90 block/blk-mq.c:589
 __do_softirq+0x12c/0x26e kernel/softirq.c:558
 run_ksoftirqd+0x13/0x20 kernel/softirq.c:920
 smpboot_thread_fn+0x22f/0x330 kernel/smpboot.c:164
 kthread+0x262/0x280 kernel/kthread.c:319
 ret_from_fork+0x1f/0x30

value changed: 0x00000035 -> 0x00000044

Reported by Kernel Concurrency Sanitizer on:
CPU: 0 PID: 10 Comm: ksoftirqd/0 Not tainted 5.15.0-rc6-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
==================================================================

This is a data race, but not an important one: it is just updating the percpu alloc hint, and the reader of that hint doesn't ever require it to be valid. Just annotate it with data_race() to silence this one.

Reported-by: syzbot+4f8bfd804b4a1f95b8f6@syzkaller.appspotmail.com
Acked-by: Marco Elver <elver@google.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2021-10-25 16:45:01 +00:00
data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
int *tags, int nr_tags)
{
struct sbitmap *sb = &sbq->sb;
unsigned long *addr = NULL;
unsigned long mask = 0;
int i;
smp_mb__before_atomic();
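	/*
	 * Batch the clears per word: bits for tags that land in the same word
	 * are accumulated in 'mask' and cleared with a single
	 * atomic_long_andnot() once the loop moves on to a different word.
	 */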
for (i = 0; i < nr_tags; i++) {
const int tag = tags[i] - offset;
unsigned long *this_addr;
/* since we're clearing a batch, skip the deferred map */
this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
if (!addr) {
addr = this_addr;
} else if (addr != this_addr) {
atomic_long_andnot(mask, (atomic_long_t *) addr);
mask = 0;
addr = this_addr;
}
mask |= (1UL << SB_NR_TO_BIT(sb, tag));
}
if (mask)
atomic_long_andnot(mask, (atomic_long_t *) addr);
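	/*
	 * Order the word updates against the wakeup below; see the barrier
	 * comments in sbitmap_queue_clear() for the pairing.
	 */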
smp_mb__after_atomic();
sbitmap_queue_wake_up(sbq, nr_tags);
sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
tags[nr_tags - 1] - offset);
}
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
sbitmap: order READ/WRITE freed instance and setting clear bit

Inside sbitmap_queue_clear(), once the clear bit is set, it becomes visible to the allocation path immediately. Meanwhile, READ/WRITE on the old associated instance (such as a request in the case of blk-mq) may be reordered with setting the clear bit, so a race with re-allocation may be triggered.

Add one memory barrier to order READ/WRITE of the freed associated instance against setting the clear bit, avoiding the race with re-allocation.

The following kernel oops, triggered by block/006 on aarch64, may be fixed:

[ 142.330954] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000330
[ 142.338794] Mem abort info:
[ 142.341554] ESR = 0x96000005
[ 142.344632] Exception class = DABT (current EL), IL = 32 bits
[ 142.350500] SET = 0, FnV = 0
[ 142.353544] EA = 0, S1PTW = 0
[ 142.356678] Data abort info:
[ 142.359528] ISV = 0, ISS = 0x00000005
[ 142.363343] CM = 0, WnR = 0
[ 142.366305] user pgtable: 64k pages, 48-bit VAs, pgdp = 000000002a3c51c0
[ 142.372983] [0000000000000330] pgd=0000000000000000, pud=0000000000000000
[ 142.379777] Internal error: Oops: 96000005 [#1] SMP
[ 142.384613] Modules linked in: null_blk ib_isert iscsi_target_mod ib_srpt target_core_mod ib_srp scsi_transport_srp vfat fat rpcrdma sunrpc rdma_ucm ib_iser rdma_cm iw_cm libiscsi ib_umad scsi_transport_iscsi ib_ipoib ib_cm mlx5_ib ib_uverbs ib_core sbsa_gwdt crct10dif_ce ghash_ce ipmi_ssif sha2_ce ipmi_devintf sha256_arm64 sg sha1_ce ipmi_msghandler ip_tables xfs libcrc32c mlx5_core sdhci_acpi mlxfw ahci_platform at803x sdhci libahci_platform qcom_emac mmc_core hdma hdma_mgmt i2c_dev [last unloaded: null_blk]
[ 142.429753] CPU: 7 PID: 1983 Comm: fio Not tainted 5.0.0.cki #2
[ 142.449458] pstate: 00400005 (nzcv daif +PAN -UAO)
[ 142.454239] pc : __blk_mq_free_request+0x4c/0xa8
[ 142.458830] lr : blk_mq_free_request+0xec/0x118
[ 142.463344] sp : ffff00003360f6a0
[ 142.466646] x29: ffff00003360f6a0 x28: ffff000010e70000
[ 142.471941] x27: ffff801729a50048 x26: 0000000000010000
[ 142.477232] x25: ffff00003360f954 x24: ffff7bdfff021440
[ 142.482529] x23: 0000000000000000 x22: 00000000ffffffff
[ 142.487830] x21: ffff801729810000 x20: 0000000000000000
[ 142.493123] x19: ffff801729a50000 x18: 0000000000000000
[ 142.498413] x17: 0000000000000000 x16: 0000000000000001
[ 142.503709] x15: 00000000000000ff x14: ffff7fe000000000
[ 142.509003] x13: ffff8017dcde09a0 x12: 0000000000000000
[ 142.514308] x11: 0000000000000001 x10: 0000000000000008
[ 142.519597] x9 : ffff8017dcde09a0 x8 : 0000000000002000
[ 142.524889] x7 : ffff8017dcde0a00 x6 : 000000015388f9be
[ 142.530187] x5 : 0000000000000001 x4 : 0000000000000000
[ 142.535478] x3 : 0000000000000000 x2 : 0000000000000000
[ 142.540777] x1 : 0000000000000001 x0 : ffff00001041b194
[ 142.546071] Process fio (pid: 1983, stack limit = 0x000000006460a0ea)
[ 142.552500] Call trace:
[ 142.554926] __blk_mq_free_request+0x4c/0xa8
[ 142.559181] blk_mq_free_request+0xec/0x118
[ 142.563352] blk_mq_end_request+0xfc/0x120
[ 142.567444] end_cmd+0x3c/0xa8 [null_blk]
[ 142.571434] null_complete_rq+0x20/0x30 [null_blk]
[ 142.576194] blk_mq_complete_request+0x108/0x148
[ 142.580797] null_handle_cmd+0x1d4/0x718 [null_blk]
[ 142.585662] null_queue_rq+0x60/0xa8 [null_blk]
[ 142.590171] blk_mq_try_issue_directly+0x148/0x280
[ 142.594949] blk_mq_try_issue_list_directly+0x9c/0x108
[ 142.600064] blk_mq_sched_insert_requests+0xb0/0xd0
[ 142.604926] blk_mq_flush_plug_list+0x16c/0x2a0
[ 142.609441] blk_flush_plug_list+0xec/0x118
[ 142.613608] blk_finish_plug+0x3c/0x4c
[ 142.617348] blkdev_direct_IO+0x3b4/0x428
[ 142.621336] generic_file_read_iter+0x84/0x180
[ 142.625761] blkdev_read_iter+0x50/0x78
[ 142.629579] aio_read.isra.6+0xf8/0x190
[ 142.633409] __io_submit_one.isra.8+0x148/0x738
[ 142.637912] io_submit_one.isra.9+0x88/0xb8
[ 142.642078] __arm64_sys_io_submit+0xe0/0x238
[ 142.646428] el0_svc_handler+0xa0/0x128
[ 142.650238] el0_svc+0x8/0xc
[ 142.653104] Code: b9402a63 f9000a7f 3100047f 540000a0 (f9419a81)
[ 142.659202] ---[ end trace 467586bc175eb09d ]---

Fixes: ea86ea2cdced20057da ("sbitmap: ammortize cost of clearing bits")
Reported-and-bisected_and_tested-by: Yi Zhang <yi.zhang@redhat.com>
Cc: Yi Zhang <yi.zhang@redhat.com>
Cc: "jianchao.wang" <jianchao.w.wang@oracle.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-03-22 01:13:51 +00:00
/*
* Once the clear bit is set, the bit may be allocated out.
*
 * Orders READ/WRITE on the associated instance (such as request
* of blk_mq) by this bit for avoiding race with re-allocation,
* and its pair is the memory barrier implied in __sbitmap_get_word.
*
* One invariant is that the clear bit has to be zero when the bit
* is in use.
*/
smp_mb__before_atomic();
sbitmap_deferred_clear_bit(&sbq->sb, nr);
/*
* Pairs with the memory barrier in set_current_state() to ensure the
* proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
* and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
* waiter. See the comment on waitqueue_active().
*/
smp_mb__after_atomic();
sbitmap_queue_wake_up(sbq, 1);
sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
int i, wake_index;
/*
* Pairs with the memory barrier in set_current_state() like in
* sbitmap_queue_wake_up().
*/
smp_mb();
wake_index = atomic_read(&sbq->wake_index);
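	/*
	 * Walk all SBQ_WAIT_QUEUES wait queues, starting from the current
	 * wake_index, and wake any that have waiters.
	 */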
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[wake_index];
if (waitqueue_active(&ws->wait))
wake_up(&ws->wait);
wake_index = sbq_index_inc(wake_index);
}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
bool first;
int i;
sbitmap_show(&sbq->sb, m);
seq_puts(m, "alloc_hint={");
first = true;
for_each_possible_cpu(i) {
if (!first)
seq_puts(m, ", ");
first = false;
seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
}
seq_puts(m, "}\n");
seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
seq_puts(m, "ws={\n");
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[i];
seq_printf(m, "\t{.wait=%s},\n",
waitqueue_active(&ws->wait) ? "active" : "inactive");
}
seq_puts(m, "}\n");
seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
struct sbq_wait_state *ws,
struct sbq_wait *sbq_wait)
{
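	/*
	 * ->sbq doubles as the "already queued" marker: only attach the wait
	 * entry and bump ws_active when it is not set, so a callback that is
	 * still pending is not queued twice (see the kyber race described in
	 * the commit message below).
	 */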
if (!sbq_wait->sbq) {
sbq_wait->sbq = sbq;
atomic_inc(&sbq->ws_active);
sbitmap: only queue kyber's wait callback if not already active

Under heavy loads where the kyber I/O scheduler hits the token limits for its scheduling domains, kyber can become stuck. When active requests complete, kyber may not be woken up, leaving the I/O requests in kyber stuck.

This stuck state is due to a race condition between kyber and the sbitmap functions it uses to run a callback when enough requests have completed. The running of an sbt_wait callback can race with the attempt to insert the sbt_wait. Since sbitmap_del_wait_queue removes the sbt_wait from the list first and only then sets the sbq field to NULL, kyber can see the item as not on a list while the call to sbitmap_add_wait_queue still sees sbq as non-NULL. This results in the sbt_wait being inserted onto the wait list without ws_active being incremented, so the sbitmap queue does not know there is a waiter on a wait list.

Since sbitmap doesn't think there is a waiter, kyber may never be informed that there are domain tokens available, and the I/O never advances. With the sbt_wait on a wait list, kyber believes it has an active waiter, so it cannot insert a new waiter when reaching the domain's full state.

This race can be fixed by only adding the sbt_wait to the queue if the sbq field is NULL. If sbq is not NULL, there is already an action active which will trigger the re-running of kyber. Let it run, and add the sbt_wait to the wait list afterwards if it still needs to wait.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: David Jeffery <djeffery@redhat.com>
Reported-by: John Pittman <jpittman@redhat.com>
Tested-by: John Pittman <jpittman@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-12-17 16:00:24 +00:00
add_wait_queue(&ws->wait, &sbq_wait->wait);
}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
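	/*
	 * Take the entry off the wait list first, then clear ->sbq and drop
	 * the ws_active reference, so a subsequent sbitmap_add_wait_queue()
	 * knows it must re-account the waiter.
	 */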
list_del_init(&sbq_wait->wait.entry);
if (sbq_wait->sbq) {
atomic_dec(&sbq_wait->sbq->ws_active);
sbq_wait->sbq = NULL;
}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
struct sbq_wait_state *ws,
struct sbq_wait *sbq_wait, int state)
{
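	/*
	 * Account the waiter in ws_active before sleeping; ->sbq records
	 * which queue to decrement again in sbitmap_finish_wait().
	 */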
if (!sbq_wait->sbq) {
atomic_inc(&sbq->ws_active);
sbq_wait->sbq = sbq;
}
prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
struct sbq_wait *sbq_wait)
{
finish_wait(&ws->wait, &sbq_wait->wait);
if (sbq_wait->sbq) {
atomic_dec(&sbq->ws_active);
sbq_wait->sbq = NULL;
}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);