raid5-cache: new helper: r5_reserve_log_entry

Factor out code to reserve log space.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Shaohua Li <shli@fb.com>
Signed-off-by: NeilBrown <neilb@suse.com>
Author:    Christoph Hellwig, 2015-10-05 09:31:14 +02:00
Committer: NeilBrown
Parent:    51039cd066
Commit:    c1b9919849

@@ -271,6 +271,23 @@ static struct bio *r5l_bio_alloc(struct r5l_log *log, struct r5l_io_unit *io)
 	return bio;
 }
 
+static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
+{
+	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
+
+	/*
+	 * If we filled up the log device, start from the beginning again,
+	 * which will require a new bio.
+	 *
+	 * Note: for this to work properly the log size needs to be a multiple
+	 * of BLOCK_SECTORS.
+	 */
+	if (log->log_start == 0)
+		io->current_bio = NULL;
+
+	io->log_end = log->log_start;
+}
+
 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
 {
 	struct r5l_io_unit *io;
@@ -299,11 +316,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
 	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
 
 	log->seq++;
-	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
-	io->log_end = log->log_start;
-	/* current bio hit disk end */
-	if (log->log_start == 0)
-		io->current_bio = NULL;
+	r5_reserve_log_entry(log, io);
 
 	spin_lock_irq(&log->io_list_lock);
 	list_add_tail(&io->log_sibling, &log->running_ios);
@@ -357,13 +370,8 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
 		io->current_bio = NULL;
 		goto alloc_bio;
 	}
-	log->log_start = r5l_ring_add(log, log->log_start,
-		BLOCK_SECTORS);
-	/* current bio hit disk end */
-	if (log->log_start == 0)
-		io->current_bio = NULL;
-	io->log_end = log->log_start;
+	r5_reserve_log_entry(log, io);
 }
 
 static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
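
For reference, the wrap-around behaviour the new helper relies on comes from r5l_ring_add(), which is not shown in this diff. The sketch below is an illustrative reconstruction, not the exact kernel implementation: it assumes log->device_size holds the usable log size in sectors and simply advances a position on the circular log, wrapping when the end of the device is reached.

/*
 * Illustrative sketch of the ring arithmetic assumed by
 * r5_reserve_log_entry(); field names follow the surrounding code,
 * but this is a reconstruction, not the exact kernel function.
 */
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start -= log->device_size;
	return start;
}

Under this assumption, stepping forward in BLOCK_SECTORS increments only lands exactly on sector 0 after a wrap when the log size is a multiple of BLOCK_SECTORS, which is why the helper can use log->log_start == 0 as the "hit disk end, allocate a new bio" signal described in its comment.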