drbd: Rename "mdev" to "device"

sed -i -e 's:mdev:device:g'

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Authored by Andreas Gruenbacher, 2011-07-03 13:26:43 +02:00; committed by Philipp Reisner
parent 5476169793
commit b30ab7913b
13 changed files with 3387 additions and 3387 deletions

File diff suppressed because it is too large.


@ -113,20 +113,20 @@ struct drbd_bitmap {
};
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_device *mdev, const char *func)
static void __bm_print_lock_info(struct drbd_device *device, const char *func)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
if (!__ratelimit(&drbd_ratelimit_state))
return;
dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
drbd_task_to_thread_name(mdev->tconn, current),
drbd_task_to_thread_name(device->tconn, current),
func, b->bm_why ?: "?",
drbd_task_to_thread_name(mdev->tconn, b->bm_task));
drbd_task_to_thread_name(device->tconn, b->bm_task));
}
void drbd_bm_lock(struct drbd_device *mdev, char *why, enum bm_flag flags)
void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
int trylock_failed;
if (!b) {
@ -138,9 +138,9 @@ void drbd_bm_lock(struct drbd_device *mdev, char *why, enum bm_flag flags)
if (trylock_failed) {
dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
drbd_task_to_thread_name(mdev->tconn, current),
drbd_task_to_thread_name(device->tconn, current),
why, b->bm_why ?: "?",
drbd_task_to_thread_name(mdev->tconn, b->bm_task));
drbd_task_to_thread_name(device->tconn, b->bm_task));
mutex_lock(&b->bm_change);
}
if (BM_LOCKED_MASK & b->bm_flags)
@ -151,15 +151,15 @@ void drbd_bm_lock(struct drbd_device *mdev, char *why, enum bm_flag flags)
b->bm_task = current;
}
void drbd_bm_unlock(struct drbd_device *mdev)
void drbd_bm_unlock(struct drbd_device *device)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
if (!b) {
dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
return;
}
if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
b->bm_flags &= ~BM_LOCKED_MASK;
@ -211,19 +211,19 @@ static unsigned long bm_page_to_idx(struct page *page)
/* As is very unlikely that the same page is under IO from more than one
* context, we can get away with a bit per page and one wait queue per bitmap.
*/
static void bm_page_lock_io(struct drbd_device *mdev, int page_nr)
static void bm_page_lock_io(struct drbd_device *device, int page_nr)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}
static void bm_page_unlock_io(struct drbd_device *mdev, int page_nr)
static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
wake_up(&mdev->bitmap->bm_io_wait);
wake_up(&device->bitmap->bm_io_wait);
}
/* set _before_ submit_io, so it may be reset due to being changed
@ -242,22 +242,22 @@ static void bm_set_page_need_writeout(struct page *page)
/**
* drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
* @mdev: DRBD device.
* @device: DRBD device.
* @page_nr: the bitmap page to mark with the "hint" flag
*
* From within an activity log transaction, we mark a few pages with these
* hints, then call drbd_bm_write_hinted(), which will only write out changed
* pages which are flagged with this mark.
*/
void drbd_bm_mark_for_writeout(struct drbd_device *mdev, int page_nr)
void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
struct page *page;
if (page_nr >= mdev->bitmap->bm_number_of_pages) {
if (page_nr >= device->bitmap->bm_number_of_pages) {
dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n",
page_nr, (int)mdev->bitmap->bm_number_of_pages);
page_nr, (int)device->bitmap->bm_number_of_pages);
return;
}
page = mdev->bitmap->bm_pages[page_nr];
page = device->bitmap->bm_pages[page_nr];
set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
}
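/*
 * Illustrative only -- not part of this commit or of the file being diffed.
 * A minimal sketch of the call pattern described in the comment above; the
 * wrapper function and its page range are assumptions for illustration, and
 * the caller is assumed to hold a local-disk reference, which
 * drbd_bm_write_hinted() requires.
 */
static int example_hinted_writeout(struct drbd_device *device,
				   unsigned int first_page, unsigned int last_page)
{
	unsigned int page_nr;

	/* from within the activity log transaction: flag the affected bitmap pages */
	for (page_nr = first_page; page_nr <= last_page; page_nr++)
		drbd_bm_mark_for_writeout(device, page_nr);

	/* afterwards: write out only pages that are both hinted and changed */
	return drbd_bm_write_hinted(device);
}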
@ -340,7 +340,7 @@ static void bm_unmap(unsigned long *p_addr)
/*
* actually most functions herein should take a struct drbd_bitmap*, not a
* struct drbd_device*, but for the debug macros I like to have the mdev around
* struct drbd_device*, but for the debug macros I like to have the device around
* to be able to report device specific.
*/
@ -436,11 +436,11 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
/*
* called on driver init only. TODO call when a device is created.
* allocates the drbd_bitmap, and stores it in mdev->bitmap.
* allocates the drbd_bitmap, and stores it in device->bitmap.
*/
int drbd_bm_init(struct drbd_device *mdev)
int drbd_bm_init(struct drbd_device *device)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
WARN_ON(b != NULL);
b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
if (!b)
@ -449,28 +449,28 @@ int drbd_bm_init(struct drbd_device *mdev)
mutex_init(&b->bm_change);
init_waitqueue_head(&b->bm_io_wait);
mdev->bitmap = b;
device->bitmap = b;
return 0;
}
sector_t drbd_bm_capacity(struct drbd_device *mdev)
sector_t drbd_bm_capacity(struct drbd_device *device)
{
if (!expect(mdev->bitmap))
if (!expect(device->bitmap))
return 0;
return mdev->bitmap->bm_dev_capacity;
return device->bitmap->bm_dev_capacity;
}
/* called on driver unload. TODO: call when a device is destroyed.
*/
void drbd_bm_cleanup(struct drbd_device *mdev)
void drbd_bm_cleanup(struct drbd_device *device)
{
if (!expect(mdev->bitmap))
if (!expect(device->bitmap))
return;
bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
kfree(mdev->bitmap);
mdev->bitmap = NULL;
bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
kfree(device->bitmap);
device->bitmap = NULL;
}
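/*
 * Illustrative only -- not part of this commit.  A minimal sketch of the
 * per-device bitmap lifecycle described by the comments above; the wrapper
 * function, its error handling, and the capacity argument are assumptions
 * made for illustration.
 */
static int example_bitmap_lifecycle(struct drbd_device *device, sector_t capacity)
{
	int err;

	err = drbd_bm_init(device);		/* allocates device->bitmap */
	if (err)
		return err;

	/* size the bitmap for the current capacity; newly added bits start out set */
	err = drbd_bm_resize(device, capacity, 1);
	if (err)
		drbd_bm_cleanup(device);	/* frees the pages and device->bitmap */

	return err;
}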
/*
@ -631,9 +631,9 @@ static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
* In case this is actually a resize, we copy the old bitmap into the new one.
* Otherwise, the bitmap is initialized to all bits set.
*/
int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits)
int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
unsigned long bits, words, owords, obits;
unsigned long want, have, onpages; /* number of pages */
struct page **npages, **opages = NULL;
@ -643,7 +643,7 @@ int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits
if (!expect(b))
return -ENOMEM;
drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
drbd_bm_lock(device, "resize", BM_LOCKED_MASK);
dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
(unsigned long long)capacity);
@ -678,9 +678,9 @@ int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits
*/
words = ALIGN(bits, 64) >> LN2_BPL;
if (get_ldev(mdev)) {
u64 bits_on_disk = drbd_md_on_disk_bits(mdev->ldev);
put_ldev(mdev);
if (get_ldev(device)) {
u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
put_ldev(device);
if (bits > bits_on_disk) {
dev_info(DEV, "bits = %lu\n", bits);
dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
@ -695,7 +695,7 @@ int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits
D_ASSERT(b->bm_pages != NULL);
npages = b->bm_pages;
} else {
if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
npages = NULL;
else
npages = bm_realloc_pages(b, want);
@ -745,7 +745,7 @@ int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits
dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
out:
drbd_bm_unlock(mdev);
drbd_bm_unlock(device);
return err;
}
@ -757,9 +757,9 @@ int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits
*
* maybe bm_set should be atomic_t ?
*/
unsigned long _drbd_bm_total_weight(struct drbd_device *mdev)
unsigned long _drbd_bm_total_weight(struct drbd_device *device)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
unsigned long s;
unsigned long flags;
@ -775,20 +775,20 @@ unsigned long _drbd_bm_total_weight(struct drbd_device *mdev)
return s;
}
unsigned long drbd_bm_total_weight(struct drbd_device *mdev)
unsigned long drbd_bm_total_weight(struct drbd_device *device)
{
unsigned long s;
/* if I don't have a disk, I don't know about out-of-sync status */
if (!get_ldev_if_state(mdev, D_NEGOTIATING))
if (!get_ldev_if_state(device, D_NEGOTIATING))
return 0;
s = _drbd_bm_total_weight(mdev);
put_ldev(mdev);
s = _drbd_bm_total_weight(device);
put_ldev(device);
return s;
}
size_t drbd_bm_words(struct drbd_device *mdev)
size_t drbd_bm_words(struct drbd_device *device)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return 0;
if (!expect(b->bm_pages))
@ -797,9 +797,9 @@ size_t drbd_bm_words(struct drbd_device *mdev)
return b->bm_words;
}
unsigned long drbd_bm_bits(struct drbd_device *mdev)
unsigned long drbd_bm_bits(struct drbd_device *device)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return 0;
@ -811,10 +811,10 @@ unsigned long drbd_bm_bits(struct drbd_device *mdev)
* bitmap must be locked by drbd_bm_lock.
* currently only used from receive_bitmap.
*/
void drbd_bm_merge_lel(struct drbd_device *mdev, size_t offset, size_t number,
void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
unsigned long *buffer)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr, *bm;
unsigned long word, bits;
unsigned int idx;
@ -860,10 +860,10 @@ void drbd_bm_merge_lel(struct drbd_device *mdev, size_t offset, size_t number,
/* copy number words from the bitmap starting at offset into the buffer.
* buffer[i] will be little endian unsigned long.
*/
void drbd_bm_get_lel(struct drbd_device *mdev, size_t offset, size_t number,
void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
unsigned long *buffer)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr, *bm;
size_t end, do_now;
@ -897,9 +897,9 @@ void drbd_bm_get_lel(struct drbd_device *mdev, size_t offset, size_t number,
}
/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_device *mdev)
void drbd_bm_set_all(struct drbd_device *device)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return;
if (!expect(b->bm_pages))
@ -913,9 +913,9 @@ void drbd_bm_set_all(struct drbd_device *mdev)
}
/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_device *mdev)
void drbd_bm_clear_all(struct drbd_device *device)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return;
if (!expect(b->bm_pages))
@ -928,7 +928,7 @@ void drbd_bm_clear_all(struct drbd_device *mdev)
}
struct bm_aio_ctx {
struct drbd_device *mdev;
struct drbd_device *device;
atomic_t in_flight;
unsigned int done;
unsigned flags;
@ -943,7 +943,7 @@ static void bm_aio_ctx_destroy(struct kref *kref)
{
struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
put_ldev(ctx->mdev);
put_ldev(ctx->device);
kfree(ctx);
}
@ -951,8 +951,8 @@ static void bm_aio_ctx_destroy(struct kref *kref)
static void bm_async_io_complete(struct bio *bio, int error)
{
struct bm_aio_ctx *ctx = bio->bi_private;
struct drbd_device *mdev = ctx->mdev;
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
int uptodate = bio_flagged(bio, BIO_UPTODATE);
@ -983,7 +983,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
}
bm_page_unlock_io(mdev, idx);
bm_page_unlock_io(device, idx);
if (ctx->flags & BM_AIO_COPY_PAGES)
mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
@ -992,7 +992,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
if (atomic_dec_and_test(&ctx->in_flight)) {
ctx->done = 1;
wake_up(&mdev->misc_wait);
wake_up(&device->misc_wait);
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
}
}
@ -1000,23 +1000,23 @@ static void bm_async_io_complete(struct bio *bio, int error)
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
struct bio *bio = bio_alloc_drbd(GFP_NOIO);
struct drbd_device *mdev = ctx->mdev;
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
struct page *page;
unsigned int len;
sector_t on_disk_sector =
mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
device->ldev->md.md_offset + device->ldev->md.bm_offset;
on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
/* this might happen with very small
* flexible external meta data device,
* or with PAGE_SIZE > 4k */
len = min_t(unsigned int, PAGE_SIZE,
(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);
(drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);
/* serialize IO on this page */
bm_page_lock_io(mdev, page_nr);
bm_page_lock_io(device, page_nr);
/* before memcpy and submit,
* so it can be redirtied any time */
bm_set_page_unchanged(b->bm_pages[page_nr]);
@ -1027,7 +1027,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
bio->bi_bdev = mdev->ldev->md_bdev;
bio->bi_bdev = device->ldev->md_bdev;
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
@ -1035,24 +1035,24 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bio->bi_private = ctx;
bio->bi_end_io = bm_async_io_complete;
if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio->bi_rw |= rw;
bio_endio(bio, -EIO);
} else {
submit_bio(rw, bio);
/* this should not count as user activity and cause the
* resync to throttle -- see drbd_rs_should_slow_down(). */
atomic_add(len >> 9, &mdev->rs_sect_ev);
atomic_add(len >> 9, &device->rs_sect_ev);
}
}
/*
* bm_rw: read/write the whole bitmap from/to its on disk location.
*/
static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
struct bm_aio_ctx *ctx;
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
int num_pages, i, count = 0;
unsigned long now;
char ppb[10];
@ -1072,7 +1072,7 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy
return -ENOMEM;
*ctx = (struct bm_aio_ctx) {
.mdev = mdev,
.device = device,
.in_flight = ATOMIC_INIT(1),
.done = 0,
.flags = flags,
@ -1080,7 +1080,7 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy
.kref = { ATOMIC_INIT(2) },
};
if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
kfree(ctx);
return -ENODEV;
@ -1132,7 +1132,7 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy
* "in_flight reached zero, all done" event.
*/
if (!atomic_dec_and_test(&ctx->in_flight))
wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
else
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
@ -1144,7 +1144,7 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy
if (ctx->error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
err = -EIO; /* ctx->error ? */
}
@ -1153,7 +1153,7 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy
now = jiffies;
if (rw == WRITE) {
drbd_md_flush(mdev);
drbd_md_flush(device);
} else /* rw == READ */ {
b->bm_set = bm_count_bits(b);
dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
@ -1171,38 +1171,38 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy
/**
* drbd_bm_read() - Read the whole bitmap from its on disk location.
* @mdev: DRBD device.
* @device: DRBD device.
*/
int drbd_bm_read(struct drbd_device *mdev) __must_hold(local)
int drbd_bm_read(struct drbd_device *device) __must_hold(local)
{
return bm_rw(mdev, READ, 0, 0);
return bm_rw(device, READ, 0, 0);
}
/**
* drbd_bm_write() - Write the whole bitmap to its on disk location.
* @mdev: DRBD device.
* @device: DRBD device.
*
* Will only write pages that have changed since last IO.
*/
int drbd_bm_write(struct drbd_device *mdev) __must_hold(local)
int drbd_bm_write(struct drbd_device *device) __must_hold(local)
{
return bm_rw(mdev, WRITE, 0, 0);
return bm_rw(device, WRITE, 0, 0);
}
/**
* drbd_bm_write_all() - Write the whole bitmap to its on disk location.
* @mdev: DRBD device.
* @device: DRBD device.
*
* Will write all pages.
*/
int drbd_bm_write_all(struct drbd_device *mdev) __must_hold(local)
int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
{
return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
return bm_rw(device, WRITE, BM_WRITE_ALL_PAGES, 0);
}
/**
* drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
* @mdev: DRBD device.
* @device: DRBD device.
*
* Will only write pages that have changed since last IO.
* In contrast to drbd_bm_write(), this will copy the bitmap pages
@ -1211,23 +1211,23 @@ int drbd_bm_write_all(struct drbd_device *mdev) __must_hold(local)
* verify is aborted due to a failed peer disk, while local IO continues, or
* pending resync acks are still being processed.
*/
int drbd_bm_write_copy_pages(struct drbd_device *mdev) __must_hold(local)
int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
{
return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
return bm_rw(device, WRITE, BM_AIO_COPY_PAGES, 0);
}
/**
* drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
* @mdev: DRBD device.
* @device: DRBD device.
*/
int drbd_bm_write_hinted(struct drbd_device *mdev) __must_hold(local)
int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
{
return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
return bm_rw(device, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
/**
* drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
* @mdev: DRBD device.
* @device: DRBD device.
* @idx: bitmap page index
*
* We don't want to special case on logical_block_size of the backend device,
@ -1237,12 +1237,12 @@ int drbd_bm_write_hinted(struct drbd_device *mdev) __must_hold(local)
* In case this becomes an issue on systems with larger PAGE_SIZE,
* we may want to change this again to write 4k aligned 4k pieces.
*/
int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(local)
int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local)
{
struct bm_aio_ctx *ctx;
int err;
if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
if (bm_test_page_unchanged(device->bitmap->bm_pages[idx])) {
dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
return 0;
}
@ -1252,7 +1252,7 @@ int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(l
return -ENOMEM;
*ctx = (struct bm_aio_ctx) {
.mdev = mdev,
.device = device,
.in_flight = ATOMIC_INIT(1),
.done = 0,
.flags = BM_AIO_COPY_PAGES,
@ -1260,21 +1260,21 @@ int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(l
.kref = { ATOMIC_INIT(2) },
};
if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
kfree(ctx);
return -ENODEV;
}
bm_page_io_async(ctx, idx, WRITE_SYNC);
wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
if (ctx->error)
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
/* that causes us to detach, so the in memory bitmap will be
* gone in a moment as well. */
mdev->bm_writ_cnt++;
device->bm_writ_cnt++;
err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
return err;
@ -1288,10 +1288,10 @@ int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(l
*
* this returns a bit number, NOT a sector!
*/
static unsigned long __bm_find_next(struct drbd_device *mdev, unsigned long bm_fo,
static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
const int find_zero_bit)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr;
unsigned long bit_offset;
unsigned i;
@ -1328,10 +1328,10 @@ static unsigned long __bm_find_next(struct drbd_device *mdev, unsigned long bm_f
return bm_fo;
}
static unsigned long bm_find_next(struct drbd_device *mdev,
static unsigned long bm_find_next(struct drbd_device *device,
unsigned long bm_fo, const int find_zero_bit)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
unsigned long i = DRBD_END_OF_BITMAP;
if (!expect(b))
@ -1341,39 +1341,39 @@ static unsigned long bm_find_next(struct drbd_device *mdev,
spin_lock_irq(&b->bm_lock);
if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(mdev);
bm_print_lock_info(device);
i = __bm_find_next(mdev, bm_fo, find_zero_bit);
i = __bm_find_next(device, bm_fo, find_zero_bit);
spin_unlock_irq(&b->bm_lock);
return i;
}
unsigned long drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo)
unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
return bm_find_next(mdev, bm_fo, 0);
return bm_find_next(device, bm_fo, 0);
}
#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm_fo)
unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
return bm_find_next(mdev, bm_fo, 1);
return bm_find_next(device, bm_fo, 1);
}
#endif
/* does not spin_lock_irqsave.
* you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo)
unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
return __bm_find_next(mdev, bm_fo, 0);
/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
return __bm_find_next(device, bm_fo, 0);
}
unsigned long _drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm_fo)
unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
return __bm_find_next(mdev, bm_fo, 1);
/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
return __bm_find_next(device, bm_fo, 1);
}
/* returns number of bits actually changed.
@ -1382,10 +1382,10 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm
* wants bitnr, not sector.
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_device *mdev, const unsigned long s,
static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
unsigned long e, int val)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr = NULL;
unsigned long bitnr;
unsigned int last_page_nr = -1U;
@ -1431,11 +1431,11 @@ static int __bm_change_bits_to(struct drbd_device *mdev, const unsigned long s,
* for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_device *mdev, const unsigned long s,
static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
const unsigned long e, int val)
{
unsigned long flags;
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
int c = 0;
if (!expect(b))
@ -1445,24 +1445,24 @@ static int bm_change_bits_to(struct drbd_device *mdev, const unsigned long s,
spin_lock_irqsave(&b->bm_lock, flags);
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
bm_print_lock_info(mdev);
bm_print_lock_info(device);
c = __bm_change_bits_to(mdev, s, e, val);
c = __bm_change_bits_to(device, s, e, val);
spin_unlock_irqrestore(&b->bm_lock, flags);
return c;
}
/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
return bm_change_bits_to(mdev, s, e, 1);
return bm_change_bits_to(device, s, e, 1);
}
/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
return -bm_change_bits_to(mdev, s, e, 0);
return -bm_change_bits_to(device, s, e, 0);
}
/* sets all bits in full words,
@ -1494,7 +1494,7 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
* You must first drbd_bm_lock().
* Can be called to set the whole bitmap in one go.
* Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
/* First set_bit from the first bit (s)
* up to the next long boundary (sl),
@ -1504,7 +1504,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
* Do not use memset, because we must account for changes,
* so we need to loop over the words with hweight() anyways.
*/
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
unsigned long sl = ALIGN(s,BITS_PER_LONG);
unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
int first_page;
@ -1516,7 +1516,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
if (e - s <= 3*BITS_PER_LONG) {
/* don't bother; el and sl may even be wrong. */
spin_lock_irq(&b->bm_lock);
__bm_change_bits_to(mdev, s, e, 1);
__bm_change_bits_to(device, s, e, 1);
spin_unlock_irq(&b->bm_lock);
return;
}
@ -1527,7 +1527,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
/* bits filling the current long */
if (sl)
__bm_change_bits_to(mdev, s, sl-1, 1);
__bm_change_bits_to(device, s, sl-1, 1);
first_page = sl >> (3 + PAGE_SHIFT);
last_page = el >> (3 + PAGE_SHIFT);
@ -1539,7 +1539,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
/* first and full pages, unless first page == last page */
for (page_nr = first_page; page_nr < last_page; page_nr++) {
bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
spin_unlock_irq(&b->bm_lock);
cond_resched();
first_word = 0;
@ -1555,7 +1555,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
* as we did not allocate it, it is not present in bitmap->bm_pages.
*/
if (last_word)
bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);
/* possibly trailing bits.
* example: (e & 63) == 63, el will be e+1.
@ -1563,7 +1563,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
* it would trigger an assert in __bm_change_bits_to()
*/
if (el <= e)
__bm_change_bits_to(mdev, el, e, 1);
__bm_change_bits_to(device, el, e, 1);
spin_unlock_irq(&b->bm_lock);
}
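/*
 * Illustrative only -- not part of this commit.  The locking discipline that
 * the comment above _drbd_bm_set_bits() asks for; the wrapper function and
 * the choice of BM_LOCKED_MASK are assumptions made for illustration.
 */
static void example_set_bits_locked(struct drbd_device *device,
				    unsigned long s, unsigned long e)
{
	drbd_bm_lock(device, "example_set_bits_locked", BM_LOCKED_MASK);
	_drbd_bm_set_bits(device, s, e);	/* sets bits s..e, inclusive */
	drbd_bm_unlock(device);
}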
@ -1574,10 +1574,10 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
* 0 ... bit not set
* -1 ... first out of bounds access, stop testing for bits!
*/
int drbd_bm_test_bit(struct drbd_device *mdev, const unsigned long bitnr)
int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
unsigned long flags;
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr;
int i;
@ -1588,7 +1588,7 @@ int drbd_bm_test_bit(struct drbd_device *mdev, const unsigned long bitnr)
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(mdev);
bm_print_lock_info(device);
if (bitnr < b->bm_bits) {
p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
@ -1605,10 +1605,10 @@ int drbd_bm_test_bit(struct drbd_device *mdev, const unsigned long bitnr)
}
/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
unsigned long flags;
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr = NULL;
unsigned long bitnr;
unsigned int page_nr = -1U;
@ -1625,7 +1625,7 @@ int drbd_bm_count_bits(struct drbd_device *mdev, const unsigned long s, const un
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(mdev);
bm_print_lock_info(device);
for (bitnr = s; bitnr <= e; bitnr++) {
unsigned int idx = bm_bit_to_page_idx(b, bitnr);
if (page_nr != idx) {
@ -1660,9 +1660,9 @@ int drbd_bm_count_bits(struct drbd_device *mdev, const unsigned long s, const un
* reference count of some bitmap extent element from some lru instead...
*
*/
int drbd_bm_e_weight(struct drbd_device *mdev, unsigned long enr)
int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
struct drbd_bitmap *b = mdev->bitmap;
struct drbd_bitmap *b = device->bitmap;
int count, s, e;
unsigned long flags;
unsigned long *p_addr, *bm;
@ -1674,7 +1674,7 @@ int drbd_bm_e_weight(struct drbd_device *mdev, unsigned long enr)
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
bm_print_lock_info(mdev);
bm_print_lock_info(device);
s = S2W(enr);
e = min((size_t)S2W(enr+1), b->bm_words);


@ -66,7 +66,7 @@
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
void tl_abort_disk_io(struct drbd_device *mdev);
void tl_abort_disk_io(struct drbd_device *device);
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
@ -102,7 +102,7 @@ struct drbd_tconn;
/* to shorten dev_warn(DEV, "msg"); and relatives statements */
#define DEV (disk_to_dev(mdev->vdisk))
#define DEV (disk_to_dev(device->vdisk))
#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
printk(LEVEL "d-con %s: " FMT, TCONN->name , ## ARGS)
@ -147,14 +147,14 @@ enum {
};
extern unsigned int
_drbd_insert_fault(struct drbd_device *mdev, unsigned int type);
_drbd_insert_fault(struct drbd_device *device, unsigned int type);
static inline int
drbd_insert_fault(struct drbd_device *mdev, unsigned int type) {
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
return fault_rate &&
(enable_faults & (1<<type)) &&
_drbd_insert_fault(mdev, type);
_drbd_insert_fault(device, type);
#else
return 0;
#endif
@ -189,7 +189,7 @@ struct bm_xfer_ctx {
unsigned bytes[2];
};
extern void INFO_bm_xfer_stats(struct drbd_device *mdev,
extern void INFO_bm_xfer_stats(struct drbd_device *device,
const char *direction, struct bm_xfer_ctx *c);
static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
@ -246,7 +246,7 @@ struct drbd_work {
struct list_head list;
int (*cb)(struct drbd_work *, int cancel);
union {
struct drbd_device *mdev;
struct drbd_device *device;
struct drbd_tconn *tconn;
};
};
@ -377,7 +377,7 @@ enum {
#define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
/* flag bits per mdev */
/* flag bits per device */
enum {
UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
MD_DIRTY, /* current uuids and flags not yet on disk */
@ -483,7 +483,7 @@ struct drbd_backing_dev {
struct block_device *backing_bdev;
struct block_device *md_bdev;
struct drbd_md md;
struct disk_conf *disk_conf; /* RCU, for updates: mdev->tconn->conf_update */
struct disk_conf *disk_conf; /* RCU, for updates: device->tconn->conf_update */
sector_t known_size; /* last known size of that backing device */
};
@ -496,8 +496,8 @@ struct bm_io_work {
struct drbd_work w;
char *why;
enum bm_flag flags;
int (*io_fn)(struct drbd_device *mdev);
void (*done)(struct drbd_device *mdev, int rv);
int (*io_fn)(struct drbd_device *device);
void (*done)(struct drbd_device *device, int rv);
};
enum write_ordering_e {
@ -540,7 +540,7 @@ struct drbd_tconn { /* is a resource from the config file */
char *name; /* Resource name */
struct list_head all_tconn; /* linked on global drbd_tconns */
struct kref kref;
struct idr volumes; /* <tconn, vnr> to mdev mapping */
struct idr volumes; /* <tconn, vnr> to device mapping */
enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
unsigned susp:1; /* IO suspended by user */
unsigned susp_nod:1; /* IO suspended because no data */
@ -744,7 +744,7 @@ struct drbd_device {
struct bm_io_work bm_io_work;
u64 ed_uuid; /* UUID of the exposed data */
struct mutex own_state_mutex;
struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
struct mutex *state_mutex; /* either own_state_mutex or device->tconn->cstate_mutex */
char congestion_reason; /* Why we where congested... */
atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
atomic_t rs_sect_ev; /* for submitted resync data rate, both */
@ -763,17 +763,17 @@ struct drbd_device {
struct submit_worker submit;
};
static inline struct drbd_device *minor_to_mdev(unsigned int minor)
static inline struct drbd_device *minor_to_device(unsigned int minor)
{
return (struct drbd_device *)idr_find(&minors, minor);
}
static inline unsigned int mdev_to_minor(struct drbd_device *mdev)
static inline unsigned int device_to_minor(struct drbd_device *device)
{
return mdev->minor;
return device->minor;
}
static inline struct drbd_device *vnr_to_mdev(struct drbd_tconn *tconn, int vnr)
static inline struct drbd_device *vnr_to_device(struct drbd_tconn *tconn, int vnr)
{
return (struct drbd_device *)idr_find(&tconn->volumes, vnr);
}
@ -789,7 +789,7 @@ enum dds_flags {
DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};
extern void drbd_init_set_defaults(struct drbd_device *mdev);
extern void drbd_init_set_defaults(struct drbd_device *device);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
@ -811,74 +811,74 @@ extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_tconn *tconn);
extern int drbd_send_uuids(struct drbd_device *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_device *mdev);
extern void drbd_gen_and_send_sync_uuid(struct drbd_device *mdev);
extern int drbd_send_sizes(struct drbd_device *mdev, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_device *mdev, union drbd_state s);
extern int drbd_send_current_state(struct drbd_device *mdev);
extern int drbd_send_sync_param(struct drbd_device *mdev);
extern int drbd_send_uuids(struct drbd_device *device);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_device *device);
extern void drbd_gen_and_send_sync_uuid(struct drbd_device *device);
extern int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_device *device, union drbd_state s);
extern int drbd_send_current_state(struct drbd_device *device);
extern int drbd_send_sync_param(struct drbd_device *device);
extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
u32 set_size);
extern int drbd_send_ack(struct drbd_device *, enum drbd_packet,
struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_device *mdev, enum drbd_packet cmd,
extern void drbd_send_ack_rp(struct drbd_device *device, enum drbd_packet cmd,
struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_device *mdev, enum drbd_packet cmd,
extern void drbd_send_ack_dp(struct drbd_device *device, enum drbd_packet cmd,
struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_device *mdev, enum drbd_packet cmd,
extern int drbd_send_ack_ex(struct drbd_device *device, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_device *, enum drbd_packet,
struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_device *mdev, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_device *mdev, int cmd,
extern int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_device *device, int cmd,
sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_device *mdev, sector_t sector,
extern int drbd_send_drequest_csum(struct drbd_device *device, sector_t sector,
int size, void *digest, int digest_size,
enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_device *mdev, sector_t sector, int size);
extern int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size);
extern int drbd_send_bitmap(struct drbd_device *mdev);
extern void drbd_send_sr_reply(struct drbd_device *mdev, enum drbd_state_rv retcode);
extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_mdev_cleanup(struct drbd_device *mdev);
void drbd_print_uuids(struct drbd_device *mdev, const char *text);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);
extern void conn_md_sync(struct drbd_tconn *tconn);
extern void drbd_md_write(struct drbd_device *mdev, void *buffer);
extern void drbd_md_sync(struct drbd_device *mdev);
extern int drbd_md_read(struct drbd_device *mdev, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *mdev) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *mdev, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *mdev) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *mdev, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *mdev, int flags)__must_hold(local);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
#ifndef DRBD_DEBUG_MD_SYNC
extern void drbd_md_mark_dirty(struct drbd_device *mdev);
extern void drbd_md_mark_dirty(struct drbd_device *device);
#else
#define drbd_md_mark_dirty(m) drbd_md_mark_dirty_(m, __LINE__ , __func__ )
extern void drbd_md_mark_dirty_(struct drbd_device *mdev,
extern void drbd_md_mark_dirty_(struct drbd_device *device,
unsigned int line, const char *func);
#endif
extern void drbd_queue_bitmap_io(struct drbd_device *mdev,
extern void drbd_queue_bitmap_io(struct drbd_device *device,
int (*io_fn)(struct drbd_device *),
void (*done)(struct drbd_device *, int),
char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *mdev,
extern int drbd_bitmap_io(struct drbd_device *device,
int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *mdev,
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_device *mdev);
extern void drbd_ldev_destroy(struct drbd_device *mdev);
extern int drbd_bmio_set_n_write(struct drbd_device *device);
extern int drbd_bmio_clear_n_write(struct drbd_device *device);
extern void drbd_ldev_destroy(struct drbd_device *device);
/* Meta data layout
*
@ -1064,52 +1064,52 @@ struct bm_extent {
#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
extern int drbd_bm_init(struct drbd_device *mdev);
extern int drbd_bm_resize(struct drbd_device *mdev, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *mdev);
extern void drbd_bm_set_all(struct drbd_device *mdev);
extern void drbd_bm_clear_all(struct drbd_device *mdev);
extern int drbd_bm_init(struct drbd_device *device);
extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
extern void drbd_bm_set_all(struct drbd_device *device);
extern void drbd_bm_clear_all(struct drbd_device *device);
/* set/clear/test only a few bits at a time */
extern int drbd_bm_set_bits(
struct drbd_device *mdev, unsigned long s, unsigned long e);
struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_clear_bits(
struct drbd_device *mdev, unsigned long s, unsigned long e);
struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
struct drbd_device *mdev, const unsigned long s, const unsigned long e);
struct drbd_device *device, const unsigned long s, const unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock,
* may process the whole bitmap in one go */
extern void _drbd_bm_set_bits(struct drbd_device *mdev,
extern void _drbd_bm_set_bits(struct drbd_device *device,
const unsigned long s, const unsigned long e);
extern int drbd_bm_test_bit(struct drbd_device *mdev, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_device *mdev, unsigned long enr);
extern int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_device *mdev) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *mdev, int page_nr);
extern int drbd_bm_write(struct drbd_device *mdev) __must_hold(local);
extern int drbd_bm_write_hinted(struct drbd_device *mdev) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_device *mdev) __must_hold(local);
extern int drbd_bm_write_copy_pages(struct drbd_device *mdev) __must_hold(local);
extern size_t drbd_bm_words(struct drbd_device *mdev);
extern unsigned long drbd_bm_bits(struct drbd_device *mdev);
extern sector_t drbd_bm_capacity(struct drbd_device *mdev);
extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
extern size_t drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t drbd_bm_capacity(struct drbd_device *device);
#define DRBD_END_OF_BITMAP (~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo);
extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *mdev);
extern unsigned long drbd_bm_total_weight(struct drbd_device *mdev);
extern int drbd_bm_rs_done(struct drbd_device *mdev);
extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
extern int drbd_bm_rs_done(struct drbd_device *device);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_device *mdev, size_t offset,
extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap */
extern void drbd_bm_get_lel(struct drbd_device *mdev, size_t offset,
extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
size_t number, unsigned long *buffer);
extern void drbd_bm_lock(struct drbd_device *mdev, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *mdev);
extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *device);
/* drbd_main.c */
extern struct kmem_cache *drbd_request_cache;
@ -1171,15 +1171,15 @@ extern int proc_details;
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_device *mdev, struct drbd_request *req);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
/* drbd_nl.c */
extern int drbd_msg_put_info(const char *info);
extern void drbd_suspend_io(struct drbd_device *mdev);
extern void drbd_resume_io(struct drbd_device *mdev);
extern void drbd_suspend_io(struct drbd_device *device);
extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size {
@ -1194,40 +1194,40 @@ enum determine_dev_size {
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_device *);
extern void drbd_reconsider_max_bio_size(struct drbd_device *mdev);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *mdev,
extern void drbd_reconsider_max_bio_size(struct drbd_device *device);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
enum drbd_role new_role,
int force);
extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
extern int drbd_khelper(struct drbd_device *mdev, char *cmd);
extern int drbd_khelper(struct drbd_device *device, char *cmd);
/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *mdev, int o_minor);
void drbd_resync_after_changed(struct drbd_device *mdev);
extern void drbd_start_resync(struct drbd_device *mdev, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *mdev);
extern void suspend_other_sg(struct drbd_device *mdev);
extern int drbd_resync_finished(struct drbd_device *mdev);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
extern void suspend_other_sg(struct drbd_device *device);
extern int drbd_resync_finished(struct drbd_device *device);
/* maybe rather drbd_main.c ? */
extern void *drbd_md_get_buffer(struct drbd_device *mdev);
extern void drbd_md_put_buffer(struct drbd_device *mdev);
extern int drbd_md_sync_page_io(struct drbd_device *mdev,
extern void *drbd_md_get_buffer(struct drbd_device *device);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev, sector_t sector, int rw);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *mdev,
extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_device *mdev);
extern void drbd_rs_controller_reset(struct drbd_device *device);
static inline void ov_out_of_sync_print(struct drbd_device *mdev)
static inline void ov_out_of_sync_print(struct drbd_device *device)
{
if (mdev->ov_last_oos_size) {
if (device->ov_last_oos_size) {
dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
(unsigned long long)mdev->ov_last_oos_start,
(unsigned long)mdev->ov_last_oos_size);
(unsigned long long)device->ov_last_oos_start,
(unsigned long)device->ov_last_oos_size);
}
mdev->ov_last_oos_size=0;
device->ov_last_oos_size = 0;
}
@ -1256,7 +1256,7 @@ extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
extern int drbd_rs_should_slow_down(struct drbd_device *mdev, sector_t sector);
extern int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector);
extern int drbd_submit_peer_request(struct drbd_device *,
struct drbd_peer_request *, const unsigned,
const int);
@ -1269,13 +1269,13 @@ extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
extern struct page *drbd_alloc_pages(struct drbd_device *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_device *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *mdev, struct list_head *to_be_freed);
extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern void conn_flush_workqueue(struct drbd_tconn *tconn);
extern int drbd_connected(struct drbd_device *mdev);
static inline void drbd_flush_workqueue(struct drbd_device *mdev)
extern int drbd_connected(struct drbd_device *device);
static inline void drbd_flush_workqueue(struct drbd_device *device)
{
conn_flush_workqueue(mdev->tconn);
conn_flush_workqueue(device->tconn);
}
/* Yes, there is kernel_setsockopt, but only since 2.6.18.
@ -1336,28 +1336,28 @@ extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
extern int drbd_al_begin_io_nonblock(struct drbd_device *mdev, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_device *mdev, bool delegate);
extern bool drbd_al_begin_io_fastpath(struct drbd_device *mdev, struct drbd_interval *i);
extern void drbd_al_begin_io(struct drbd_device *mdev, struct drbd_interval *i, bool delegate);
extern void drbd_al_complete_io(struct drbd_device *mdev, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *mdev, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_device *mdev, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *mdev);
extern int drbd_rs_del_all(struct drbd_device *mdev);
extern void drbd_rs_failed_io(struct drbd_device *mdev,
extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate);
extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate);
extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *device);
extern int drbd_rs_del_all(struct drbd_device *device);
extern void drbd_rs_failed_io(struct drbd_device *device,
sector_t sector, int size);
extern void drbd_advance_rs_marks(struct drbd_device *mdev, unsigned long still_to_go);
extern void __drbd_set_in_sync(struct drbd_device *mdev, sector_t sector,
extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
extern void __drbd_set_in_sync(struct drbd_device *device, sector_t sector,
int size, const char *file, const unsigned int line);
#define drbd_set_in_sync(mdev, sector, size) \
__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
extern int __drbd_set_out_of_sync(struct drbd_device *mdev, sector_t sector,
#define drbd_set_in_sync(device, sector, size) \
__drbd_set_in_sync(device, sector, size, __FILE__, __LINE__)
extern int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector,
int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(mdev, sector, size) \
__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
extern void drbd_al_shrink(struct drbd_device *mdev);
#define drbd_set_out_of_sync(device, sector, size) \
__drbd_set_out_of_sync(device, sector, size, __FILE__, __LINE__)
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_initialize_al(struct drbd_device *, void *);
/* drbd_nl.c */
@ -1375,7 +1375,7 @@ struct sib_info {
};
};
};
void drbd_bcast_event(struct drbd_device *mdev, const struct sib_info *sib);
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
/*
* inline helper functions
@ -1404,26 +1404,26 @@ static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_r
}
static inline enum drbd_state_rv
_drbd_set_state(struct drbd_device *mdev, union drbd_state ns,
_drbd_set_state(struct drbd_device *device, union drbd_state ns,
enum chg_state_flags flags, struct completion *done)
{
enum drbd_state_rv rv;
read_lock(&global_state_lock);
rv = __drbd_set_state(mdev, ns, flags, done);
rv = __drbd_set_state(device, ns, flags, done);
read_unlock(&global_state_lock);
return rv;
}
static inline union drbd_state drbd_read_state(struct drbd_device *mdev)
static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
union drbd_state rv;
rv.i = mdev->state.i;
rv.susp = mdev->tconn->susp;
rv.susp_nod = mdev->tconn->susp_nod;
rv.susp_fen = mdev->tconn->susp_fen;
rv.i = device->state.i;
rv.susp = device->tconn->susp;
rv.susp_nod = device->tconn->susp_nod;
rv.susp_fen = device->tconn->susp_fen;
return rv;
}
@ -1436,22 +1436,22 @@ enum drbd_force_detach_flags {
};
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_device *mdev,
static inline void __drbd_chk_io_error_(struct drbd_device *device,
enum drbd_force_detach_flags df,
const char *where)
{
enum drbd_io_error_p ep;
rcu_read_lock();
ep = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
rcu_read_unlock();
switch (ep) {
case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Local IO failed in %s.\n", where);
if (mdev->state.disk > D_INCONSISTENT)
_drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
if (device->state.disk > D_INCONSISTENT)
_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
/* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
@ -1477,13 +1477,13 @@ static inline void __drbd_chk_io_error_(struct drbd_device *mdev,
* we read meta data only once during attach,
* which will fail in case of errors.
*/
set_bit(WAS_IO_ERROR, &mdev->flags);
set_bit(WAS_IO_ERROR, &device->flags);
if (df == DRBD_READ_ERROR)
set_bit(WAS_READ_ERROR, &mdev->flags);
set_bit(WAS_READ_ERROR, &device->flags);
if (df == DRBD_FORCE_DETACH)
set_bit(FORCE_DETACH, &mdev->flags);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
set_bit(FORCE_DETACH, &device->flags);
if (device->state.disk > D_FAILED) {
_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
dev_err(DEV,
"Local IO failed in %s. Detaching...\n", where);
}
@ -1493,21 +1493,21 @@ static inline void __drbd_chk_io_error_(struct drbd_device *mdev,
/**
* drbd_chk_io_error: Handle the on_io_error setting, should be called from all io completion handlers
* @mdev: DRBD device.
* @device: DRBD device.
* @error: Error code passed to the IO completion callback
* @forcedetach: Force detach. I.e. the error happened while accessing the meta data
*
* See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
*/
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_device *mdev,
static inline void drbd_chk_io_error_(struct drbd_device *device,
int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
if (error) {
unsigned long flags;
spin_lock_irqsave(&mdev->tconn->req_lock, flags);
__drbd_chk_io_error_(mdev, forcedetach, where);
spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
spin_lock_irqsave(&device->tconn->req_lock, flags);
__drbd_chk_io_error_(device, forcedetach, where);
spin_unlock_irqrestore(&device->tconn->req_lock, flags);
}
}
@ -1693,22 +1693,22 @@ static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
* _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
* [from tl_clear_barrier]
*/
static inline void inc_ap_pending(struct drbd_device *mdev)
static inline void inc_ap_pending(struct drbd_device *device)
{
atomic_inc(&mdev->ap_pending_cnt);
atomic_inc(&device->ap_pending_cnt);
}
#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
if (atomic_read(&mdev->which) < 0) \
if (atomic_read(&device->which) < 0) \
dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \
func, line, \
atomic_read(&mdev->which))
atomic_read(&device->which))
#define dec_ap_pending(mdev) _dec_ap_pending(mdev, __FUNCTION__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *mdev, const char *func, int line)
#define dec_ap_pending(device) _dec_ap_pending(device, __FUNCTION__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
if (atomic_dec_and_test(&mdev->ap_pending_cnt))
wake_up(&mdev->misc_wait);
if (atomic_dec_and_test(&device->ap_pending_cnt))
wake_up(&device->misc_wait);
ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}
@ -1718,15 +1718,15 @@ static inline void _dec_ap_pending(struct drbd_device *mdev, const char *func, i
* C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER)
* (or P_NEG_ACK with ID_SYNCER)
*/
static inline void inc_rs_pending(struct drbd_device *mdev)
static inline void inc_rs_pending(struct drbd_device *device)
{
atomic_inc(&mdev->rs_pending_cnt);
atomic_inc(&device->rs_pending_cnt);
}
#define dec_rs_pending(mdev) _dec_rs_pending(mdev, __FUNCTION__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *mdev, const char *func, int line)
#define dec_rs_pending(device) _dec_rs_pending(device, __FUNCTION__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
atomic_dec(&mdev->rs_pending_cnt);
atomic_dec(&device->rs_pending_cnt);
ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}
@ -1739,37 +1739,37 @@ static inline void _dec_rs_pending(struct drbd_device *mdev, const char *func, i
* receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
* receive_Barrier_* we need to send a P_BARRIER_ACK
*/
static inline void inc_unacked(struct drbd_device *mdev)
static inline void inc_unacked(struct drbd_device *device)
{
atomic_inc(&mdev->unacked_cnt);
atomic_inc(&device->unacked_cnt);
}
#define dec_unacked(mdev) _dec_unacked(mdev, __FUNCTION__, __LINE__)
static inline void _dec_unacked(struct drbd_device *mdev, const char *func, int line)
#define dec_unacked(device) _dec_unacked(device, __FUNCTION__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
atomic_dec(&mdev->unacked_cnt);
atomic_dec(&device->unacked_cnt);
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
#define sub_unacked(mdev, n) _sub_unacked(mdev, n, __FUNCTION__, __LINE__)
static inline void _sub_unacked(struct drbd_device *mdev, int n, const char *func, int line)
#define sub_unacked(device, n) _sub_unacked(device, n, __FUNCTION__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
atomic_sub(n, &mdev->unacked_cnt);
atomic_sub(n, &device->unacked_cnt);
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
/**
* get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev
* get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
* @M: DRBD device.
*
* You have to call put_ldev() when finished working with mdev->ldev.
* You have to call put_ldev() when finished working with device->ldev.
*/
#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
static inline void put_ldev(struct drbd_device *mdev)
static inline void put_ldev(struct drbd_device *device)
{
int i = atomic_dec_return(&mdev->local_cnt);
int i = atomic_dec_return(&device->local_cnt);
/* This may be called from some endio handler,
* so we must not sleep here. */
@ -1777,56 +1777,56 @@ static inline void put_ldev(struct drbd_device *mdev)
__release(local);
D_ASSERT(i >= 0);
if (i == 0) {
if (mdev->state.disk == D_DISKLESS)
if (device->state.disk == D_DISKLESS)
/* even internal references gone, safe to destroy */
drbd_ldev_destroy(mdev);
if (mdev->state.disk == D_FAILED) {
drbd_ldev_destroy(device);
if (device->state.disk == D_FAILED) {
/* all application IO references gone. */
if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
if (!test_and_set_bit(GO_DISKLESS, &device->flags))
drbd_queue_work(&device->tconn->sender_work, &device->go_diskless);
}
wake_up(&mdev->misc_wait);
wake_up(&device->misc_wait);
}
}
#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_device *mdev, enum drbd_disk_state mins)
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
int io_allowed;
/* never get a reference while D_DISKLESS */
if (mdev->state.disk == D_DISKLESS)
if (device->state.disk == D_DISKLESS)
return 0;
atomic_inc(&mdev->local_cnt);
io_allowed = (mdev->state.disk >= mins);
atomic_inc(&device->local_cnt);
io_allowed = (device->state.disk >= mins);
if (!io_allowed)
put_ldev(mdev);
put_ldev(device);
return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *mdev, enum drbd_disk_state mins);
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif
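
The get_ldev()/put_ldev() pair above is the local-disk reference guard this patch touches in nearly every file: get_ldev() only succeeds while the disk is at least D_INCONSISTENT, and every successful get must be balanced by a put_ldev() before the device can be detached or destroyed. A minimal usage sketch of that pattern, mirroring the call sites further down in this diff (the helper name is hypothetical):

/* Sketch: guard any dereference of device->ldev with a local reference.
 * While the reference is held the backing device cannot go away;
 * put_ldev() releases it and, on the last put, may trigger detach/destroy. */
static void example_touch_backing_dev(struct drbd_device *device)
{
        if (!get_ldev(device))
                return;         /* diskless or detaching: nothing to do */

        inspect_backing_bdev(device->ldev->backing_bdev);       /* hypothetical helper */

        put_ldev(device);
}
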
/* you must have an "get_ldev" reference */
static inline void drbd_get_syncer_progress(struct drbd_device *mdev,
static inline void drbd_get_syncer_progress(struct drbd_device *device,
unsigned long *bits_left, unsigned int *per_mil_done)
{
/* this is to break it at compile time when we change that, in case we
* want to support more than (1<<32) bits on a 32bit arch. */
typecheck(unsigned long, mdev->rs_total);
typecheck(unsigned long, device->rs_total);
/* note: both rs_total and rs_left are in bits, i.e. in
* units of BM_BLOCK_SIZE.
* for the percentage, we don't care. */
if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
*bits_left = mdev->ov_left;
if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
*bits_left = device->ov_left;
else
*bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
*bits_left = drbd_bm_total_weight(device) - device->rs_failed;
/* >> 10 to prevent overflow,
* +1 to prevent division by zero */
if (*bits_left > mdev->rs_total) {
if (*bits_left > device->rs_total) {
/* doh. maybe a logic bug somewhere.
* may also be just a race condition
* between this and a disconnect during sync.
@ -1834,8 +1834,8 @@ static inline void drbd_get_syncer_progress(struct drbd_device *mdev,
*/
smp_rmb();
dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
drbd_conn_str(mdev->state.conn),
*bits_left, mdev->rs_total, mdev->rs_failed);
drbd_conn_str(device->state.conn),
*bits_left, device->rs_total, device->rs_failed);
*per_mil_done = 0;
} else {
/* Make sure the division happens in long context.
@ -1847,9 +1847,9 @@ static inline void drbd_get_syncer_progress(struct drbd_device *mdev,
* Note: currently we don't support such large bitmaps on 32bit
* arch anyways, but no harm done to be prepared for it here.
*/
unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
unsigned int shift = device->rs_total > UINT_MAX ? 16 : 10;
unsigned long left = *bits_left >> shift;
unsigned long total = 1UL + (mdev->rs_total >> shift);
unsigned long total = 1UL + (device->rs_total >> shift);
unsigned long tmp = 1000UL - left * 1000UL/total;
*per_mil_done = tmp;
}
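
The shift in drbd_get_syncer_progress() above is easy to misread: both operands are scaled down (by 10, or by 16 for huge bitmaps) before the multiplication so that left * 1000 cannot overflow an unsigned long on 32-bit, and the +1 keeps the divisor non-zero. A stand-alone sketch of the same per-mille arithmetic:

/* Sketch: resync progress in per mille, overflow-safe on 32-bit,
 * following the computation in drbd_get_syncer_progress() above. */
static unsigned int resync_per_mil_done(unsigned long bits_left, unsigned long rs_total)
{
        unsigned int shift = rs_total > UINT_MAX ? 16 : 10;
        unsigned long left  = bits_left >> shift;
        unsigned long total = 1UL + (rs_total >> shift);        /* +1: never divide by zero */

        return 1000UL - left * 1000UL / total;
}

For example, bits_left = 25000 and rs_total = 100000 gives 1000 - 24*1000/98 = 756, i.e. roughly 75.6 % done; the scaling costs a little precision compared to the exact 750, but never overflows.
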
@ -1859,22 +1859,22 @@ static inline void drbd_get_syncer_progress(struct drbd_device *mdev,
/* this throttles on-the-fly application requests
* according to max_buffers settings;
* maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_device *mdev)
static inline int drbd_get_max_buffers(struct drbd_device *device)
{
struct net_conf *nc;
int mxb;
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
nc = rcu_dereference(device->tconn->net_conf);
mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
rcu_read_unlock();
return mxb;
}
static inline int drbd_state_is_stable(struct drbd_device *mdev)
static inline int drbd_state_is_stable(struct drbd_device *device)
{
union drbd_dev_state s = mdev->state;
union drbd_dev_state s = device->state;
/* DO NOT add a default clause, we want the compiler to warn us
* for any newly introduced state we may have forgotten to add here */
@ -1908,7 +1908,7 @@ static inline int drbd_state_is_stable(struct drbd_device *mdev)
/* Allow IO in BM exchange states with new protocols */
case C_WF_BITMAP_S:
if (mdev->tconn->agreed_pro_version < 96)
if (device->tconn->agreed_pro_version < 96)
return 0;
break;
@ -1942,20 +1942,20 @@ static inline int drbd_state_is_stable(struct drbd_device *mdev)
return 1;
}
static inline int drbd_suspended(struct drbd_device *mdev)
static inline int drbd_suspended(struct drbd_device *device)
{
struct drbd_tconn *tconn = mdev->tconn;
struct drbd_tconn *tconn = device->tconn;
return tconn->susp || tconn->susp_fen || tconn->susp_nod;
}
static inline bool may_inc_ap_bio(struct drbd_device *mdev)
static inline bool may_inc_ap_bio(struct drbd_device *device)
{
int mxb = drbd_get_max_buffers(mdev);
int mxb = drbd_get_max_buffers(device);
if (drbd_suspended(mdev))
if (drbd_suspended(device))
return false;
if (test_bit(SUSPEND_IO, &mdev->flags))
if (test_bit(SUSPEND_IO, &device->flags))
return false;
/* to avoid potential deadlock or bitmap corruption,
@ -1963,32 +1963,32 @@ static inline bool may_inc_ap_bio(struct drbd_device *mdev)
* to start during "stable" states. */
/* no new io accepted when attaching or detaching the disk */
if (!drbd_state_is_stable(mdev))
if (!drbd_state_is_stable(device))
return false;
/* since some older kernels don't have atomic_add_unless,
* and we are within the spinlock anyways, we have this workaround. */
if (atomic_read(&mdev->ap_bio_cnt) > mxb)
if (atomic_read(&device->ap_bio_cnt) > mxb)
return false;
if (test_bit(BITMAP_IO, &mdev->flags))
if (test_bit(BITMAP_IO, &device->flags))
return false;
return true;
}
static inline bool inc_ap_bio_cond(struct drbd_device *mdev)
static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
bool rv = false;
spin_lock_irq(&mdev->tconn->req_lock);
rv = may_inc_ap_bio(mdev);
spin_lock_irq(&device->tconn->req_lock);
rv = may_inc_ap_bio(device);
if (rv)
atomic_inc(&mdev->ap_bio_cnt);
spin_unlock_irq(&mdev->tconn->req_lock);
atomic_inc(&device->ap_bio_cnt);
spin_unlock_irq(&device->tconn->req_lock);
return rv;
}
static inline void inc_ap_bio(struct drbd_device *mdev)
static inline void inc_ap_bio(struct drbd_device *device)
{
/* we wait here
* as long as the device is suspended
@ -1998,42 +1998,42 @@ static inline void inc_ap_bio(struct drbd_device *mdev)
* to avoid races with the reconnect code,
* we need to atomic_inc within the spinlock. */
wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev));
wait_event(device->misc_wait, inc_ap_bio_cond(device));
}
static inline void dec_ap_bio(struct drbd_device *mdev)
static inline void dec_ap_bio(struct drbd_device *device)
{
int mxb = drbd_get_max_buffers(mdev);
int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
int mxb = drbd_get_max_buffers(device);
int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
D_ASSERT(ap_bio >= 0);
if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
drbd_queue_work(&device->tconn->sender_work, &device->bm_io_work.w);
}
/* this currently does wake_up for every dec_ap_bio!
* maybe rather introduce some type of hysteresis?
* e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
if (ap_bio < mxb)
wake_up(&mdev->misc_wait);
wake_up(&device->misc_wait);
}
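
inc_ap_bio() and dec_ap_bio() above bracket every application bio: the increment may sleep until IO is allowed again (not suspended, state stable, below max_buffers, no pending bitmap IO), and the decrement kicks queued bitmap work and wakes throttled submitters. A compressed sketch of the pairing as the request path later in this patch uses it (drbd_make_request() increments, complete_master_bio() decrements; the middle step is a hypothetical stand-in):

/* Sketch: throttling bracket around one application bio. */
static void example_submit_one_bio(struct drbd_device *device, struct bio *bio)
{
        inc_ap_bio(device);             /* may block until may_inc_ap_bio() holds */

        route_bio_locally_or_to_peer(device, bio);      /* hypothetical stand-in for
                                                         * __drbd_make_request() */

        dec_ap_bio(device);             /* normally done from complete_master_bio() */
}
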
static inline bool verify_can_do_stop_sector(struct drbd_device *mdev)
static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
return mdev->tconn->agreed_pro_version >= 97 &&
mdev->tconn->agreed_pro_version != 100;
return device->tconn->agreed_pro_version >= 97 &&
device->tconn->agreed_pro_version != 100;
}
static inline int drbd_set_ed_uuid(struct drbd_device *mdev, u64 val)
static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
int changed = mdev->ed_uuid != val;
mdev->ed_uuid = val;
int changed = device->ed_uuid != val;
device->ed_uuid = val;
return changed;
}
static inline int drbd_queue_order_type(struct drbd_device *mdev)
static inline int drbd_queue_order_type(struct drbd_device *device)
{
/* sorry, we currently have no working implementation
* of distributed TCQ stuff */
@ -2043,21 +2043,21 @@ static inline int drbd_queue_order_type(struct drbd_device *mdev)
return QUEUE_ORDERED_NONE;
}
static inline void drbd_md_flush(struct drbd_device *mdev)
static inline void drbd_md_flush(struct drbd_device *device)
{
int r;
if (mdev->ldev == NULL) {
dev_warn(DEV, "mdev->ldev == NULL in drbd_md_flush\n");
if (device->ldev == NULL) {
dev_warn(DEV, "device->ldev == NULL in drbd_md_flush\n");
return;
}
if (test_bit(MD_NO_FUA, &mdev->flags))
if (test_bit(MD_NO_FUA, &device->flags))
return;
r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
r = blkdev_issue_flush(device->ldev->md_bdev, GFP_NOIO, NULL);
if (r) {
set_bit(MD_NO_FUA, &mdev->flags);
set_bit(MD_NO_FUA, &device->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -66,14 +66,14 @@ static void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
* [=====>..............] 33.5% (23456/123456)
* finish: 2:20:20 speed: 6,345 (6,456) K/sec
*/
static void drbd_syncer_progress(struct drbd_device *mdev, struct seq_file *seq)
static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq)
{
unsigned long db, dt, dbdt, rt, rs_left;
unsigned int res;
int i, x, y;
int stalled = 0;
drbd_get_syncer_progress(mdev, &rs_left, &res);
drbd_get_syncer_progress(device, &rs_left, &res);
x = res/50;
y = 20-x;
@ -85,21 +85,21 @@ static void drbd_syncer_progress(struct drbd_device *mdev, struct seq_file *seq)
seq_printf(seq, ".");
seq_printf(seq, "] ");
if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
seq_printf(seq, "verified:");
else
seq_printf(seq, "sync'ed:");
seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);
/* if more than a few GB, display in MB */
if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
if (device->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
seq_printf(seq, "(%lu/%lu)M",
(unsigned long) Bit2KB(rs_left >> 10),
(unsigned long) Bit2KB(mdev->rs_total >> 10));
(unsigned long) Bit2KB(device->rs_total >> 10));
else
seq_printf(seq, "(%lu/%lu)K\n\t",
(unsigned long) Bit2KB(rs_left),
(unsigned long) Bit2KB(mdev->rs_total));
(unsigned long) Bit2KB(device->rs_total));
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
@ -114,14 +114,14 @@ static void drbd_syncer_progress(struct drbd_device *mdev, struct seq_file *seq)
* at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at
* least DRBD_SYNC_MARK_STEP time before it will be modified. */
/* ------------------------ ~18s average ------------------------ */
i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS;
dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
i = (device->rs_last_mark + 2) % DRBD_SYNC_MARKS;
dt = (jiffies - device->rs_mark_time[i]) / HZ;
if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS))
stalled = 1;
if (!dt)
dt++;
db = mdev->rs_mark_left[i] - rs_left;
db = device->rs_mark_left[i] - rs_left;
rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */
seq_printf(seq, "finish: %lu:%02lu:%02lu",
@ -134,11 +134,11 @@ static void drbd_syncer_progress(struct drbd_device *mdev, struct seq_file *seq)
/* ------------------------- ~3s average ------------------------ */
if (proc_details >= 1) {
/* this is what drbd_rs_should_slow_down() uses */
i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
dt = (jiffies - device->rs_mark_time[i]) / HZ;
if (!dt)
dt++;
db = mdev->rs_mark_left[i] - rs_left;
db = device->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
seq_printf(seq, " -- ");
@ -147,34 +147,34 @@ static void drbd_syncer_progress(struct drbd_device *mdev, struct seq_file *seq)
/* --------------------- long term average ---------------------- */
/* mean speed since syncer started
* we do account for PausedSync periods */
dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
if (dt == 0)
dt = 1;
db = mdev->rs_total - rs_left;
db = device->rs_total - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
seq_printf(seq, ")");
if (mdev->state.conn == C_SYNC_TARGET ||
mdev->state.conn == C_VERIFY_S) {
if (device->state.conn == C_SYNC_TARGET ||
device->state.conn == C_VERIFY_S) {
seq_printf(seq, " want: ");
seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate);
seq_printf_with_thousands_grouping(seq, device->c_sync_rate);
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
if (proc_details >= 1) {
/* 64 bit:
* we convert to sectors in the display below. */
unsigned long bm_bits = drbd_bm_bits(mdev);
unsigned long bm_bits = drbd_bm_bits(device);
unsigned long bit_pos;
unsigned long long stop_sector = 0;
if (mdev->state.conn == C_VERIFY_S ||
mdev->state.conn == C_VERIFY_T) {
bit_pos = bm_bits - mdev->ov_left;
if (verify_can_do_stop_sector(mdev))
stop_sector = mdev->ov_stop_sector;
if (device->state.conn == C_VERIFY_S ||
device->state.conn == C_VERIFY_T) {
bit_pos = bm_bits - device->ov_left;
if (verify_can_do_stop_sector(device))
stop_sector = device->ov_stop_sector;
} else
bit_pos = mdev->bm_resync_fo;
bit_pos = device->bm_resync_fo;
/* Total sectors may be slightly off for oddly
* sized devices. So what. */
seq_printf(seq,
@ -202,7 +202,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
{
int i, prev_i = -1;
const char *sn;
struct drbd_device *mdev;
struct drbd_device *device;
struct net_conf *nc;
char wp;
@ -236,72 +236,72 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
*/
rcu_read_lock();
idr_for_each_entry(&minors, mdev, i) {
idr_for_each_entry(&minors, device, i) {
if (prev_i != i - 1)
seq_printf(seq, "\n");
prev_i = i;
sn = drbd_conn_str(mdev->state.conn);
sn = drbd_conn_str(device->state.conn);
if (mdev->state.conn == C_STANDALONE &&
mdev->state.disk == D_DISKLESS &&
mdev->state.role == R_SECONDARY) {
if (device->state.conn == C_STANDALONE &&
device->state.disk == D_DISKLESS &&
device->state.role == R_SECONDARY) {
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
/* reset mdev->congestion_reason */
bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
/* reset device->congestion_reason */
bdi_rw_congested(&device->rq_queue->backing_dev_info);
nc = rcu_dereference(mdev->tconn->net_conf);
nc = rcu_dereference(device->tconn->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
"lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c",
i, sn,
drbd_role_str(mdev->state.role),
drbd_role_str(mdev->state.peer),
drbd_disk_str(mdev->state.disk),
drbd_disk_str(mdev->state.pdsk),
drbd_role_str(device->state.role),
drbd_role_str(device->state.peer),
drbd_disk_str(device->state.disk),
drbd_disk_str(device->state.pdsk),
wp,
drbd_suspended(mdev) ? 's' : 'r',
mdev->state.aftr_isp ? 'a' : '-',
mdev->state.peer_isp ? 'p' : '-',
mdev->state.user_isp ? 'u' : '-',
mdev->congestion_reason ?: '-',
test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
mdev->send_cnt/2,
mdev->recv_cnt/2,
mdev->writ_cnt/2,
mdev->read_cnt/2,
mdev->al_writ_cnt,
mdev->bm_writ_cnt,
atomic_read(&mdev->local_cnt),
atomic_read(&mdev->ap_pending_cnt) +
atomic_read(&mdev->rs_pending_cnt),
atomic_read(&mdev->unacked_cnt),
atomic_read(&mdev->ap_bio_cnt),
mdev->tconn->epochs,
write_ordering_chars[mdev->tconn->write_ordering]
drbd_suspended(device) ? 's' : 'r',
device->state.aftr_isp ? 'a' : '-',
device->state.peer_isp ? 'p' : '-',
device->state.user_isp ? 'u' : '-',
device->congestion_reason ?: '-',
test_bit(AL_SUSPENDED, &device->flags) ? 's' : '-',
device->send_cnt/2,
device->recv_cnt/2,
device->writ_cnt/2,
device->read_cnt/2,
device->al_writ_cnt,
device->bm_writ_cnt,
atomic_read(&device->local_cnt),
atomic_read(&device->ap_pending_cnt) +
atomic_read(&device->rs_pending_cnt),
atomic_read(&device->unacked_cnt),
atomic_read(&device->ap_bio_cnt),
device->tconn->epochs,
write_ordering_chars[device->tconn->write_ordering]
);
seq_printf(seq, " oos:%llu\n",
Bit2KB((unsigned long long)
drbd_bm_total_weight(mdev)));
drbd_bm_total_weight(device)));
}
if (mdev->state.conn == C_SYNC_SOURCE ||
mdev->state.conn == C_SYNC_TARGET ||
mdev->state.conn == C_VERIFY_S ||
mdev->state.conn == C_VERIFY_T)
drbd_syncer_progress(mdev, seq);
if (device->state.conn == C_SYNC_SOURCE ||
device->state.conn == C_SYNC_TARGET ||
device->state.conn == C_VERIFY_S ||
device->state.conn == C_VERIFY_T)
drbd_syncer_progress(device, seq);
if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) {
lc_seq_printf_stats(seq, mdev->resync);
lc_seq_printf_stats(seq, mdev->act_log);
put_ldev(mdev);
if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
lc_seq_printf_stats(seq, device->resync);
lc_seq_printf_stats(seq, device->act_log);
put_ldev(device);
}
if (proc_details >= 2) {
if (mdev->resync) {
lc_seq_dump_details(seq, mdev->resync, "rs_left",
if (device->resync) {
lc_seq_dump_details(seq, device->resync, "rs_left",
resync_dump_detail);
}
}
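
For reference, the "finish:" estimate printed by drbd_syncer_progress() above comes from the roughly 18 second old sync mark: take how many bits were cleared since that mark and extrapolate the remaining bits at that rate. A simplified stand-alone sketch of the arithmetic (kernel context assumed; inputs in bits and jiffies, as in the driver):

/* Sketch: estimated seconds until resync finishes, derived from one
 * ~18s-old sync mark the way drbd_syncer_progress() does above. */
static unsigned long resync_eta_seconds(unsigned long rs_left,
                                        unsigned long mark_left,
                                        unsigned long mark_time_jiffies)
{
        unsigned long dt = (jiffies - mark_time_jiffies) / HZ;  /* seconds since the mark */
        unsigned long db = mark_left - rs_left;                 /* bits synced since the mark */

        if (!dt)
                dt++;
        /* db/100 + 1 avoids a zero divisor and keeps intermediates small */
        return (dt * (rs_left / (db / 100 + 1))) / 100;
}
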

File diff suppressed because it is too large


@ -31,37 +31,37 @@
#include "drbd_req.h"
static bool drbd_may_do_local_read(struct drbd_device *mdev, sector_t sector, int size);
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *mdev, struct drbd_request *req)
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
const int rw = bio_data_dir(req->master_bio);
int cpu;
cpu = part_stat_lock();
part_round_stats(cpu, &mdev->vdisk->part0);
part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9);
part_round_stats(cpu, &device->vdisk->part0);
part_stat_inc(cpu, &device->vdisk->part0, ios[rw]);
part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9);
(void) cpu; /* The macro invocations above want the cpu argument, I do not like
the compiler warning about cpu only assigned but never used... */
part_inc_in_flight(&mdev->vdisk->part0, rw);
part_inc_in_flight(&device->vdisk->part0, rw);
part_stat_unlock();
}
/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *mdev, struct drbd_request *req)
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
int rw = bio_data_dir(req->master_bio);
unsigned long duration = jiffies - req->start_time;
int cpu;
cpu = part_stat_lock();
part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
part_round_stats(cpu, &mdev->vdisk->part0);
part_dec_in_flight(&mdev->vdisk->part0, rw);
part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration);
part_round_stats(cpu, &device->vdisk->part0);
part_dec_in_flight(&device->vdisk->part0, rw);
part_stat_unlock();
}
static struct drbd_request *drbd_req_new(struct drbd_device *mdev,
static struct drbd_request *drbd_req_new(struct drbd_device *device,
struct bio *bio_src)
{
struct drbd_request *req;
@ -72,7 +72,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *mdev,
drbd_req_make_private_bio(req, bio_src);
req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
req->w.mdev = mdev;
req->w.device = device;
req->master_bio = bio_src;
req->epoch = 0;
@ -95,7 +95,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *mdev,
void drbd_req_destroy(struct kref *kref)
{
struct drbd_request *req = container_of(kref, struct drbd_request, kref);
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
const unsigned s = req->rq_state;
if ((req->master_bio && !(s & RQ_POSTPONED)) ||
@ -132,10 +132,10 @@ void drbd_req_destroy(struct kref *kref)
*/
if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
drbd_set_out_of_sync(device, req->i.sector, req->i.size);
if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
drbd_set_in_sync(mdev, req->i.sector, req->i.size);
drbd_set_in_sync(device, req->i.sector, req->i.size);
}
/* one might be tempted to move the drbd_al_complete_io
@ -149,9 +149,9 @@ void drbd_req_destroy(struct kref *kref)
* we would forget to resync the corresponding extent.
*/
if (s & RQ_IN_ACT_LOG) {
if (get_ldev_if_state(mdev, D_FAILED)) {
drbd_al_complete_io(mdev, &req->i);
put_ldev(mdev);
if (get_ldev_if_state(device, D_FAILED)) {
drbd_al_complete_io(device, &req->i);
put_ldev(device);
} else if (__ratelimit(&drbd_ratelimit_state)) {
dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
"but my Disk seems to have failed :(\n",
@ -179,25 +179,25 @@ void start_new_tl_epoch(struct drbd_tconn *tconn)
wake_all_senders(tconn);
}
void complete_master_bio(struct drbd_device *mdev,
void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m)
{
bio_endio(m->bio, m->error);
dec_ap_bio(mdev);
dec_ap_bio(device);
}
static void drbd_remove_request_interval(struct rb_root *root,
struct drbd_request *req)
{
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
struct drbd_interval *i = &req->i;
drbd_remove_interval(root, i);
/* Wake up any processes waiting for this request to complete. */
if (i->waiting)
wake_up(&mdev->misc_wait);
wake_up(&device->misc_wait);
}
/* Helper for __req_mod().
@ -210,7 +210,7 @@ static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
const unsigned s = req->rq_state;
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
int rw;
int error, ok;
@ -259,9 +259,9 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
struct rb_root *root;
if (rw == WRITE)
root = &mdev->write_requests;
root = &device->write_requests;
else
root = &mdev->read_requests;
root = &device->read_requests;
drbd_remove_request_interval(root, req);
}
@ -273,11 +273,11 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
* and reset the transfer log epoch write_cnt.
*/
if (rw == WRITE &&
req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
start_new_tl_epoch(mdev->tconn);
req->epoch == atomic_read(&device->tconn->current_tle_nr))
start_new_tl_epoch(device->tconn);
/* Update disk stats */
_drbd_end_io_acct(mdev, req);
_drbd_end_io_acct(device, req);
/* If READ failed,
* have it be pushed back to the retry work queue,
@ -305,7 +305,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
D_ASSERT(m || (req->rq_state & RQ_POSTPONED));
if (!atomic_sub_and_test(put, &req->completion_ref))
@ -328,12 +328,12 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
int clear, int set)
{
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
unsigned s = req->rq_state;
int c_put = 0;
int k_put = 0;
if (drbd_suspended(mdev) && !((s | clear) & RQ_COMPLETION_SUSP))
if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
set |= RQ_COMPLETION_SUSP;
/* apply */
@ -351,7 +351,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
atomic_inc(&req->completion_ref);
if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
inc_ap_pending(mdev);
inc_ap_pending(device);
atomic_inc(&req->completion_ref);
}
@ -362,7 +362,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
kref_get(&req->kref); /* wait for the DONE */
if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT))
atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
atomic_add(req->i.size >> 9, &device->ap_in_flight);
if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
atomic_inc(&req->completion_ref);
@ -388,7 +388,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
}
if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
dec_ap_pending(mdev);
dec_ap_pending(device);
++c_put;
}
@ -397,7 +397,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
if (req->rq_state & RQ_NET_SENT)
atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
atomic_sub(req->i.size >> 9, &device->ap_in_flight);
++k_put;
}
@ -416,7 +416,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
/* If we made progress, retry conflicting peer requests, if any. */
if (req->i.waiting)
wake_up(&mdev->misc_wait);
wake_up(&device->misc_wait);
if (c_put)
k_put += drbd_req_put_completion_ref(req, m, c_put);
@ -424,7 +424,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
kref_sub(&req->kref, k_put, drbd_req_destroy);
}
static void drbd_report_io_error(struct drbd_device *mdev, struct drbd_request *req)
static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
char b[BDEVNAME_SIZE];
@ -435,7 +435,7 @@ static void drbd_report_io_error(struct drbd_device *mdev, struct drbd_request *
(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
(unsigned long long)req->i.sector,
req->i.size >> 9,
bdevname(mdev->ldev->backing_bdev, b));
bdevname(device->ldev->backing_bdev, b));
}
/* obviously this could be coded as many single functions
@ -453,7 +453,7 @@ static void drbd_report_io_error(struct drbd_device *mdev, struct drbd_request *
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m)
{
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
struct net_conf *nc;
int p, rv = 0;
@ -476,7 +476,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* and from w_read_retry_remote */
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
nc = rcu_dereference(device->tconn->net_conf);
p = nc->wire_protocol;
rcu_read_unlock();
req->rq_state |=
@ -493,9 +493,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
case COMPLETED_OK:
if (req->rq_state & RQ_WRITE)
mdev->writ_cnt += req->i.size >> 9;
device->writ_cnt += req->i.size >> 9;
else
mdev->read_cnt += req->i.size >> 9;
device->read_cnt += req->i.size >> 9;
mod_rq_state(req, m, RQ_LOCAL_PENDING,
RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
@ -506,15 +506,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case WRITE_COMPLETED_WITH_ERROR:
drbd_report_io_error(mdev, req);
__drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
drbd_report_io_error(device, req);
__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
case READ_COMPLETED_WITH_ERROR:
drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
drbd_report_io_error(mdev, req);
__drbd_chk_io_error(mdev, DRBD_READ_ERROR);
drbd_set_out_of_sync(device, req->i.sector, req->i.size);
drbd_report_io_error(device, req);
__drbd_chk_io_error(device, DRBD_READ_ERROR);
/* fall through. */
case READ_AHEAD_COMPLETED_WITH_ERROR:
/* it is legal to fail READA, no __drbd_chk_io_error in that case. */
@ -533,15 +533,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* Corresponding drbd_remove_request_interval is in
* drbd_req_complete() */
D_ASSERT(drbd_interval_empty(&req->i));
drbd_insert_interval(&mdev->read_requests, &req->i);
drbd_insert_interval(&device->read_requests, &req->i);
set_bit(UNPLUG_REMOTE, &mdev->flags);
set_bit(UNPLUG_REMOTE, &device->flags);
D_ASSERT(req->rq_state & RQ_NET_PENDING);
D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_read_req;
drbd_queue_work(&mdev->tconn->sender_work, &req->w);
drbd_queue_work(&device->tconn->sender_work, &req->w);
break;
case QUEUE_FOR_NET_WRITE:
@ -551,7 +551,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* Corresponding drbd_remove_request_interval is in
* drbd_req_complete() */
D_ASSERT(drbd_interval_empty(&req->i));
drbd_insert_interval(&mdev->write_requests, &req->i);
drbd_insert_interval(&device->write_requests, &req->i);
/* NOTE
* In case the req ended up on the transfer log before being
@ -570,28 +570,28 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* otherwise we may lose an unplug, which may cause some remote
* io-scheduler timeout to expire, increasing maximum latency,
* hurting performance. */
set_bit(UNPLUG_REMOTE, &mdev->flags);
set_bit(UNPLUG_REMOTE, &device->flags);
/* queue work item to send data */
D_ASSERT(req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req->w.cb = w_send_dblock;
drbd_queue_work(&mdev->tconn->sender_work, &req->w);
drbd_queue_work(&device->tconn->sender_work, &req->w);
/* close the epoch, in case it outgrew the limit */
rcu_read_lock();
nc = rcu_dereference(mdev->tconn->net_conf);
nc = rcu_dereference(device->tconn->net_conf);
p = nc->max_epoch_size;
rcu_read_unlock();
if (mdev->tconn->current_tle_writes >= p)
start_new_tl_epoch(mdev->tconn);
if (device->tconn->current_tle_writes >= p)
start_new_tl_epoch(device->tconn);
break;
case QUEUE_FOR_SEND_OOS:
mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_out_of_sync;
drbd_queue_work(&mdev->tconn->sender_work, &req->w);
drbd_queue_work(&device->tconn->sender_work, &req->w);
break;
case READ_RETRY_REMOTE_CANCELED:
@ -673,7 +673,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
D_ASSERT(req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_POSTPONED;
if (req->i.waiting)
wake_up(&mdev->misc_wait);
wake_up(&device->misc_wait);
/* Do not clear RQ_NET_PENDING. This request will make further
* progress via restart_conflicting_writes() or
* fail_postponed_requests(). Hopefully. */
@ -701,9 +701,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
if (bio_data_dir(req->master_bio) == WRITE)
rv = MR_WRITE;
get_ldev(mdev); /* always succeeds in this call path */
get_ldev(device); /* always succeeds in this call path */
req->w.cb = w_restart_disk_io;
drbd_queue_work(&mdev->tconn->sender_work, &req->w);
drbd_queue_work(&device->tconn->sender_work, &req->w);
break;
case RESEND:
@ -724,7 +724,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
if (req->w.cb) {
drbd_queue_work(&mdev->tconn->sender_work, &req->w);
drbd_queue_work(&device->tconn->sender_work, &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
} /* else: FIXME can this happen? */
break;
@ -756,7 +756,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case QUEUE_AS_DRBD_BARRIER:
start_new_tl_epoch(mdev->tconn);
start_new_tl_epoch(device->tconn);
mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
break;
};
@ -771,27 +771,27 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* since size may be bigger than BM_BLOCK_SIZE,
* we may need to check several bits.
*/
static bool drbd_may_do_local_read(struct drbd_device *mdev, sector_t sector, int size)
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
unsigned long sbnr, ebnr;
sector_t esector, nr_sectors;
if (mdev->state.disk == D_UP_TO_DATE)
if (device->state.disk == D_UP_TO_DATE)
return true;
if (mdev->state.disk != D_INCONSISTENT)
if (device->state.disk != D_INCONSISTENT)
return false;
esector = sector + (size >> 9) - 1;
nr_sectors = drbd_get_capacity(mdev->this_bdev);
nr_sectors = drbd_get_capacity(device->this_bdev);
D_ASSERT(sector < nr_sectors);
D_ASSERT(esector < nr_sectors);
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}
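
The BM_SECT_TO_BIT() conversions above are what the "size may be bigger than BM_BLOCK_SIZE" comment refers to: one bitmap bit covers one 4 KiB block (assuming DRBD's usual BM_BLOCK_SIZE of 4096 and 512-byte sectors), so a single request can span several bits and all of them must be clean for a local read. A hedged sketch of the mapping:

/* Sketch: which bitmap bits a request [sector, sector + size) touches,
 * assuming 512-byte sectors and 4 KiB per bitmap bit. */
static void request_to_bitmap_bits(sector_t sector, int size,
                                   unsigned long *first_bit, unsigned long *last_bit)
{
        sector_t esector = sector + (size >> 9) - 1;    /* last sector of the request */

        *first_bit = sector  >> 3;      /* 8 sectors == 4 KiB per bit */
        *last_bit  = esector >> 3;      /* > *first_bit when the request spans bits */
}
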
static bool remote_due_to_read_balancing(struct drbd_device *mdev, sector_t sector,
static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
enum drbd_read_balancing rbm)
{
struct backing_dev_info *bdi;
@ -799,11 +799,11 @@ static bool remote_due_to_read_balancing(struct drbd_device *mdev, sector_t sect
switch (rbm) {
case RB_CONGESTED_REMOTE:
bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
return bdi_read_congested(bdi);
case RB_LEAST_PENDING:
return atomic_read(&mdev->local_cnt) >
atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
return atomic_read(&device->local_cnt) >
atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
case RB_32K_STRIPING: /* stripe_shift = 15 */
case RB_64K_STRIPING:
case RB_128K_STRIPING:
@ -813,7 +813,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *mdev, sector_t sect
stripe_shift = (rbm - RB_32K_STRIPING + 15);
return (sector >> (stripe_shift - 9)) & 1;
case RB_ROUND_ROBIN:
return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
return test_and_change_bit(READ_BALANCE_RR, &device->flags);
case RB_PREFER_REMOTE:
return true;
case RB_PREFER_LOCAL:
@ -834,33 +834,33 @@ static bool remote_due_to_read_balancing(struct drbd_device *mdev, sector_t sect
static void complete_conflicting_writes(struct drbd_request *req)
{
DEFINE_WAIT(wait);
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
struct drbd_interval *i;
sector_t sector = req->i.sector;
int size = req->i.size;
i = drbd_find_overlap(&mdev->write_requests, sector, size);
i = drbd_find_overlap(&device->write_requests, sector, size);
if (!i)
return;
for (;;) {
prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
i = drbd_find_overlap(&mdev->write_requests, sector, size);
prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
i = drbd_find_overlap(&device->write_requests, sector, size);
if (!i)
break;
/* Indicate to wake up device->misc_wait on progress. */
i->waiting = true;
spin_unlock_irq(&mdev->tconn->req_lock);
spin_unlock_irq(&device->tconn->req_lock);
schedule();
spin_lock_irq(&mdev->tconn->req_lock);
spin_lock_irq(&device->tconn->req_lock);
}
finish_wait(&mdev->misc_wait, &wait);
finish_wait(&device->misc_wait, &wait);
}
/* called within req_lock and rcu_read_lock() */
static void maybe_pull_ahead(struct drbd_device *mdev)
static void maybe_pull_ahead(struct drbd_device *device)
{
struct drbd_tconn *tconn = mdev->tconn;
struct drbd_tconn *tconn = device->tconn;
struct net_conf *nc;
bool congested = false;
enum drbd_on_congestion on_congestion;
@ -875,32 +875,32 @@ static void maybe_pull_ahead(struct drbd_device *mdev)
/* If I don't even have good local storage, we can not reasonably try
* to pull ahead of the peer. We also need the local reference to make
* sure mdev->act_log is there.
* sure device->act_log is there.
*/
if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
if (!get_ldev_if_state(device, D_UP_TO_DATE))
return;
if (nc->cong_fill &&
atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
dev_info(DEV, "Congestion-fill threshold reached\n");
congested = true;
}
if (mdev->act_log->used >= nc->cong_extents) {
if (device->act_log->used >= nc->cong_extents) {
dev_info(DEV, "Congestion-extents threshold reached\n");
congested = true;
}
if (congested) {
/* start a new epoch for non-mirrored writes */
start_new_tl_epoch(mdev->tconn);
start_new_tl_epoch(device->tconn);
if (on_congestion == OC_PULL_AHEAD)
_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
else /*nc->on_congestion == OC_DISCONNECT */
_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
}
put_ldev(mdev);
put_ldev(device);
}
/* If this returns false, and req->private_bio is still set,
@ -914,19 +914,19 @@ static void maybe_pull_ahead(struct drbd_device *mdev)
*/
static bool do_remote_read(struct drbd_request *req)
{
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
enum drbd_read_balancing rbm;
if (req->private_bio) {
if (!drbd_may_do_local_read(mdev,
if (!drbd_may_do_local_read(device,
req->i.sector, req->i.size)) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(mdev);
put_ldev(device);
}
}
if (mdev->state.pdsk != D_UP_TO_DATE)
if (device->state.pdsk != D_UP_TO_DATE)
return false;
if (req->private_bio == NULL)
@ -936,17 +936,17 @@ static bool do_remote_read(struct drbd_request *req)
* protocol, pending requests etc. */
rcu_read_lock();
rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
rcu_read_unlock();
if (rbm == RB_PREFER_LOCAL && req->private_bio)
return false; /* submit locally */
if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) {
if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
if (req->private_bio) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(mdev);
put_ldev(device);
}
return true;
}
@ -959,11 +959,11 @@ static bool do_remote_read(struct drbd_request *req)
* which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
int remote, send_oos;
remote = drbd_should_do_remote(mdev->state);
send_oos = drbd_should_send_out_of_sync(mdev->state);
remote = drbd_should_do_remote(device->state);
send_oos = drbd_should_send_out_of_sync(device->state);
/* Need to replicate writes. Unless it is an empty flush,
* which is better mapped to a DRBD P_BARRIER packet,
@ -987,7 +987,7 @@ static int drbd_process_write_request(struct drbd_request *req)
if (remote) {
_req_mod(req, TO_BE_SENT);
_req_mod(req, QUEUE_FOR_NET_WRITE);
} else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size))
} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
_req_mod(req, QUEUE_FOR_SEND_OOS);
return remote;
@ -996,36 +996,36 @@ static int drbd_process_write_request(struct drbd_request *req)
static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
struct bio *bio = req->private_bio;
const int rw = bio_rw(bio);
bio->bi_bdev = mdev->ldev->backing_bdev;
bio->bi_bdev = device->ldev->backing_bdev;
/* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio.
* In case the last activity log transaction failed to get on
* stable storage, and this is a WRITE, we may not even submit
* this bio. */
if (get_ldev(mdev)) {
if (drbd_insert_fault(mdev,
if (get_ldev(device)) {
if (drbd_insert_fault(device,
rw == WRITE ? DRBD_FAULT_DT_WR
: rw == READ ? DRBD_FAULT_DT_RD
: DRBD_FAULT_DT_RA))
bio_endio(bio, -EIO);
else
generic_make_request(bio);
put_ldev(mdev);
put_ldev(device);
} else
bio_endio(bio, -EIO);
}
static void drbd_queue_write(struct drbd_device *mdev, struct drbd_request *req)
static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
spin_lock(&mdev->submit.lock);
list_add_tail(&req->tl_requests, &mdev->submit.writes);
spin_unlock(&mdev->submit.lock);
queue_work(mdev->submit.wq, &mdev->submit.worker);
spin_lock(&device->submit.lock);
list_add_tail(&req->tl_requests, &device->submit.writes);
spin_unlock(&device->submit.lock);
queue_work(device->submit.wq, &device->submit.worker);
}
/* returns the new drbd_request pointer, if the caller is expected to
@ -1034,15 +1034,15 @@ static void drbd_queue_write(struct drbd_device *mdev, struct drbd_request *req)
* Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
*/
static struct drbd_request *
drbd_request_prepare(struct drbd_device *mdev, struct bio *bio, unsigned long start_time)
drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_time)
{
const int rw = bio_data_dir(bio);
struct drbd_request *req;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
req = drbd_req_new(device, bio);
if (!req) {
dec_ap_bio(mdev);
dec_ap_bio(device);
/* only pass the error to the upper layers.
* if user cannot handle io errors, that's not our business. */
dev_err(DEV, "could not kmalloc() req\n");
@ -1051,18 +1051,18 @@ drbd_request_prepare(struct drbd_device *mdev, struct bio *bio, unsigned long st
}
req->start_time = start_time;
if (!get_ldev(mdev)) {
if (!get_ldev(device)) {
bio_put(req->private_bio);
req->private_bio = NULL;
}
/* Update disk stats */
_drbd_start_io_acct(mdev, req);
_drbd_start_io_acct(device, req);
if (rw == WRITE && req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
if (!drbd_al_begin_io_fastpath(mdev, &req->i)) {
drbd_queue_write(mdev, req);
&& !test_bit(AL_SUSPENDED, &device->flags)) {
if (!drbd_al_begin_io_fastpath(device, &req->i)) {
drbd_queue_write(device, req);
return NULL;
}
req->rq_state |= RQ_IN_ACT_LOG;
@ -1071,13 +1071,13 @@ drbd_request_prepare(struct drbd_device *mdev, struct bio *bio, unsigned long st
return req;
}
static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request *req)
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
const int rw = bio_rw(req->master_bio);
struct bio_and_error m = { NULL, };
bool no_remote = false;
spin_lock_irq(&mdev->tconn->req_lock);
spin_lock_irq(&device->tconn->req_lock);
if (rw == WRITE) {
/* This may temporarily give up the req_lock,
* but will re-acquire it before it returns here.
@ -1087,17 +1087,17 @@ static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request *
/* check for congestion, and potentially stop sending
* full data updates, but start sending "dirty bits" only. */
maybe_pull_ahead(mdev);
maybe_pull_ahead(device);
}
if (drbd_suspended(mdev)) {
if (drbd_suspended(device)) {
/* push back and retry: */
req->rq_state |= RQ_POSTPONED;
if (req->private_bio) {
bio_put(req->private_bio);
req->private_bio = NULL;
put_ldev(mdev);
put_ldev(device);
}
goto out;
}
@ -1111,15 +1111,15 @@ static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request *
}
/* which transfer log epoch does this belong to? */
req->epoch = atomic_read(&mdev->tconn->current_tle_nr);
req->epoch = atomic_read(&device->tconn->current_tle_nr);
/* no point in adding empty flushes to the transfer log,
* they are mapped to drbd barriers already. */
if (likely(req->i.size!=0)) {
if (rw == WRITE)
mdev->tconn->current_tle_writes++;
device->tconn->current_tle_writes++;
list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log);
list_add_tail(&req->tl_requests, &device->tconn->transfer_log);
}
if (rw == WRITE) {
@ -1139,9 +1139,9 @@ static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request *
/* needs to be marked within the same spinlock */
_req_mod(req, TO_BE_SUBMITTED);
/* but we need to give up the spinlock to submit */
spin_unlock_irq(&mdev->tconn->req_lock);
spin_unlock_irq(&device->tconn->req_lock);
drbd_submit_req_private_bio(req);
spin_lock_irq(&mdev->tconn->req_lock);
spin_lock_irq(&device->tconn->req_lock);
} else if (no_remote) {
nodata:
if (__ratelimit(&drbd_ratelimit_state))
@ -1154,21 +1154,21 @@ static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request *
out:
if (drbd_req_put_completion_ref(req, &m, 1))
kref_put(&req->kref, drbd_req_destroy);
spin_unlock_irq(&mdev->tconn->req_lock);
spin_unlock_irq(&device->tconn->req_lock);
if (m.bio)
complete_master_bio(mdev, &m);
complete_master_bio(device, &m);
}
void __drbd_make_request(struct drbd_device *mdev, struct bio *bio, unsigned long start_time)
void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_time)
{
struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time);
struct drbd_request *req = drbd_request_prepare(device, bio, start_time);
if (IS_ERR_OR_NULL(req))
return;
drbd_send_and_submit(mdev, req);
drbd_send_and_submit(device, req);
}
static void submit_fast_path(struct drbd_device *mdev, struct list_head *incoming)
static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
struct drbd_request *req, *tmp;
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
@ -1176,19 +1176,19 @@ static void submit_fast_path(struct drbd_device *mdev, struct list_head *incomin
if (rw == WRITE /* rw != WRITE should not even end up here! */
&& req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
if (!drbd_al_begin_io_fastpath(mdev, &req->i))
&& !test_bit(AL_SUSPENDED, &device->flags)) {
if (!drbd_al_begin_io_fastpath(device, &req->i))
continue;
req->rq_state |= RQ_IN_ACT_LOG;
}
list_del_init(&req->tl_requests);
drbd_send_and_submit(mdev, req);
drbd_send_and_submit(device, req);
}
}
static bool prepare_al_transaction_nonblock(struct drbd_device *mdev,
static bool prepare_al_transaction_nonblock(struct drbd_device *device,
struct list_head *incoming,
struct list_head *pending)
{
@ -1196,9 +1196,9 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *mdev,
int wake = 0;
int err;
spin_lock_irq(&mdev->al_lock);
spin_lock_irq(&device->al_lock);
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
err = drbd_al_begin_io_nonblock(mdev, &req->i);
err = drbd_al_begin_io_nonblock(device, &req->i);
if (err == -EBUSY)
wake = 1;
if (err)
@ -1206,30 +1206,30 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *mdev,
req->rq_state |= RQ_IN_ACT_LOG;
list_move_tail(&req->tl_requests, pending);
}
spin_unlock_irq(&mdev->al_lock);
spin_unlock_irq(&device->al_lock);
if (wake)
wake_up(&mdev->al_wait);
wake_up(&device->al_wait);
return !list_empty(pending);
}
void do_submit(struct work_struct *ws)
{
struct drbd_device *mdev = container_of(ws, struct drbd_device, submit.worker);
struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
LIST_HEAD(incoming);
LIST_HEAD(pending);
struct drbd_request *req, *tmp;
for (;;) {
spin_lock(&mdev->submit.lock);
list_splice_tail_init(&mdev->submit.writes, &incoming);
spin_unlock(&mdev->submit.lock);
spin_lock(&device->submit.lock);
list_splice_tail_init(&device->submit.writes, &incoming);
spin_unlock(&device->submit.lock);
submit_fast_path(mdev, &incoming);
submit_fast_path(device, &incoming);
if (list_empty(&incoming))
break;
wait_event(mdev->al_wait, prepare_al_transaction_nonblock(mdev, &incoming, &pending));
wait_event(device->al_wait, prepare_al_transaction_nonblock(device, &incoming, &pending));
/* Maybe more was queued, while we prepared the transaction?
* Try to stuff them into this transaction as well.
* Be strictly non-blocking here, no wait_event, we already
@ -1243,17 +1243,17 @@ void do_submit(struct work_struct *ws)
/* It is ok to look outside the lock,
* it's only an optimization anyways */
if (list_empty(&mdev->submit.writes))
if (list_empty(&device->submit.writes))
break;
spin_lock(&mdev->submit.lock);
list_splice_tail_init(&mdev->submit.writes, &more_incoming);
spin_unlock(&mdev->submit.lock);
spin_lock(&device->submit.lock);
list_splice_tail_init(&device->submit.writes, &more_incoming);
spin_unlock(&device->submit.lock);
if (list_empty(&more_incoming))
break;
made_progress = prepare_al_transaction_nonblock(mdev, &more_incoming, &more_pending);
made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending);
list_splice_tail_init(&more_pending, &pending);
list_splice_tail_init(&more_incoming, &incoming);
@ -1261,18 +1261,18 @@ void do_submit(struct work_struct *ws)
if (!made_progress)
break;
}
drbd_al_begin_io_commit(mdev, false);
drbd_al_begin_io_commit(device, false);
list_for_each_entry_safe(req, tmp, &pending, tl_requests) {
list_del_init(&req->tl_requests);
drbd_send_and_submit(mdev, req);
drbd_send_and_submit(device, req);
}
}
}
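do_submit() drains device->submit.writes by splicing the whole backlog onto a private list under submit.lock and only then doing the potentially blocking activity-log work without the lock. Below is a minimal, self-contained sketch of that splice-and-drain pattern; the struct and function names are illustrative, not DRBD's, and initialization (spin_lock_init, INIT_LIST_HEAD, INIT_WORK) is assumed to happen elsewhere.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct submit_queue {
	spinlock_t lock;
	struct list_head writes;
	struct work_struct worker;
};

struct queued_req {
	struct list_head list;
};

static void producer_queue(struct submit_queue *q, struct queued_req *req)
{
	spin_lock(&q->lock);
	list_add_tail(&req->list, &q->writes);
	spin_unlock(&q->lock);
	schedule_work(&q->worker);	/* kick the drain worker */
}

static void drain_worker(struct work_struct *ws)
{
	struct submit_queue *q = container_of(ws, struct submit_queue, worker);
	struct queued_req *req, *tmp;
	LIST_HEAD(incoming);

	/* grab the whole backlog in O(1), then work without the lock */
	spin_lock(&q->lock);
	list_splice_tail_init(&q->writes, &incoming);
	spin_unlock(&q->lock);

	list_for_each_entry_safe(req, tmp, &incoming, list) {
		list_del_init(&req->list);
		/* submit req here, possibly sleeping */
	}
}

The same trick is what lets do_submit() opportunistically fold writes that were queued in the meantime into the activity-log transaction it is already preparing.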
void drbd_make_request(struct request_queue *q, struct bio *bio)
{
struct drbd_device *mdev = (struct drbd_device *) q->queuedata;
struct drbd_device *device = (struct drbd_device *) q->queuedata;
unsigned long start_time;
start_time = jiffies;
@@ -1282,8 +1282,8 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
*/
D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
inc_ap_bio(mdev);
__drbd_make_request(mdev, bio, start_time);
inc_ap_bio(device);
__drbd_make_request(device, bio, start_time);
}
/* This is called by bio_add_page().
@@ -1300,20 +1300,20 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
*/
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
struct drbd_device *mdev = (struct drbd_device *) q->queuedata;
struct drbd_device *device = (struct drbd_device *) q->queuedata;
unsigned int bio_size = bvm->bi_size;
int limit = DRBD_MAX_BIO_SIZE;
int backing_limit;
if (bio_size && get_ldev(mdev)) {
if (bio_size && get_ldev(device)) {
unsigned int max_hw_sectors = queue_max_hw_sectors(q);
struct request_queue * const b =
mdev->ldev->backing_bdev->bd_disk->queue;
device->ldev->backing_bdev->bd_disk->queue;
if (b->merge_bvec_fn) {
backing_limit = b->merge_bvec_fn(b, bvm, bvec);
limit = min(limit, backing_limit);
}
put_ldev(mdev);
put_ldev(device);
if ((limit >> 9) > max_hw_sectors)
limit = max_hw_sectors << 9;
}
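The limit returned by drbd_merge_bvec() is the smallest of three constraints: DRBD's own maximum bio size, whatever the backing queue's merge_bvec_fn allows, and the queue's max_hw_sectors. A worked example with made-up numbers follows; DRBD_MAX_BIO_SIZE is assumed to be 1 MiB here, the real value lives in drbd_int.h.

#include <linux/kernel.h>	/* min() */

static unsigned int example_merge_limit(void)
{
	unsigned int limit = 1U << 20;		/* stand-in for DRBD_MAX_BIO_SIZE */
	unsigned int backing_limit = 128 << 10;	/* backing merge_bvec_fn allows 128 KiB */
	unsigned int max_hw_sectors = 512;	/* 512 sectors == 256 KiB */

	limit = min(limit, backing_limit);	/* -> 128 KiB */
	if ((limit >> 9) > max_hw_sectors)	/* 256 sectors > 512? no */
		limit = max_hw_sectors << 9;
	return limit;				/* 128 KiB */
}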
@@ -1334,8 +1334,8 @@ static struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
void request_timer_fn(unsigned long data)
{
struct drbd_device *mdev = (struct drbd_device *) data;
struct drbd_tconn *tconn = mdev->tconn;
struct drbd_device *device = (struct drbd_device *) data;
struct drbd_tconn *tconn = device->tconn;
struct drbd_request *req; /* oldest request */
struct net_conf *nc;
unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
@@ -1343,12 +1343,12 @@ void request_timer_fn(unsigned long data)
rcu_read_lock();
nc = rcu_dereference(tconn->net_conf);
if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS)
if (nc && device->state.conn >= C_WF_REPORT_PARAMS)
ent = nc->timeout * HZ/10 * nc->ko_count;
if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
put_ldev(mdev);
if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
put_ldev(device);
}
rcu_read_unlock();
@@ -1363,7 +1363,7 @@ void request_timer_fn(unsigned long data)
req = find_oldest_request(tconn);
if (!req) {
spin_unlock_irq(&tconn->req_lock);
mod_timer(&mdev->request_timer, now + et);
mod_timer(&device->request_timer, now + et);
return;
}
@@ -1387,15 +1387,15 @@ void request_timer_fn(unsigned long data)
time_after(now, req->start_time + ent) &&
!time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) {
dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
_drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
}
if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev &&
if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.device == device &&
time_after(now, req->start_time + dt) &&
!time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
!time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
__drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
}
nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
spin_unlock_irq(&tconn->req_lock);
mod_timer(&mdev->request_timer, nt);
mod_timer(&device->request_timer, nt);
}
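Both timeout knobs used by request_timer_fn() are configured in tenths of a second, which is why they are scaled by HZ/10 before being compared against the jiffies-based request timestamps. An illustrative calculation, with made-up configuration values:

/* Illustrative values only; both knobs are in tenths of a second,
 * so HZ/10 converts them to jiffies. */
unsigned int timeout = 60;			/* net timeout: 6.0 s */
unsigned int ko_count = 7;
unsigned long ent = timeout * HZ/10 * ko_count;	/* ~42 s worth of jiffies */

unsigned int disk_timeout = 300;		/* disk-timeout: 30.0 s */
unsigned long dt = disk_timeout * HZ/10;	/* ~30 s worth of jiffies */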


@@ -281,7 +281,7 @@ extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m);
extern void complete_master_bio(struct drbd_device *mdev,
extern void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
@@ -294,14 +294,14 @@ extern void drbd_restart_request(struct drbd_request *req);
* outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
struct bio_and_error m;
int rv;
/* __req_mod possibly frees req, do not touch req after that! */
rv = __req_mod(req, what, &m);
if (m.bio)
complete_master_bio(mdev, &m);
complete_master_bio(device, &m);
return rv;
}
@@ -314,16 +314,16 @@ static inline int req_mod(struct drbd_request *req,
enum drbd_req_event what)
{
unsigned long flags;
struct drbd_device *mdev = req->w.mdev;
struct drbd_device *device = req->w.device;
struct bio_and_error m;
int rv;
spin_lock_irqsave(&mdev->tconn->req_lock, flags);
spin_lock_irqsave(&device->tconn->req_lock, flags);
rv = __req_mod(req, what, &m);
spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
spin_unlock_irqrestore(&device->tconn->req_lock, flags);
if (m.bio)
complete_master_bio(mdev, &m);
complete_master_bio(device, &m);
return rv;
}
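The struct bio_and_error detour exists because __req_mod() runs under the req_lock spinlock (with interrupts disabled in the req_mod() variant), where completing the upper-layer bio would be unsafe; the lock-held code only records what to complete, and the wrapper finishes it after unlocking. A self-contained sketch of that deferral pattern, using illustrative names rather than DRBD's:

#include <linux/bio.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_req {
	spinlock_t lock;
	struct bio *master_bio;
	int error;
	bool done;
};

struct completion_hint {
	struct bio *bio;
	int error;
};

/* Caller holds req->lock; never call bio_endio() from here. */
static void change_state_locked(struct my_req *req, struct completion_hint *m)
{
	if (req->done && req->master_bio) {
		m->bio = req->master_bio;
		m->error = req->error;
		req->master_bio = NULL;	/* hand ownership to the caller */
	}
}

static void change_state(struct my_req *req)
{
	struct completion_hint m = { NULL, 0 };
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	change_state_locked(req, &m);
	spin_unlock_irqrestore(&req->lock, flags);

	if (m.bio)
		bio_endio(m.bio, m.error);	/* safe: lock released */
}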

File diff suppressed because it is too large


@@ -107,7 +107,7 @@ union drbd_dev_state {
unsigned int i;
};
extern enum drbd_state_rv drbd_change_state(struct drbd_device *mdev,
extern enum drbd_state_rv drbd_change_state(struct drbd_device *device,
enum chg_state_flags f,
union drbd_state mask,
union drbd_state val);
@@ -131,12 +131,12 @@ enum drbd_state_rv
conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags);
extern void drbd_resume_al(struct drbd_device *mdev);
extern void drbd_resume_al(struct drbd_device *device);
extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
/**
* drbd_request_state() - Request a state change
* @mdev: DRBD device.
* @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
*
@@ -144,11 +144,11 @@ extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
* quite verbose in case the state change is not possible, and all those
* state changes are globally serialized.
*/
static inline int drbd_request_state(struct drbd_device *mdev,
static inline int drbd_request_state(struct drbd_device *device,
union drbd_state mask,
union drbd_state val)
{
return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
}
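For illustration, a typical call site pairs this helper with the NS() macro from drbd_int.h (the non-underscore sibling of the _NS() used in request_timer_fn above), which builds a matching mask/val pair for a single state field; treat the exact macro spelling and the return-value check as assumptions here, not part of this patch.

/* Illustration only: ask the state machine for a graceful disconnect.
 * NS(conn, C_DISCONNECTING) expands to a mask selecting the conn field
 * and a val carrying the new connection state. */
if (drbd_request_state(device, NS(conn, C_DISCONNECTING)) < SS_SUCCESS)
	dev_err(DEV, "failed to request C_DISCONNECTING\n");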
enum drbd_role conn_highest_role(struct drbd_tconn *tconn);

File diff suppressed because it is too large


@@ -9,12 +9,12 @@
extern char *drbd_sec_holder;
/* sets the number of 512 byte sectors of our virtual device */
static inline void drbd_set_my_capacity(struct drbd_device *mdev,
static inline void drbd_set_my_capacity(struct drbd_device *device,
sector_t size)
{
/* set_capacity(mdev->this_bdev->bd_disk, size); */
set_capacity(mdev->vdisk, size);
mdev->this_bdev->bd_inode->i_size = (loff_t)size << 9;
/* set_capacity(device->this_bdev->bd_disk, size); */
set_capacity(device->vdisk, size);
device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
}
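set_capacity() counts 512-byte sectors while the block device inode size is in bytes, hence the << 9 above. A small illustrative caller (the 10 GiB figure is made up):

/* Expose a 10 GiB virtual device: bytes -> sectors with >> 9. */
sector_t nr_sectors = (10ULL << 30) >> 9;	/* 20971520 sectors */
drbd_set_my_capacity(device, nr_sectors);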
#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE)
@@ -27,20 +27,20 @@ extern void drbd_request_endio(struct bio *bio, int error);
/*
* used to submit our private bio
*/
static inline void drbd_generic_make_request(struct drbd_device *mdev,
static inline void drbd_generic_make_request(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
if (!bio->bi_bdev) {
printk(KERN_ERR "drbd%d: drbd_generic_make_request: "
"bio->bi_bdev == NULL\n",
mdev_to_minor(mdev));
device_to_minor(device));
dump_stack();
bio_endio(bio, -ENODEV);
return;
}
if (drbd_insert_fault(mdev, fault_type))
if (drbd_insert_fault(device, fault_type))
bio_endio(bio, -EIO);
else
generic_make_request(bio);