Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2020-05-05' into staging

Block patches:
- Asynchronous copying for block-copy (i.e., the backup job)
- Allow resizing of qcow2 images when they have internal snapshots
- iotests: Logging improvements for Python tests
- iotest 153 fix, and block comment cleanups

# gpg: Signature made Tue 05 May 2020 13:56:58 BST
# gpg:                using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg:                issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2020-05-05: (24 commits)
  block/block-copy: use aio-task-pool API
  block/block-copy: refactor task creation
  block/block-copy: add state pointer to BlockCopyTask
  block/block-copy: alloc task on each iteration
  block/block-copy: rename in-flight requests to tasks
  Fix iotest 153
  block: Comment cleanups
  qcow2: Tweak comment about bitmaps vs. resize
  qcow2: Allow resize of images with internal snapshots
  block: Add blk_new_with_bs() helper
  iotests: use python logging for iotests.log()
  iotests: Mark verify functions as private
  iotest 258: use script_main
  iotests: add script_initialize
  iotests: add hmp helper with logging
  iotests: limit line length to 79 chars
  iotests: touch up log function signature
  iotests: drop pre-Python 3.4 compatibility code
  iotests: alphabetize standard imports
  iotests: add pylintrc file
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2020-05-05 16:46:37 +01:00
commit ea1329bb3a
71 changed files with 729 additions and 387 deletions


@ -355,6 +355,29 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
return blk;
}
/*
* Create a new BlockBackend connected to an existing BlockDriverState.
*
* @perm is a bitmasks of BLK_PERM_* constants which describes the
* permissions to request for @bs that is attached to this
* BlockBackend. @shared_perm is a bitmask which describes which
* permissions may be granted to other users of the attached node.
* Both sets of permissions can be changed later using blk_set_perm().
*
* Return the new BlockBackend on success, null on failure.
*/
BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
uint64_t shared_perm, Error **errp)
{
BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm);
if (blk_insert_bs(blk, bs, errp) < 0) {
blk_unref(blk);
return NULL;
}
return blk;
}
/*
* Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
* The new BlockBackend is in the main AioContext.

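A minimal caller sketch of the new blk_new_with_bs() helper, assuming an existing node @bs that is resized through a temporary BlockBackend; the example_resize() wrapper name is illustrative only, and the pattern simply mirrors the conversions in the later hunks of this series:

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"

/* Sketch: take write/resize permissions on @bs, truncate it, drop the BB. */
static int example_resize(BlockDriverState *bs, int64_t new_size, Error **errp)
{
    BlockBackend *blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE,
                                        BLK_PERM_ALL, errp);
    int ret;

    if (!blk) {
        return -EPERM; /* blk_insert_bs() failed; @errp is already set */
    }

    ret = blk_truncate(blk, new_size, true, PREALLOC_MODE_OFF, 0, errp);
    blk_unref(blk);
    return ret;
}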

@ -19,17 +19,37 @@
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"
#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
typedef struct BlockCopyInFlightReq {
static coroutine_fn int block_copy_task_entry(AioTask *task);
typedef struct BlockCopyCallState {
bool failed;
bool error_is_read;
} BlockCopyCallState;
typedef struct BlockCopyTask {
AioTask task;
BlockCopyState *s;
BlockCopyCallState *call_state;
int64_t offset;
int64_t bytes;
QLIST_ENTRY(BlockCopyInFlightReq) list;
CoQueue wait_queue; /* coroutines blocked on this request */
} BlockCopyInFlightReq;
bool zeroes;
QLIST_ENTRY(BlockCopyTask) list;
CoQueue wait_queue; /* coroutines blocked on this task */
} BlockCopyTask;
static int64_t task_end(BlockCopyTask *task)
{
return task->offset + task->bytes;
}
typedef struct BlockCopyState {
/*
@ -45,7 +65,7 @@ typedef struct BlockCopyState {
bool use_copy_range;
int64_t copy_size;
uint64_t len;
QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
QLIST_HEAD(, BlockCopyTask) tasks;
BdrvRequestFlags write_flags;
@ -73,15 +93,14 @@ typedef struct BlockCopyState {
SharedResource *mem;
} BlockCopyState;
static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
int64_t offset,
int64_t bytes)
static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
int64_t offset, int64_t bytes)
{
BlockCopyInFlightReq *req;
BlockCopyTask *t;
QLIST_FOREACH(req, &s->inflight_reqs, list) {
if (offset + bytes > req->offset && offset < req->offset + req->bytes) {
return req;
QLIST_FOREACH(t, &s->tasks, list) {
if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
return t;
}
}
@ -89,73 +108,92 @@ static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
}
/*
* If there are no intersecting requests return false. Otherwise, wait for the
* first found intersecting request to finish and return true.
* If there are no intersecting tasks return false. Otherwise, wait for the
* first found intersecting tasks to finish and return true.
*/
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
int64_t bytes)
{
BlockCopyInFlightReq *req = find_conflicting_inflight_req(s, offset, bytes);
BlockCopyTask *task = find_conflicting_task(s, offset, bytes);
if (!req) {
if (!task) {
return false;
}
qemu_co_queue_wait(&req->wait_queue, NULL);
qemu_co_queue_wait(&task->wait_queue, NULL);
return true;
}
/* Called only on full-dirty region */
static void block_copy_inflight_req_begin(BlockCopyState *s,
BlockCopyInFlightReq *req,
int64_t offset, int64_t bytes)
/*
* Search for the first dirty area in offset/bytes range and create task at
* the beginning of it.
*/
static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
BlockCopyCallState *call_state,
int64_t offset, int64_t bytes)
{
assert(!find_conflicting_inflight_req(s, offset, bytes));
BlockCopyTask *task;
if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
offset, offset + bytes,
s->copy_size, &offset, &bytes))
{
return NULL;
}
/* region is dirty, so no existent tasks possible in it */
assert(!find_conflicting_task(s, offset, bytes));
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
s->in_flight_bytes += bytes;
req->offset = offset;
req->bytes = bytes;
qemu_co_queue_init(&req->wait_queue);
QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
task = g_new(BlockCopyTask, 1);
*task = (BlockCopyTask) {
.task.func = block_copy_task_entry,
.s = s,
.call_state = call_state,
.offset = offset,
.bytes = bytes,
};
qemu_co_queue_init(&task->wait_queue);
QLIST_INSERT_HEAD(&s->tasks, task, list);
return task;
}
/*
* block_copy_inflight_req_shrink
* block_copy_task_shrink
*
* Drop the tail of the request to be handled later. Set dirty bits back and
* wake up all requests waiting for us (may be some of them are not intersecting
* with shrunk request)
* Drop the tail of the task to be handled later. Set dirty bits back and
* wake up all tasks waiting for us (may be some of them are not intersecting
* with shrunk task)
*/
static void coroutine_fn block_copy_inflight_req_shrink(BlockCopyState *s,
BlockCopyInFlightReq *req, int64_t new_bytes)
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
int64_t new_bytes)
{
if (new_bytes == req->bytes) {
if (new_bytes == task->bytes) {
return;
}
assert(new_bytes > 0 && new_bytes < req->bytes);
assert(new_bytes > 0 && new_bytes < task->bytes);
s->in_flight_bytes -= req->bytes - new_bytes;
bdrv_set_dirty_bitmap(s->copy_bitmap,
req->offset + new_bytes, req->bytes - new_bytes);
task->s->in_flight_bytes -= task->bytes - new_bytes;
bdrv_set_dirty_bitmap(task->s->copy_bitmap,
task->offset + new_bytes, task->bytes - new_bytes);
req->bytes = new_bytes;
qemu_co_queue_restart_all(&req->wait_queue);
task->bytes = new_bytes;
qemu_co_queue_restart_all(&task->wait_queue);
}
static void coroutine_fn block_copy_inflight_req_end(BlockCopyState *s,
BlockCopyInFlightReq *req,
int ret)
static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
s->in_flight_bytes -= req->bytes;
task->s->in_flight_bytes -= task->bytes;
if (ret < 0) {
bdrv_set_dirty_bitmap(s->copy_bitmap, req->offset, req->bytes);
bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
}
QLIST_REMOVE(req, list);
qemu_co_queue_restart_all(&req->wait_queue);
QLIST_REMOVE(task, list);
qemu_co_queue_restart_all(&task->wait_queue);
}
void block_copy_state_free(BlockCopyState *s)
@ -223,7 +261,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
}
QLIST_INIT(&s->inflight_reqs);
QLIST_INIT(&s->tasks);
return s;
}
@ -242,6 +280,38 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
s->progress = pm;
}
/*
* Takes ownership of @task
*
* If pool is NULL directly run the task, otherwise schedule it into the pool.
*
* Returns: task.func return code if pool is NULL
* otherwise -ECANCELED if pool status is bad
* otherwise 0 (successfully scheduled)
*/
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
BlockCopyTask *task)
{
if (!pool) {
int ret = task->task.func(&task->task);
g_free(task);
return ret;
}
aio_task_pool_wait_slot(pool);
if (aio_task_pool_status(pool) < 0) {
co_put_to_shres(task->s->mem, task->bytes);
block_copy_task_end(task, -ECANCELED);
g_free(task);
return -ECANCELED;
}
aio_task_pool_start_task(pool, &task->task);
return 0;
}
/*
* block_copy_do_copy
*
@ -345,6 +415,27 @@ out:
return ret;
}
static coroutine_fn int block_copy_task_entry(AioTask *task)
{
BlockCopyTask *t = container_of(task, BlockCopyTask, task);
bool error_is_read;
int ret;
ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
&error_is_read);
if (ret < 0 && !t->call_state->failed) {
t->call_state->failed = true;
t->call_state->error_is_read = error_is_read;
} else {
progress_work_done(t->s->progress, t->bytes);
t->s->progress_bytes_callback(t->bytes, t->s->progress_opaque);
}
co_put_to_shres(t->s->mem, t->bytes);
block_copy_task_end(t, ret);
return ret;
}
static int block_copy_block_status(BlockCopyState *s, int64_t offset,
int64_t bytes, int64_t *pnum)
{
@ -462,6 +553,9 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
{
int ret = 0;
bool found_dirty = false;
int64_t end = offset + bytes;
AioTaskPool *aio = NULL;
BlockCopyCallState call_state = {false, false};
/*
* block_copy() user is responsible for keeping source and target in same
@ -473,63 +567,78 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
while (bytes) {
BlockCopyInFlightReq req;
int64_t next_zero, cur_bytes, status_bytes;
while (bytes && aio_task_pool_status(aio) == 0) {
BlockCopyTask *task;
int64_t status_bytes;
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
trace_block_copy_skip(s, offset);
offset += s->cluster_size;
bytes -= s->cluster_size;
continue; /* already copied */
task = block_copy_task_create(s, &call_state, offset, bytes);
if (!task) {
/* No more dirty bits in the bitmap */
trace_block_copy_skip_range(s, offset, bytes);
break;
}
if (task->offset > offset) {
trace_block_copy_skip_range(s, offset, task->offset - offset);
}
found_dirty = true;
cur_bytes = MIN(bytes, s->copy_size);
next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset,
cur_bytes);
if (next_zero >= 0) {
assert(next_zero > offset); /* offset is dirty */
assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
cur_bytes = next_zero - offset;
}
block_copy_inflight_req_begin(s, &req, offset, cur_bytes);
ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
ret = block_copy_block_status(s, task->offset, task->bytes,
&status_bytes);
assert(ret >= 0); /* never fail */
cur_bytes = MIN(cur_bytes, status_bytes);
block_copy_inflight_req_shrink(s, &req, cur_bytes);
if (status_bytes < task->bytes) {
block_copy_task_shrink(task, status_bytes);
}
if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
block_copy_inflight_req_end(s, &req, 0);
block_copy_task_end(task, 0);
g_free(task);
progress_set_remaining(s->progress,
bdrv_get_dirty_count(s->copy_bitmap) +
s->in_flight_bytes);
trace_block_copy_skip_range(s, offset, status_bytes);
offset += status_bytes;
bytes -= status_bytes;
trace_block_copy_skip_range(s, task->offset, task->bytes);
offset = task_end(task);
bytes = end - offset;
continue;
}
task->zeroes = ret & BDRV_BLOCK_ZERO;
trace_block_copy_process(s, offset);
trace_block_copy_process(s, task->offset);
co_get_from_shres(s->mem, cur_bytes);
ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
error_is_read);
co_put_to_shres(s->mem, cur_bytes);
block_copy_inflight_req_end(s, &req, ret);
if (ret < 0) {
return ret;
co_get_from_shres(s->mem, task->bytes);
offset = task_end(task);
bytes = end - offset;
if (!aio && bytes) {
aio = aio_task_pool_new(BLOCK_COPY_MAX_WORKERS);
}
progress_work_done(s->progress, cur_bytes);
s->progress_bytes_callback(cur_bytes, s->progress_opaque);
offset += cur_bytes;
bytes -= cur_bytes;
ret = block_copy_task_run(aio, task);
if (ret < 0) {
goto out;
}
}
return found_dirty;
out:
if (aio) {
aio_task_pool_wait_all(aio);
/*
* We are not really interested in -ECANCELED returned from
* block_copy_task_run. If it fails, it means some task already failed
* for real reason, let's return first failure.
* Still, assert that we don't rewrite failure by success.
*/
assert(ret == 0 || aio_task_pool_status(aio) < 0);
ret = aio_task_pool_status(aio);
aio_task_pool_free(aio);
}
if (error_is_read && ret < 0) {
*error_is_read = call_state.error_is_read;
}
return ret < 0 ? ret : found_dirty;
}
/*

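For orientation, the aio-task-pool pattern adopted by block-copy above can be sketched as follows, using only the block/aio_task.h calls that appear in this diff; the MyTask type and the my_task_func()/run_tasks() names are hypothetical, and the per-task work is elided:

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"

typedef struct MyTask {
    AioTask task;      /* embedded; container_of() recovers the outer struct */
    int64_t index;     /* example per-task state */
} MyTask;

static coroutine_fn int my_task_func(AioTask *opaque)
{
    MyTask *t = container_of(opaque, MyTask, task);
    (void)t;           /* ... perform one unit of work here ... */
    return 0;          /* a negative value marks the whole pool as failed */
}

/* Schedule up to 64 concurrent tasks and return the first error, if any. */
static coroutine_fn int run_tasks(unsigned n_tasks)
{
    AioTaskPool *pool = aio_task_pool_new(64);
    unsigned i;
    int ret;

    for (i = 0; i < n_tasks && aio_task_pool_status(pool) == 0; i++) {
        MyTask *t = g_new0(MyTask, 1);

        t->task.func = my_task_func;
        t->index = i;
        aio_task_pool_wait_slot(pool);            /* wait for a free worker */
        aio_task_pool_start_task(pool, &t->task); /* pool owns the task now */
    }

    aio_task_pool_wait_all(pool);
    ret = aio_task_pool_status(pool);
    aio_task_pool_free(pool);
    return ret;
}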

@ -261,11 +261,10 @@ static int block_crypto_co_create_generic(BlockDriverState *bs,
QCryptoBlock *crypto = NULL;
struct BlockCryptoCreateData data;
blk = blk_new(bdrv_get_aio_context(bs),
BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
errp);
if (!blk) {
ret = -EPERM;
goto cleanup;
}


@ -960,7 +960,7 @@ int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
* flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
* BDRV_REQ_FUA).
*
* Returns < 0 on error, 0 on success. For error codes see bdrv_write().
* Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
*/
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
@ -994,6 +994,7 @@ int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
}
}
/* return < 0 if error. See bdrv_pwrite() for the return codes */
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
int ret;


@ -559,10 +559,10 @@ static int coroutine_fn parallels_co_create(BlockdevCreateOptions* opts,
return -EIO;
}
blk = blk_new(bdrv_get_aio_context(bs),
BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
errp);
if (!blk) {
ret = -EPERM;
goto out;
}
blk_set_allow_write_beyond_eof(blk, true);


@ -849,10 +849,10 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts,
return -EIO;
}
qcow_blk = blk_new(bdrv_get_aio_context(bs),
BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(qcow_blk, bs, errp);
if (ret < 0) {
qcow_blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE,
BLK_PERM_ALL, errp);
if (!qcow_blk) {
ret = -EPERM;
goto exit;
}
blk_set_allow_write_beyond_eof(qcow_blk, true);


@ -2660,7 +2660,7 @@ fail:
* - 0 if writing to this offset will not affect the mentioned metadata
* - a positive QCow2MetadataOverlap value indicating one overlapping section
* - a negative value (-errno) indicating an error while performing a check,
* e.g. when bdrv_read failed on QCOW2_OL_INACTIVE_L2
* e.g. when bdrv_pread failed on QCOW2_OL_INACTIVE_L2
*/
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
int64_t size)


@ -23,6 +23,7 @@
*/
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
@ -775,10 +776,21 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
}
if (sn->disk_size != bs->total_sectors * BDRV_SECTOR_SIZE) {
error_report("qcow2: Loading snapshots with different disk "
"size is not implemented");
ret = -ENOTSUP;
goto fail;
BlockBackend *blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL,
&local_err);
if (!blk) {
error_report_err(local_err);
ret = -ENOTSUP;
goto fail;
}
ret = blk_truncate(blk, sn->disk_size, true, PREALLOC_MODE_OFF, 0,
&local_err);
blk_unref(blk);
if (ret < 0) {
error_report_err(local_err);
goto fail;
}
}
/*


@ -3405,10 +3405,10 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
}
/* Create BlockBackend to write to the image */
blk = blk_new(bdrv_get_aio_context(bs),
BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
errp);
if (!blk) {
ret = -EPERM;
goto out;
}
blk_set_allow_write_beyond_eof(blk, true);
@ -3989,14 +3989,17 @@ static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset,
qemu_co_mutex_lock(&s->lock);
/* cannot proceed if image has snapshots */
if (s->nb_snapshots) {
error_setg(errp, "Can't resize an image which has snapshots");
/*
* Even though we store snapshot size for all images, it was not
* required until v3, so it is not safe to proceed for v2.
*/
if (s->nb_snapshots && s->qcow_version < 3) {
error_setg(errp, "Can't resize a v2 image which has snapshots");
ret = -ENOTSUP;
goto fail;
}
/* cannot proceed if image has bitmaps */
/* See qcow2-bitmap.c for which bitmap scenarios prevent a resize. */
if (qcow2_truncate_bitmaps_check(bs, errp)) {
ret = -ENOTSUP;
goto fail;
@ -5005,6 +5008,7 @@ static int qcow2_downgrade(BlockDriverState *bs, int target_version,
BDRVQcow2State *s = bs->opaque;
int current_version = s->qcow_version;
int ret;
int i;
/* This is qcow2_downgrade(), not qcow2_upgrade() */
assert(target_version < current_version);
@ -5022,6 +5026,21 @@ static int qcow2_downgrade(BlockDriverState *bs, int target_version,
return -ENOTSUP;
}
/*
* If any internal snapshot has a different size than the current
* image size, or VM state size that exceeds 32 bits, downgrading
* is unsafe. Even though we would still use v3-compliant output
* to preserve that data, other v2 programs might not realize
* those optional fields are important.
*/
for (i = 0; i < s->nb_snapshots; i++) {
if (s->snapshots[i].vm_state_size > UINT32_MAX ||
s->snapshots[i].disk_size != bs->total_sectors * BDRV_SECTOR_SIZE) {
error_setg(errp, "Internal snapshots prevent downgrade of image");
return -ENOTSUP;
}
}
/* clear incompatible features */
if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
ret = qcow2_mark_clean(bs);
@ -5412,12 +5431,10 @@ static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
}
if (new_size) {
BlockBackend *blk = blk_new(bdrv_get_aio_context(bs),
BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
blk_unref(blk);
return ret;
BlockBackend *blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL,
errp);
if (!blk) {
return -EPERM;
}
/*


@ -651,10 +651,10 @@ static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
return -EIO;
}
blk = blk_new(bdrv_get_aio_context(bs),
BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
errp);
if (!blk) {
ret = -EPERM;
goto out;
}
blk_set_allow_write_beyond_eof(blk, true);


@ -1803,12 +1803,12 @@ static int sd_prealloc(BlockDriverState *bs, int64_t old_size, int64_t new_size,
void *buf = NULL;
int ret;
blk = blk_new(bdrv_get_aio_context(bs),
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE | BLK_PERM_RESIZE,
BLK_PERM_ALL);
blk = blk_new_with_bs(bs,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE | BLK_PERM_RESIZE,
BLK_PERM_ALL, errp);
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
if (!blk) {
ret = -EPERM;
goto out_with_err_set;
}


@ -804,10 +804,10 @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options,
goto exit;
}
blk = blk_new(bdrv_get_aio_context(bs_file),
BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs_file, errp);
if (ret < 0) {
blk = blk_new_with_bs(bs_file, BLK_PERM_WRITE | BLK_PERM_RESIZE,
BLK_PERM_ALL, errp);
if (!blk) {
ret = -EPERM;
goto exit;
}


@ -1983,10 +1983,10 @@ static int coroutine_fn vhdx_co_create(BlockdevCreateOptions *opts,
return -EIO;
}
blk = blk_new(bdrv_get_aio_context(bs),
BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
errp);
if (!blk) {
ret = -EPERM;
goto delete_and_exit;
}
blk_set_allow_write_beyond_eof(blk, true);


@ -2717,11 +2717,10 @@ static BlockBackend *vmdk_co_create_cb(int64_t size, int idx,
if (!bs) {
return NULL;
}
blk = blk_new(bdrv_get_aio_context(bs),
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE | BLK_PERM_RESIZE,
BLK_PERM_ALL);
if (blk_insert_bs(blk, bs, errp)) {
bdrv_unref(bs);
blk = blk_new_with_bs(bs,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE | BLK_PERM_RESIZE,
BLK_PERM_ALL, errp);
if (!blk) {
return NULL;
}
blk_set_allow_write_beyond_eof(blk, true);


@ -1012,10 +1012,10 @@ static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts,
return -EIO;
}
blk = blk_new(bdrv_get_aio_context(bs),
BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
errp);
if (!blk) {
ret = -EPERM;
goto out;
}
blk_set_allow_write_beyond_eof(blk, true);


@ -2148,7 +2148,7 @@ DLOG(checkpoint());
* - get modified FAT
* - compare the two FATs (TODO)
* - get buffer for marking used clusters
* - recurse direntries from root (using bs->bdrv_read to make
* - recurse direntries from root (using bs->bdrv_pread to make
* sure to get the new data)
* - check that the FAT agrees with the size
* - count the number of clusters occupied by this directory and
@ -2913,9 +2913,9 @@ static int handle_deletes(BDRVVVFATState* s)
/*
* synchronize mapping with new state:
*
* - copy FAT (with bdrv_read)
* - copy FAT (with bdrv_pread)
* - mark all filenames corresponding to mappings as deleted
* - recurse direntries from root (using bs->bdrv_read)
* - recurse direntries from root (using bs->bdrv_pread)
* - delete files corresponding to mappings marked as deleted
*/
static int do_commit(BDRVVVFATState* s)
@ -2935,10 +2935,10 @@ static int do_commit(BDRVVVFATState* s)
return ret;
}
/* copy FAT (with bdrv_read) */
/* copy FAT (with bdrv_pread) */
memcpy(s->fat.pointer, s->fat2, 0x200 * s->sectors_per_fat);
/* recurse direntries from root (using bs->bdrv_read) */
/* recurse direntries from root (using bs->bdrv_pread) */
ret = commit_direntries(s, 0, -1);
if (ret) {
fprintf(stderr, "Fatal: error while committing (%d)\n", ret);


@ -2711,7 +2711,6 @@ void qmp_block_resize(bool has_device, const char *device,
BlockBackend *blk = NULL;
BlockDriverState *bs;
AioContext *aio_context;
int ret;
bs = bdrv_lookup_bs(has_device ? device : NULL,
has_node_name ? node_name : NULL,
@ -2734,9 +2733,8 @@ void qmp_block_resize(bool has_device, const char *device,
goto out;
}
blk = blk_new(bdrv_get_aio_context(bs), BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, errp);
if (!blk) {
goto out;
}


@ -397,16 +397,13 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
{
BlockBackend *blk;
BlockJob *job;
int ret;
if (job_id == NULL && !(flags & JOB_INTERNAL)) {
job_id = bdrv_get_device_name(bs);
}
blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm);
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
blk_unref(blk);
blk = blk_new_with_bs(bs, perm, shared_perm, errp);
if (!blk) {
return NULL;
}


@ -77,6 +77,8 @@ typedef struct BlockBackendPublic {
} BlockBackendPublic;
BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm);
BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
uint64_t shared_perm, Error **errp);
BlockBackend *blk_new_open(const char *filename, const char *reference,
QDict *options, int flags, Error **errp);
int blk_get_refcnt(BlockBackend *blk);


@ -1,6 +1,6 @@
#!/usr/bin/env bash
#
# Test simple read/write using plain bdrv_read/bdrv_write
# Test simple read/write using plain bdrv_pread/bdrv_pwrite
#
# Copyright (C) 2009 Red Hat, Inc.
#


@ -411,8 +411,8 @@ class TestParallelOps(iotests.QMPTestCase):
result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
self.assert_qmp(result, 'return', {})
self.vm.run_job(job='drive0', auto_dismiss=True, use_log=False)
self.vm.run_job(job='node4', auto_dismiss=True, use_log=False)
self.vm.run_job(job='drive0', auto_dismiss=True)
self.vm.run_job(job='node4', auto_dismiss=True)
self.assert_no_active_block_jobs()
# Test a block-stream and a block-commit job in parallel


@ -1,6 +1,6 @@
#!/usr/bin/env bash
#
# Test bdrv_read/bdrv_write using BDRV_O_SNAPSHOT
# Test bdrv_pread/bdrv_pwrite using BDRV_O_SNAPSHOT
#
# Copyright (C) 2013 Red Hat, Inc.
#


@ -469,7 +469,8 @@ class TestDriveCompression(iotests.QMPTestCase):
qemu_img('create', '-f', fmt, blockdev_target_img,
str(TestDriveCompression.image_len), *args)
if attach_target:
self.vm.add_drive(blockdev_target_img, format=fmt, interface="none")
self.vm.add_drive(blockdev_target_img,
img_format=fmt, interface="none")
self.vm.launch()


@ -111,6 +111,41 @@ $PYTHON qcow2.py "$TEST_IMG" dump-header
$QEMU_IO -c "read -P 0x2a 42M 64k" "$TEST_IMG" | _filter_qemu_io
_check_test_img
echo
echo "=== Testing resize with snapshots ==="
echo
_make_test_img -o "compat=0.10" 32M
$QEMU_IO -c "write -P 0x2a 24M 64k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IMG snapshot -c foo "$TEST_IMG"
$QEMU_IMG resize "$TEST_IMG" 64M &&
echo "unexpected pass"
$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)'
$QEMU_IMG amend -o "compat=1.1,size=128M" "$TEST_IMG" ||
echo "unexpected fail"
$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)'
$QEMU_IMG snapshot -c bar "$TEST_IMG"
$QEMU_IMG resize --shrink "$TEST_IMG" 64M ||
echo "unexpected fail"
$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)'
$QEMU_IMG amend -o "compat=0.10,size=32M" "$TEST_IMG" &&
echo "unexpected pass"
$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)'
$QEMU_IMG snapshot -a bar "$TEST_IMG" ||
echo "unexpected fail"
$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)'
$QEMU_IMG snapshot -d bar "$TEST_IMG"
$QEMU_IMG amend -o "compat=0.10,size=32M" "$TEST_IMG" ||
echo "unexpected fail"
$PYTHON qcow2.py "$TEST_IMG" dump-header | grep '^\(version\|size\|nb_snap\)'
_check_test_img
echo
echo "=== Testing dirty lazy_refcounts=off ==="
echo


@ -271,6 +271,34 @@ read 65536/65536 bytes at offset 44040192
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
No errors were found on the image.
=== Testing resize with snapshots ===
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=33554432
wrote 65536/65536 bytes at offset 25165824
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
qemu-img: Can't resize a v2 image which has snapshots
version 2
size 33554432
nb_snapshots 1
version 3
size 134217728
nb_snapshots 1
Image resized.
version 3
size 67108864
nb_snapshots 2
qemu-img: Internal snapshots prevent downgrade of image
version 3
size 33554432
nb_snapshots 2
version 3
size 134217728
nb_snapshots 2
version 2
size 33554432
nb_snapshots 1
No errors were found on the image.
=== Testing dirty lazy_refcounts=off ===
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864


@ -1,6 +1,6 @@
#!/usr/bin/env bash
#
# Test encrypted read/write using plain bdrv_read/bdrv_write
# Test encrypted read/write using plain bdrv_pread/bdrv_pwrite
#
# Copyright (C) 2015 Red Hat, Inc.
#


@ -382,8 +382,7 @@ def test_once(config, qemu_img=False):
# Obviously we only work with the luks image format
iotests.verify_image_format(supported_fmts=['luks'])
iotests.verify_platform()
iotests.script_initialize(supported_fmts=['luks'])
# We need sudo in order to run cryptsetup to create
# dm-crypt devices. This is safe to use on any


@ -122,7 +122,7 @@ for opts1 in "" "read-only=on" "read-only=on,force-share=on"; do
_run_cmd $QEMU_IMG check $L "${TEST_IMG}"
_run_cmd $QEMU_IMG compare $L "${TEST_IMG}" "${TEST_IMG}"
_run_cmd $QEMU_IMG map $L "${TEST_IMG}"
_run_cmd $QEMU_IMG amend -o "" $L "${TEST_IMG}"
_run_cmd $QEMU_IMG amend -o "size=$size" $L "${TEST_IMG}"
_run_cmd $QEMU_IMG commit $L "${TEST_IMG}"
_run_cmd $QEMU_IMG resize $L "${TEST_IMG}" $size
_run_cmd $QEMU_IMG rebase $L "${TEST_IMG}" -b "${TEST_IMG}.base"


@ -56,7 +56,7 @@ _qemu_img_wrapper map TEST_DIR/t.qcow2
qemu-img: Could not open 'TEST_DIR/t.qcow2': Failed to get shared "write" lock
Is another process using the image [TEST_DIR/t.qcow2]?
_qemu_img_wrapper amend -o TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o size=32M TEST_DIR/t.qcow2
qemu-img: Could not open 'TEST_DIR/t.qcow2': Failed to get "write" lock
Is another process using the image [TEST_DIR/t.qcow2]?
@ -118,7 +118,7 @@ _qemu_img_wrapper compare -U TEST_DIR/t.qcow2 TEST_DIR/t.qcow2
_qemu_img_wrapper map -U TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o -U TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o size=32M -U TEST_DIR/t.qcow2
qemu-img: unrecognized option '-U'
Try 'qemu-img --help' for more information
@ -187,7 +187,7 @@ _qemu_img_wrapper compare TEST_DIR/t.qcow2 TEST_DIR/t.qcow2
_qemu_img_wrapper map TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o size=32M TEST_DIR/t.qcow2
qemu-img: Could not open 'TEST_DIR/t.qcow2': Failed to get "write" lock
Is another process using the image [TEST_DIR/t.qcow2]?
@ -241,7 +241,7 @@ _qemu_img_wrapper compare -U TEST_DIR/t.qcow2 TEST_DIR/t.qcow2
_qemu_img_wrapper map -U TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o -U TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o size=32M -U TEST_DIR/t.qcow2
qemu-img: unrecognized option '-U'
Try 'qemu-img --help' for more information
@ -303,7 +303,7 @@ _qemu_img_wrapper compare TEST_DIR/t.qcow2 TEST_DIR/t.qcow2
_qemu_img_wrapper map TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o size=32M TEST_DIR/t.qcow2
_qemu_img_wrapper commit TEST_DIR/t.qcow2
@ -345,7 +345,7 @@ _qemu_img_wrapper compare -U TEST_DIR/t.qcow2 TEST_DIR/t.qcow2
_qemu_img_wrapper map -U TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o -U TEST_DIR/t.qcow2
_qemu_img_wrapper amend -o size=32M -U TEST_DIR/t.qcow2
qemu-img: unrecognized option '-U'
Try 'qemu-img --help' for more information


@ -188,7 +188,7 @@ class MirrorBaseClass(BaseClass):
self.assert_qmp(result, 'return', {})
self.vm.run_job('mirror-job', use_log=False, auto_finalize=False,
self.vm.run_job('mirror-job', auto_finalize=False,
pre_finalize=self.openBacking, auto_dismiss=True)
def testFull(self):


@ -1,6 +1,6 @@
#!/usr/bin/env bash
#
# Test encrypted read/write using plain bdrv_read/bdrv_write
# Test encrypted read/write using plain bdrv_pread/bdrv_pwrite
#
# Copyright (C) 2017 Red Hat, Inc.
#


@ -21,8 +21,8 @@
import iotests
iotests.verify_image_format(supported_fmts=['qcow2', 'qed', 'raw'])
iotests.verify_platform(['linux'])
iotests.script_initialize(supported_fmts=['qcow2', 'qed', 'raw'],
supported_platforms=['linux'])
with iotests.FilePath('source.img') as source_img_path, \
iotests.FilePath('dest.img') as dest_img_path, \


@ -24,8 +24,8 @@
import iotests
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.verify_platform(['linux'])
iotests.script_initialize(supported_fmts=['qcow2'],
supported_platforms=['linux'])
with iotests.FilePath('disk0.img') as disk0_img_path, \
iotests.FilePath('disk1.img') as disk1_img_path, \


@ -24,8 +24,8 @@
import iotests
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.verify_platform(['linux'])
iotests.script_initialize(supported_fmts=['qcow2'],
supported_platforms=['linux'])
with iotests.FilePath('disk0.img') as disk0_img_path, \
iotests.FilePath('disk1.img') as disk1_img_path, \


@ -23,7 +23,7 @@
import iotests
from iotests import imgfmt
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(supported_fmts=['qcow2'])
with iotests.FilePath('t.qcow2') as disk_path, \
iotests.FilePath('t.qcow2.base') as backing_path, \


@ -24,8 +24,10 @@ import iotests
import subprocess
import re
iotests.verify_image_format(supported_fmts=['raw'])
iotests.verify_protocol(supported=['ssh'])
iotests.script_initialize(
supported_fmts=['raw'],
supported_protocols=['ssh'],
)
def filter_hash(qmsg):
def _filter(key, value):


@ -22,7 +22,7 @@
import iotests
iotests.verify_image_format(supported_fmts=['generic'])
iotests.script_initialize(supported_fmts=['generic'])
with iotests.FilePath('disk.img') as disk_img_path, \
iotests.FilePath('disk-snapshot.img') as disk_snapshot_img_path, \


@ -22,7 +22,7 @@ import iotests
from iotests import qemu_img_create, qemu_io, qemu_img_verbose, qemu_nbd, \
file_path
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(supported_fmts=['qcow2'])
disk = file_path('disk')
nbd_sock = file_path('nbd-sock', base_dir=iotests.sock_dir)


@ -23,8 +23,10 @@
import iotests
from iotests import imgfmt
iotests.verify_image_format(supported_fmts=['luks'])
iotests.verify_protocol(supported=['file'])
iotests.script_initialize(
supported_fmts=['luks'],
supported_protocols=['file'],
)
with iotests.FilePath('t.luks') as disk_path, \
iotests.VM() as vm:


@ -23,8 +23,10 @@
import iotests
from iotests import imgfmt
iotests.verify_image_format(supported_fmts=['vdi'])
iotests.verify_protocol(supported=['file'])
iotests.script_initialize(
supported_fmts=['vdi'],
supported_protocols=['file'],
)
def blockdev_create(vm, options):
error = vm.blockdev_create(options)


@ -23,8 +23,10 @@
import iotests
from iotests import imgfmt
iotests.verify_image_format(supported_fmts=['parallels'])
iotests.verify_protocol(supported=['file'])
iotests.script_initialize(
supported_fmts=['parallels'],
supported_protocols=['file'],
)
with iotests.FilePath('t.parallels') as disk_path, \
iotests.VM() as vm:


@ -23,8 +23,10 @@
import iotests
from iotests import imgfmt
iotests.verify_image_format(supported_fmts=['vhdx'])
iotests.verify_protocol(supported=['file'])
iotests.script_initialize(
supported_fmts=['vhdx'],
supported_protocols=['file'],
)
with iotests.FilePath('t.vhdx') as disk_path, \
iotests.VM() as vm:


@ -23,8 +23,8 @@ import iotests
from iotests import log, qemu_img, qemu_io_silent
# Need backing file support
iotests.verify_image_format(supported_fmts=['qcow2', 'qcow', 'qed', 'vmdk'])
iotests.verify_platform(['linux'])
iotests.script_initialize(supported_fmts=['qcow2', 'qcow', 'qed', 'vmdk'],
supported_platforms=['linux'])
log('')
log('=== Copy-on-read across nodes ===')


@ -29,7 +29,7 @@
import iotests
from iotests import log, qemu_img, qemu_io_silent
iotests.verify_image_format(supported_fmts=['qcow2', 'raw'])
iotests.script_initialize(supported_fmts=['qcow2', 'raw'])
# Launches the VM, adds two null-co nodes (source and target), and


@ -21,7 +21,7 @@
import iotests
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(supported_fmts=['qcow2'])
img_size = 4 * 1024 * 1024


@ -24,9 +24,10 @@
import iotests
from iotests import log, qemu_img, qemu_io, qemu_io_silent
iotests.verify_platform(['linux'])
iotests.verify_image_format(supported_fmts=['qcow2', 'qcow', 'qed', 'vmdk',
'vhdx', 'raw'])
iotests.script_initialize(
supported_fmts=['qcow2', 'qcow', 'qed', 'vmdk', 'vhdx', 'raw'],
supported_platforms=['linux'],
)
patterns = [("0x5d", "0", "64k"),
("0xd5", "1M", "64k"),


@ -26,8 +26,8 @@ from iotests import log, qemu_img, qemu_io_silent, filter_qmp_testfiles, \
import json
# Need backing file support (for arbitrary backing formats)
iotests.verify_image_format(supported_fmts=['qcow2', 'qcow', 'qed'])
iotests.verify_platform(['linux'])
iotests.script_initialize(supported_fmts=['qcow2', 'qcow', 'qed'],
supported_platforms=['linux'])
# There are two variations of this test:


@ -25,8 +25,10 @@ from iotests import log, qemu_img, filter_testfiles, filter_imgfmt, \
filter_qmp_testfiles, filter_qmp_imgfmt
# Need backing file and change-backing-file support
iotests.verify_image_format(supported_fmts=['qcow2', 'qed'])
iotests.verify_platform(['linux'])
iotests.script_initialize(
supported_fmts=['qcow2', 'qed'],
supported_platforms=['linux'],
)
def log_node_info(node):


@ -23,8 +23,8 @@
import iotests
import os
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.verify_platform(['linux'])
iotests.script_initialize(supported_fmts=['qcow2'],
supported_platforms=['linux'])
with iotests.FilePath('img') as img_path, \
iotests.FilePath('backing') as backing_path, \


@ -27,6 +27,8 @@ sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python'))
from qemu.machine import QEMUMachine
iotests.script_initialize(supported_fmts=['qcow2'])
# Note:
# This test was added to check that mirror dead-lock was fixed (see previous
# commit before this test addition).
@ -40,8 +42,6 @@ from qemu.machine import QEMUMachine
size = 1 * 1024 * 1024 * 1024
iotests.verify_image_format(supported_fmts=['qcow2'])
disk = file_path('disk')
# prepare source image


@ -22,7 +22,7 @@
import iotests
from iotests import log
iotests.verify_image_format(supported_fmts=['generic'])
iotests.script_initialize(supported_fmts=['generic'])
size = 64 * 1024 * 1024
granularity = 64 * 1024


@ -24,7 +24,7 @@ import math
import iotests
from iotests import imgfmt
iotests.verify_image_format(supported_fmts=['vmdk'])
iotests.script_initialize(supported_fmts=['vmdk'])
with iotests.FilePath('t.vmdk') as disk_path, \
iotests.FilePath('t.vmdk.1') as extent1_path, \


@ -23,6 +23,8 @@ import os
import iotests
from iotests import log
iotests.script_initialize()
virtio_scsi_device = iotests.get_virtio_scsi_device()
vm = iotests.VM()


@ -24,7 +24,7 @@ import struct
from iotests import qemu_img_create, qemu_io, qemu_img_pipe, \
file_path, img_info_log, log, filter_qemu_io
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(supported_fmts=['qcow2'])
disk = file_path('disk')
chunk = 256 * 1024


@ -1027,5 +1027,6 @@ class TestBlockdevReopen(iotests.QMPTestCase):
self.run_test_iothreads(None, 'iothread0')
if __name__ == '__main__':
iotests.activate_logging()
iotests.main(supported_fmts=["qcow2"],
supported_protocols=["file"])


@ -1,8 +1,3 @@
.....................
----------------------------------------------------------------------
Ran 21 tests
OK
{"execute": "job-finalize", "arguments": {"id": "commit0"}}
{"return": {}}
{"data": {"id": "commit0", "type": "commit"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
@ -15,3 +10,8 @@ OK
{"return": {}}
{"data": {"id": "stream0", "type": "stream"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"device": "stream0", "len": 3145728, "offset": 3145728, "speed": 0, "type": "stream"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
.....................
----------------------------------------------------------------------
Ran 21 tests
OK


@ -22,7 +22,7 @@
import iotests
from iotests import log
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(supported_fmts=['qcow2'])
size = 64 * 1024 * 1024 * 1024
gran_small = 32 * 1024
gran_large = 128 * 1024


@ -21,7 +21,7 @@
import iotests
from iotests import qemu_img_create, qemu_io, file_path, filter_qmp_testfiles
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(supported_fmts=['qcow2'])
source, target = file_path('source', 'target')
size = 5 * 1024 * 1024


@ -21,7 +21,7 @@
import iotests
from iotests import qemu_img_create, file_path, log
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(supported_fmts=['qcow2'])
disk, top = file_path('disk', 'top')
size = 1024 * 1024


@ -23,7 +23,7 @@
import iotests
from iotests import imgfmt
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(supported_fmts=['qcow2'])
iotests.log('Finishing a commit job with background reads')
iotests.log('============================================')


@ -23,7 +23,7 @@ import os
import iotests
from iotests import log
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(supported_fmts=['qcow2'])
size = 64 * 1024 * 1024
with iotests.FilePath('img0') as img0_path, \


@ -23,11 +23,6 @@ import iotests
from iotests import log, qemu_img, qemu_io_silent, \
filter_qmp_testfiles, filter_qmp_imgfmt
# Need backing file and change-backing-file support
iotests.verify_image_format(supported_fmts=['qcow2', 'qed'])
iotests.verify_platform(['linux'])
# Returns a node for blockdev-add
def node(node_name, path, backing=None, fmt=None, throttle=None):
if fmt is None:
@ -160,4 +155,7 @@ def main():
test_concurrent_finish(False)
if __name__ == '__main__':
main()
# Need backing file and change-backing-file support
iotests.script_main(main,
supported_fmts=['qcow2', 'qed'],
supported_platforms=['linux'])


@ -21,7 +21,9 @@
import iotests
from iotests import qemu_img_create, file_path, log, filter_qmp_event
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(
supported_fmts=['qcow2']
)
base, top = file_path('base', 'top')
size = 64 * 1024 * 3


@ -23,8 +23,8 @@
import iotests
import os
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.verify_platform(['linux'])
iotests.script_initialize(supported_fmts=['qcow2'],
supported_platforms=['linux'])
with iotests.FilePath('img') as img_path, \
iotests.FilePath('mig_fifo') as fifo, \


@ -24,7 +24,9 @@ import iotests
from iotests import qemu_img_create, qemu_io_silent_check, file_path, \
qemu_nbd_popen, log
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(
supported_fmts=['qcow2'],
)
disk_a, disk_b, nbd_sock = file_path('disk_a', 'disk_b', 'nbd-sock')
nbd_uri = 'nbd+unix:///?socket=' + nbd_sock


@ -21,8 +21,8 @@
import iotests
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.verify_platform(['linux'])
iotests.script_initialize(supported_fmts=['qcow2'],
supported_platforms=['linux'])
size_short = 1 * 1024 * 1024
size_long = 2 * 1024 * 1024


@ -23,6 +23,8 @@ import subprocess
import iotests
from iotests import file_path, log
iotests.script_initialize()
nbd_sock, conf_file = file_path('nbd-sock', 'nbd-fault-injector.conf')


@ -22,9 +22,11 @@
import iotests
import os
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.verify_protocol(supported=['file'])
iotests.verify_platform(['linux'])
iotests.script_initialize(
supported_fmts=['qcow2'],
supported_protocols=['file'],
supported_platforms=['linux'],
)
with iotests.FilePath('base') as base_path , \
iotests.FilePath('top') as top_path, \


@ -21,7 +21,9 @@
import iotests
# The test is unrelated to formats, restrict it to qcow2 to avoid extra runs
iotests.verify_image_format(supported_fmts=['qcow2'])
iotests.script_initialize(
supported_fmts=['qcow2'],
)
size = 1024 * 1024


@ -16,26 +16,39 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import errno
import os
import re
import subprocess
import string
import unittest
import sys
import struct
import json
import signal
import logging
import atexit
import io
from collections import OrderedDict
import faulthandler
import io
import json
import logging
import os
import re
import signal
import struct
import subprocess
import sys
from typing import (Any, Callable, Dict, Iterable,
List, Optional, Sequence, TypeVar)
import unittest
# pylint: disable=import-error, wrong-import-position
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python'))
from qemu import qtest
assert sys.version_info >= (3,6)
assert sys.version_info >= (3, 6)
# Type Aliases
QMPResponse = Dict[str, Any]
# Use this logger for logging messages directly from the iotests module
logger = logging.getLogger('qemu.iotests')
logger.addHandler(logging.NullHandler())
# Use this logger for messages that ought to be used for diff output.
test_logger = logging.getLogger('qemu.iotests.diff_io')
faulthandler.enable()
@ -80,9 +93,11 @@
def qemu_img(*args):
'''Run qemu-img and return the exit code'''
devnull = open('/dev/null', 'r+')
exitcode = subprocess.call(qemu_img_args + list(args), stdin=devnull, stdout=devnull)
exitcode = subprocess.call(qemu_img_args + list(args),
stdin=devnull, stdout=devnull)
if exitcode < 0:
sys.stderr.write('qemu-img received signal %i: %s\n' % (-exitcode, ' '.join(qemu_img_args + list(args))))
sys.stderr.write('qemu-img received signal %i: %s\n'
% (-exitcode, ' '.join(qemu_img_args + list(args))))
return exitcode
def ordered_qmp(qmsg, conv_keys=True):
@ -121,7 +136,8 @@ def qemu_img_verbose(*args):
'''Run qemu-img without suppressing its output and return the exit code'''
exitcode = subprocess.call(qemu_img_args + list(args))
if exitcode < 0:
sys.stderr.write('qemu-img received signal %i: %s\n' % (-exitcode, ' '.join(qemu_img_args + list(args))))
sys.stderr.write('qemu-img received signal %i: %s\n'
% (-exitcode, ' '.join(qemu_img_args + list(args))))
return exitcode
def qemu_img_pipe(*args):
@ -132,7 +148,8 @@ def qemu_img_pipe(*args):
universal_newlines=True)
exitcode = subp.wait()
if exitcode < 0:
sys.stderr.write('qemu-img received signal %i: %s\n' % (-exitcode, ' '.join(qemu_img_args + list(args))))
sys.stderr.write('qemu-img received signal %i: %s\n'
% (-exitcode, ' '.join(qemu_img_args + list(args))))
return subp.communicate()[0]
def qemu_img_log(*args):
@ -140,12 +157,12 @@ def qemu_img_log(*args):
log(result, filters=[filter_testfiles])
return result
def img_info_log(filename, filter_path=None, imgopts=False, extra_args=[]):
args = [ 'info' ]
def img_info_log(filename, filter_path=None, imgopts=False, extra_args=()):
args = ['info']
if imgopts:
args.append('--image-opts')
else:
args += [ '-f', imgfmt ]
args += ['-f', imgfmt]
args += extra_args
args.append(filename)
@ -162,7 +179,8 @@ def qemu_io(*args):
universal_newlines=True)
exitcode = subp.wait()
if exitcode < 0:
sys.stderr.write('qemu-io received signal %i: %s\n' % (-exitcode, ' '.join(args)))
sys.stderr.write('qemu-io received signal %i: %s\n'
% (-exitcode, ' '.join(args)))
return subp.communicate()[0]
def qemu_io_log(*args):
@ -224,7 +242,7 @@ def cmd(self, cmd):
# quit command is in close(), '\n' is added automatically
assert '\n' not in cmd
cmd = cmd.strip()
assert cmd != 'q' and cmd != 'quit'
assert cmd not in ('q', 'quit')
self._p.stdin.write(cmd + '\n')
self._p.stdin.flush()
return self._read_output()
@ -246,10 +264,8 @@ def qemu_nbd_early_pipe(*args):
sys.stderr.write('qemu-nbd received signal %i: %s\n' %
(-exitcode,
' '.join(qemu_nbd_args + ['--fork'] + list(args))))
if exitcode == 0:
return exitcode, ''
else:
return exitcode, subp.communicate()[0]
return exitcode, subp.communicate()[0] if exitcode else ''
def qemu_nbd_popen(*args):
'''Run qemu-nbd in daemon mode and return the parent's exit code'''
@ -286,10 +302,13 @@ def filter_test_dir(msg):
def filter_win32(msg):
return win32_re.sub("", msg)
qemu_io_re = re.compile(r"[0-9]* ops; [0-9\/:. sec]* \([0-9\/.inf]* [EPTGMKiBbytes]*\/sec and [0-9\/.inf]* ops\/sec\)")
qemu_io_re = re.compile(r"[0-9]* ops; [0-9\/:. sec]* "
r"\([0-9\/.inf]* [EPTGMKiBbytes]*\/sec "
r"and [0-9\/.inf]* ops\/sec\)")
def filter_qemu_io(msg):
msg = filter_win32(msg)
return qemu_io_re.sub("X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)", msg)
return qemu_io_re.sub("X ops; XX:XX:XX.X "
"(XXX YYY/sec and XXX ops/sec)", msg)
chown_re = re.compile(r"chown [0-9]+:[0-9]+")
def filter_chown(msg):
@ -313,7 +332,7 @@ def filter_qmp(qmsg, filter_fn):
items = qmsg.items()
for k, v in items:
if isinstance(v, list) or isinstance(v, dict):
if isinstance(v, (dict, list)):
qmsg[k] = filter_qmp(v, filter_fn)
else:
qmsg[k] = filter_fn(k, v)
@ -324,7 +343,7 @@ def filter_testfiles(msg):
return msg.replace(prefix, 'TEST_DIR/PID-')
def filter_qmp_testfiles(qmsg):
def _filter(key, value):
def _filter(_key, value):
if is_str(value):
return filter_testfiles(value)
return value
@ -342,7 +361,9 @@ def filter_img_info(output, filename):
line = filter_testfiles(line)
line = line.replace(imgfmt, 'IMGFMT')
line = re.sub('iters: [0-9]+', 'iters: XXX', line)
line = re.sub('uuid: [-a-f0-9]+', 'uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', line)
line = re.sub('uuid: [-a-f0-9]+',
'uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX',
line)
line = re.sub('cid: [0-9]+', 'cid: XXXXXXXXXX', line)
lines.append(line)
return '\n'.join(lines)
@ -351,36 +372,40 @@ def filter_imgfmt(msg):
return msg.replace(imgfmt, 'IMGFMT')
def filter_qmp_imgfmt(qmsg):
def _filter(key, value):
def _filter(_key, value):
if is_str(value):
return filter_imgfmt(value)
return value
return filter_qmp(qmsg, _filter)
def log(msg, filters=[], indent=None):
'''Logs either a string message or a JSON serializable message (like QMP).
If indent is provided, JSON serializable messages are pretty-printed.'''
Msg = TypeVar('Msg', Dict[str, Any], List[Any], str)
def log(msg: Msg,
filters: Iterable[Callable[[Msg], Msg]] = (),
indent: Optional[int] = None) -> None:
"""
Logs either a string message or a JSON serializable message (like QMP).
If indent is provided, JSON serializable messages are pretty-printed.
"""
for flt in filters:
msg = flt(msg)
if isinstance(msg, dict) or isinstance(msg, list):
# Python < 3.4 needs to know not to add whitespace when pretty-printing:
separators = (', ', ': ') if indent is None else (',', ': ')
if isinstance(msg, (dict, list)):
# Don't sort if it's already sorted
do_sort = not isinstance(msg, OrderedDict)
print(json.dumps(msg, sort_keys=do_sort,
indent=indent, separators=separators))
test_logger.info(json.dumps(msg, sort_keys=do_sort, indent=indent))
else:
print(msg)
test_logger.info(msg)
class Timeout:
def __init__(self, seconds, errmsg = "Timeout"):
def __init__(self, seconds, errmsg="Timeout"):
self.seconds = seconds
self.errmsg = errmsg
def __enter__(self):
signal.signal(signal.SIGALRM, self.timeout)
signal.setitimer(signal.ITIMER_REAL, self.seconds)
return self
def __exit__(self, type, value, traceback):
def __exit__(self, exc_type, value, traceback):
signal.setitimer(signal.ITIMER_REAL, 0)
return False
def timeout(self, signum, frame):
@ -389,7 +414,7 @@ def timeout(self, signum, frame):
def file_pattern(name):
return "{0}-{1}".format(os.getpid(), name)
class FilePaths(object):
class FilePaths:
"""
FilePaths is an auto-generated filename that cleans itself up.
@ -490,21 +515,21 @@ def add_drive_raw(self, opts):
self._args.append(opts)
return self
def add_drive(self, path, opts='', interface='virtio', format=imgfmt):
def add_drive(self, path, opts='', interface='virtio', img_format=imgfmt):
'''Add a virtio-blk drive to the VM'''
options = ['if=%s' % interface,
'id=drive%d' % self._num_drives]
if path is not None:
options.append('file=%s' % path)
options.append('format=%s' % format)
options.append('format=%s' % img_format)
options.append('cache=%s' % cachemode)
options.append('aio=%s' % aiomode)
if opts:
options.append(opts)
if format == 'luks' and 'key-secret' not in opts:
if img_format == 'luks' and 'key-secret' not in opts:
# default luks support
if luks_default_secret_object not in self._args:
self.add_object(luks_default_secret_object)
@ -529,30 +554,37 @@ def add_incoming(self, addr):
self._args.append(addr)
return self
def pause_drive(self, drive, event=None):
'''Pause drive r/w operations'''
def hmp(self, command_line: str, use_log: bool = False) -> QMPResponse:
cmd = 'human-monitor-command'
kwargs = {'command-line': command_line}
if use_log:
return self.qmp_log(cmd, **kwargs)
else:
return self.qmp(cmd, **kwargs)
def pause_drive(self, drive: str, event: Optional[str] = None) -> None:
"""Pause drive r/w operations"""
if not event:
self.pause_drive(drive, "read_aio")
self.pause_drive(drive, "write_aio")
return
self.qmp('human-monitor-command',
command_line='qemu-io %s "break %s bp_%s"' % (drive, event, drive))
self.hmp(f'qemu-io {drive} "break {event} bp_{drive}"')
def resume_drive(self, drive):
self.qmp('human-monitor-command',
command_line='qemu-io %s "remove_break bp_%s"' % (drive, drive))
def resume_drive(self, drive: str) -> None:
"""Resume drive r/w operations"""
self.hmp(f'qemu-io {drive} "remove_break bp_{drive}"')
def hmp_qemu_io(self, drive, cmd):
'''Write to a given drive using an HMP command'''
return self.qmp('human-monitor-command',
command_line='qemu-io %s "%s"' % (drive, cmd))
def hmp_qemu_io(self, drive: str, cmd: str,
use_log: bool = False) -> QMPResponse:
"""Write to a given drive using an HMP command"""
return self.hmp(f'qemu-io {drive} "{cmd}"', use_log=use_log)
def flatten_qmp_object(self, obj, output=None, basestr=''):
if output is None:
output = dict()
if isinstance(obj, list):
for i in range(len(obj)):
self.flatten_qmp_object(obj[i], output, basestr + str(i) + '.')
for i, item in enumerate(obj):
self.flatten_qmp_object(item, output, basestr + str(i) + '.')
elif isinstance(obj, dict):
for key in obj:
self.flatten_qmp_object(obj[key], output, basestr + key + '.')
@ -573,7 +605,7 @@ def get_qmp_events_filtered(self, wait=60.0):
result.append(filter_qmp_event(ev))
return result
def qmp_log(self, cmd, filters=[], indent=None, **kwargs):
def qmp_log(self, cmd, filters=(), indent=None, **kwargs):
full_cmd = OrderedDict((
("execute", cmd),
("arguments", ordered_qmp(kwargs))
@ -585,7 +617,7 @@ def qmp_log(self, cmd, filters=[], indent=None, **kwargs):
# Returns None on success, and an error string on failure
def run_job(self, job, auto_finalize=True, auto_dismiss=False,
pre_finalize=None, cancel=False, use_log=True, wait=60.0):
pre_finalize=None, cancel=False, wait=60.0):
"""
run_job moves a job from creation through to dismissal.
@ -598,7 +630,6 @@ def run_job(self, job, auto_finalize=True, auto_dismiss=False,
invoked prior to issuing job-finalize, if any.
:param cancel: Bool. When true, cancels the job after the pre_finalize
callback.
:param use_log: Bool. When false, does not log QMP messages.
:param wait: Float. Timeout value specifying how long to wait for any
event, in seconds. Defaults to 60.0.
"""
@ -616,8 +647,7 @@ def run_job(self, job, auto_finalize=True, auto_dismiss=False,
while True:
ev = filter_qmp_event(self.events_wait(events, timeout=wait))
if ev['event'] != 'JOB_STATUS_CHANGE':
if use_log:
log(ev)
log(ev)
continue
status = ev['data']['status']
if status == 'aborting':
@ -625,29 +655,18 @@ def run_job(self, job, auto_finalize=True, auto_dismiss=False,
for j in result['return']:
if j['id'] == job:
error = j['error']
if use_log:
log('Job failed: %s' % (j['error']))
log('Job failed: %s' % (j['error']))
elif status == 'ready':
if use_log:
self.qmp_log('job-complete', id=job)
else:
self.qmp('job-complete', id=job)
self.qmp_log('job-complete', id=job)
elif status == 'pending' and not auto_finalize:
if pre_finalize:
pre_finalize()
if cancel and use_log:
if cancel:
self.qmp_log('job-cancel', id=job)
elif cancel:
self.qmp('job-cancel', id=job)
elif use_log:
else:
self.qmp_log('job-finalize', id=job)
else:
self.qmp('job-finalize', id=job)
elif status == 'concluded' and not auto_dismiss:
if use_log:
self.qmp_log('job-dismiss', id=job)
else:
self.qmp('job-dismiss', id=job)
self.qmp_log('job-dismiss', id=job)
elif status == 'null':
return error
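# Illustrative usage sketch (not part of this patch; device/job names are
# hypothetical): with the use_log switch gone, a caller simply starts a job
# and lets run_job() log every transition via qmp_log()/log(), e.g.
#
#     vm.qmp_log('block-commit', device='drive0')
#     error = vm.run_job('drive0')
#     assert error is None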
@ -710,9 +729,7 @@ def get_bitmap(self, node_name, bitmap_name, recording=None, bitmaps=None):
for bitmap in bitmaps[node_name]:
if bitmap.get('name', '') == bitmap_name:
if recording is None:
return bitmap
elif bitmap.get('recording') == recording:
if recording is None or bitmap.get('recording') == recording:
return bitmap
return None
@ -763,12 +780,13 @@ def assert_block_path(self, root, path, expected_node, graph=None):
assert node is not None, 'Cannot follow path %s%s' % (root, path)
try:
node_id = next(edge['child'] for edge in graph['edges'] \
if edge['parent'] == node['id'] and
edge['name'] == child_name)
node_id = next(edge['child'] for edge in graph['edges']
if (edge['parent'] == node['id'] and
edge['name'] == child_name))
node = next(node for node in graph['nodes']
if node['id'] == node_id)
node = next(node for node in graph['nodes'] \
if node['id'] == node_id)
except StopIteration:
node = None
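# Illustrative usage sketch (not part of this patch; node names are
# hypothetical): assert that following the "file" child from node 'top'
# reaches the node named 'proto-file':
#
#     self.vm.assert_block_path('top', '/file', 'proto-file')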
@ -786,6 +804,12 @@ def assert_block_path(self, root, path, expected_node, graph=None):
class QMPTestCase(unittest.TestCase):
'''Abstract base class for QMP test cases'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Many users of this class set a VM property we rely on heavily
# in the methods below.
self.vm = None
def dictpath(self, d, path):
'''Traverse a path in a nested dict'''
for component in path.split('/'):
@ -795,16 +819,18 @@ def dictpath(self, d, path):
idx = int(idx)
if not isinstance(d, dict) or component not in d:
self.fail('failed path traversal for "%s" in "%s"' % (path, str(d)))
self.fail(f'failed path traversal for "{path}" in "{d}"')
d = d[component]
if m:
if not isinstance(d, list):
self.fail('path component "%s" in "%s" is not a list in "%s"' % (component, path, str(d)))
self.fail(f'path component "{component}" in "{path}" '
f'is not a list in "{d}"')
try:
d = d[idx]
except IndexError:
self.fail('invalid index "%s" in path "%s" in "%s"' % (idx, path, str(d)))
self.fail(f'invalid index "{idx}" in path "{path}" '
f'in "{d}"')
return d
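# Illustrative sketch (not part of this patch): dictpath() is what gives
# assert_qmp() its path syntax; for a reply like
#
#     {'return': [{'device': 'drive0', 'len': 65536}]}
#
# the path 'return[0]/len' resolves to 65536, so a test can write
#
#     self.assert_qmp(result, 'return[0]/len', 65536)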
def assert_qmp_absent(self, d, path):
@ -831,7 +857,7 @@ def assert_qmp(self, d, path, value):
else:
self.assertEqual(result, value,
'"%s" is "%s", expected "%s"'
% (path, str(result), str(value)))
% (path, str(result), str(value)))
def assert_no_active_block_jobs(self):
result = self.vm.qmp('query-block-jobs')
@ -841,24 +867,27 @@ def assert_has_block_node(self, node_name=None, file_name=None):
"""Issue a query-named-block-nodes and assert node_name and/or
file_name is present in the result"""
def check_equal_or_none(a, b):
return a == None or b == None or a == b
return a is None or b is None or a == b
assert node_name or file_name
result = self.vm.qmp('query-named-block-nodes')
for x in result["return"]:
if check_equal_or_none(x.get("node-name"), node_name) and \
check_equal_or_none(x.get("file"), file_name):
return
self.assertTrue(False, "Cannot find %s %s in result:\n%s" % \
(node_name, file_name, result))
self.fail("Cannot find %s %s in result:\n%s" %
(node_name, file_name, result))
def assert_json_filename_equal(self, json_filename, reference):
'''Asserts that the given filename is a json: filename and that its
content is equal to the given reference object'''
self.assertEqual(json_filename[:5], 'json:')
self.assertEqual(self.vm.flatten_qmp_object(json.loads(json_filename[5:])),
self.vm.flatten_qmp_object(reference))
self.assertEqual(
self.vm.flatten_qmp_object(json.loads(json_filename[5:])),
self.vm.flatten_qmp_object(reference)
)
def cancel_and_wait(self, drive='drive0', force=False, resume=False, wait=60.0):
def cancel_and_wait(self, drive='drive0', force=False,
resume=False, wait=60.0):
'''Cancel a block job and wait for it to finish, returning the event'''
result = self.vm.qmp('block-job-cancel', device=drive, force=force)
self.assert_qmp(result, 'return', {})
@ -882,8 +911,8 @@ def cancel_and_wait(self, drive='drive0', force=False, resume=False, wait=60.0):
self.assert_no_active_block_jobs()
return result
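# Illustrative usage sketch (not part of this patch; drive name and job type
# are hypothetical):
#
#     event = self.cancel_and_wait(drive='drive0', force=True)
#     self.assert_qmp(event, 'data/type', 'mirror')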
def wait_until_completed(self, drive='drive0', check_offset=True, wait=60.0,
error=None):
def wait_until_completed(self, drive='drive0', check_offset=True,
wait=60.0, error=None):
'''Wait for a block job to finish, returning the event'''
while True:
for event in self.vm.get_qmp_events(wait=wait):
@ -898,13 +927,13 @@ def wait_until_completed(self, drive='drive0', check_offset=True, wait=60.0,
self.assert_qmp(event, 'data/error', error)
self.assert_no_active_block_jobs()
return event
elif event['event'] == 'JOB_STATUS_CHANGE':
if event['event'] == 'JOB_STATUS_CHANGE':
self.assert_qmp(event, 'data/id', drive)
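# Illustrative usage sketch (not part of this patch; job id and type are
# hypothetical):
#
#     event = self.wait_until_completed(drive='job0')
#     self.assert_qmp(event, 'data/type', 'backup')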
def wait_ready(self, drive='drive0'):
'''Wait until a block job BLOCK_JOB_READY event'''
f = {'data': {'type': 'mirror', 'device': drive } }
event = self.vm.event_wait(name='BLOCK_JOB_READY', match=f)
"""Wait until a BLOCK_JOB_READY event, and return the event."""
f = {'data': {'type': 'mirror', 'device': drive}}
return self.vm.event_wait(name='BLOCK_JOB_READY', match=f)
def wait_ready_and_cancel(self, drive='drive0'):
self.wait_ready(drive=drive)
@ -933,7 +962,7 @@ def pause_wait(self, job_id='job0'):
for job in result['return']:
if job['device'] == job_id:
found = True
if job['paused'] == True and job['busy'] == False:
if job['paused'] and not job['busy']:
return job
break
assert found
@ -957,7 +986,7 @@ def notrun(reason):
seq = os.path.basename(sys.argv[0])
open('%s/%s.notrun' % (output_dir, seq), 'w').write(reason + '\n')
print('%s not run: %s' % (seq, reason))
logger.warning("%s not run: %s", seq, reason)
sys.exit(0)
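# Illustrative sketch (not part of this patch; the reason string is
# hypothetical): a test that detects a missing prerequisite at runtime calls
#
#     iotests.notrun('this test requires feature X')
#
# and the skip reason now reaches the console through the module logger as a
# warning rather than a bare print().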
def case_notrun(reason):
@ -972,7 +1001,8 @@ def case_notrun(reason):
open('%s/%s.casenotrun' % (output_dir, seq), 'a').write(
' [case not run] ' + reason + '\n')
def verify_image_format(supported_fmts=[], unsupported_fmts=[]):
def _verify_image_format(supported_fmts: Sequence[str] = (),
unsupported_fmts: Sequence[str] = ()) -> None:
assert not (supported_fmts and unsupported_fmts)
if 'generic' in supported_fmts and \
@ -986,7 +1016,8 @@ def verify_image_format(supported_fmts=[], unsupported_fmts=[]):
if not_sup or (imgfmt in unsupported_fmts):
notrun('not suitable for this image format: %s' % imgfmt)
def verify_protocol(supported=[], unsupported=[]):
def _verify_protocol(supported: Sequence[str] = (),
unsupported: Sequence[str] = ()) -> None:
assert not (supported and unsupported)
if 'generic' in supported:
@ -996,20 +1027,20 @@ def verify_protocol(supported=[], unsupported=[]):
if not_sup or (imgproto in unsupported):
notrun('not suitable for this protocol: %s' % imgproto)
def verify_platform(supported=None, unsupported=None):
if unsupported is not None:
if any((sys.platform.startswith(x) for x in unsupported)):
notrun('not suitable for this OS: %s' % sys.platform)
def _verify_platform(supported: Sequence[str] = (),
unsupported: Sequence[str] = ()) -> None:
if any((sys.platform.startswith(x) for x in unsupported)):
notrun('not suitable for this OS: %s' % sys.platform)
if supported is not None:
if supported:
if not any((sys.platform.startswith(x) for x in supported)):
notrun('not suitable for this OS: %s' % sys.platform)
def verify_cache_mode(supported_cache_modes=[]):
def _verify_cache_mode(supported_cache_modes: Sequence[str] = ()) -> None:
if supported_cache_modes and (cachemode not in supported_cache_modes):
notrun('not suitable for this cache mode: %s' % cachemode)
def verify_aio_mode(supported_aio_modes=[]):
def _verify_aio_mode(supported_aio_modes: Sequence[str] = ()):
if supported_aio_modes and (aiomode not in supported_aio_modes):
notrun('not suitable for this aio mode: %s' % aiomode)
@ -1022,16 +1053,19 @@ def verify_quorum():
notrun('quorum support missing')
def qemu_pipe(*args):
'''Run qemu with an option to print something and exit (e.g. a help option),
and return its output'''
"""
Run qemu with an option to print something and exit (e.g. a help option).
:return: QEMU's stdout output.
"""
args = [qemu_prog] + qemu_opts + list(args)
subp = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
exitcode = subp.wait()
if exitcode < 0:
sys.stderr.write('qemu received signal %i: %s\n' % (-exitcode,
' '.join(args)))
sys.stderr.write('qemu received signal %i: %s\n' %
(-exitcode, ' '.join(args)))
return subp.communicate()[0]
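# Illustrative usage sketch (not part of this patch): this is how
# supported_formats() right below queries the block-format whitelist, e.g.
#
#     help_text = qemu_pipe('-drive', 'format=help')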
def supported_formats(read_only=False):
@ -1049,7 +1083,7 @@ def supported_formats(read_only=False):
return supported_formats.formats[read_only]
def skip_if_unsupported(required_formats=[], read_only=False):
def skip_if_unsupported(required_formats=(), read_only=False):
'''Skip Test Decorator
Runs the test if all the required formats are whitelisted'''
def skip_test_decorator(func):
@ -1061,8 +1095,9 @@ def func_wrapper(test_case: QMPTestCase, *args, **kwargs):
usf_list = list(set(fmts) - set(supported_formats(read_only)))
if usf_list:
test_case.case_skip('{}: formats {} are not whitelisted'.format(
test_case, usf_list))
msg = f'{test_case}: formats {usf_list} are not whitelisted'
test_case.case_skip(msg)
return None
else:
return func(test_case, *args, **kwargs)
return func_wrapper
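# Illustrative usage sketch (not part of this patch; class, method and format
# names are hypothetical):
#
#     class TestMirror(iotests.QMPTestCase):
#         @iotests.skip_if_unsupported(required_formats=['qcow2'])
#         def test_mirror_to_qcow2(self):
#             ...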
@ -1074,11 +1109,23 @@ def skip_if_user_is_root(func):
def func_wrapper(*args, **kwargs):
if os.getuid() == 0:
case_notrun('{}: cannot be run as root'.format(args[0]))
return None
else:
return func(*args, **kwargs)
return func_wrapper
def execute_unittest(output, verbosity, debug):
def execute_unittest(debug=False):
"""Executes unittests within the calling module."""
verbosity = 2 if debug else 1
if debug:
output = sys.stdout
else:
# We need to filter out the time taken from the output so that
# qemu-iotest can reliably diff the results against master output.
output = io.StringIO()
runner = unittest.TextTestRunner(stream=output, descriptions=True,
verbosity=verbosity)
try:
@ -1086,6 +1133,8 @@ def execute_unittest(output, verbosity, debug):
# exception
unittest.main(testRunner=runner)
finally:
# We need to filter out the time taken from the output so that
# qemu-iotest can reliably diff the results against master output.
if not debug:
out = output.getvalue()
out = re.sub(r'Ran (\d+) tests? in [\d.]+s', r'Ran \1 tests', out)
@ -1097,13 +1146,19 @@ def execute_unittest(output, verbosity, debug):
sys.stderr.write(out)
def execute_test(test_function=None,
supported_fmts=[],
supported_platforms=None,
supported_cache_modes=[], supported_aio_modes={},
unsupported_fmts=[], supported_protocols=[],
unsupported_protocols=[]):
"""Run either unittest or script-style tests."""
def execute_setup_common(supported_fmts: Sequence[str] = (),
supported_platforms: Sequence[str] = (),
supported_cache_modes: Sequence[str] = (),
supported_aio_modes: Sequence[str] = (),
unsupported_fmts: Sequence[str] = (),
supported_protocols: Sequence[str] = (),
unsupported_protocols: Sequence[str] = ()) -> bool:
"""
Perform necessary setup for either script-style or unittest-style tests.
:return: Bool; Whether or not debug mode has been requested via the CLI.
"""
# Note: Python 3.6 and pylint do not like 'Collection' so use 'Sequence'.
# We are using TEST_DIR and QEMU_DEFAULT_MACHINE as proxies to
# indicate that we're not being run via "check". There may be
@ -1113,34 +1168,51 @@ def execute_test(test_function=None,
sys.stderr.write('Please run this test via the "check" script\n')
sys.exit(os.EX_USAGE)
_verify_image_format(supported_fmts, unsupported_fmts)
_verify_protocol(supported_protocols, unsupported_protocols)
_verify_platform(supported=supported_platforms)
_verify_cache_mode(supported_cache_modes)
_verify_aio_mode(supported_aio_modes)
debug = '-d' in sys.argv
verbosity = 1
verify_image_format(supported_fmts, unsupported_fmts)
verify_protocol(supported_protocols, unsupported_protocols)
verify_platform(supported=supported_platforms)
verify_cache_mode(supported_cache_modes)
verify_aio_mode(supported_aio_modes)
if debug:
output = sys.stdout
verbosity = 2
sys.argv.remove('-d')
else:
# We need to filter out the time taken from the output so that
# qemu-iotest can reliably diff the results against master output.
output = io.StringIO()
logging.basicConfig(level=(logging.DEBUG if debug else logging.WARN))
logger.debug("iotests debugging messages active")
return debug
def execute_test(*args, test_function=None, **kwargs):
"""Run either unittest or script-style tests."""
debug = execute_setup_common(*args, **kwargs)
if not test_function:
execute_unittest(output, verbosity, debug)
execute_unittest(debug)
else:
test_function()
def activate_logging():
"""Activate iotests.log() output to stdout for script-style tests."""
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
test_logger.addHandler(handler)
test_logger.setLevel(logging.INFO)
test_logger.propagate = False
# This is called from script-style iotests without a single point of entry
def script_initialize(*args, **kwargs):
"""Initialize script-style tests without running any tests."""
activate_logging()
execute_setup_common(*args, **kwargs)
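# Illustrative usage sketch (not part of this patch; format/protocol lists are
# hypothetical): a script-style test with several ad-hoc entry points can now
# begin with
#
#     iotests.script_initialize(supported_fmts=['qcow2'],
#                               supported_protocols=['file'])
#     iotests.log('output recorded via the logging-based iotests.log()')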
# This is called from script-style iotests with a single point of entry
def script_main(test_function, *args, **kwargs):
"""Run script-style tests outside of the unittest framework"""
execute_test(test_function, *args, **kwargs)
activate_logging()
execute_test(*args, test_function=test_function, **kwargs)
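# Illustrative usage sketch (not part of this patch; names are hypothetical):
# a test with a single main function would end with
#
#     def main_test():
#         ...
#
#     if __name__ == '__main__':
#         iotests.script_main(main_test, supported_fmts=['qcow2'])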
# This is called from unittest style iotests
def main(*args, **kwargs):
"""Run tests using the unittest framework"""
execute_test(None, *args, **kwargs)
execute_test(*args, **kwargs)
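# Illustrative usage sketch (not part of this patch; format/protocol lists are
# hypothetical): unittest-style tests are unchanged and keep ending with
#
#     if __name__ == '__main__':
#         iotests.main(supported_fmts=['qcow2'],
#                      supported_protocols=['file'])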
View file

@ -0,0 +1,26 @@
[MESSAGES CONTROL]
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=invalid-name,
no-else-return,
too-few-public-methods,
too-many-arguments,
too-many-branches,
too-many-lines,
too-many-locals,
too-many-public-methods,
# These are temporary, and should be removed:
missing-docstring,
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79