Mostly bugfixes, plus a patch to mark accelerator MemoryRegions in "info

mtree" that has been lingering for too long.
 -----BEGIN PGP SIGNATURE-----
 
 iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAl0yOgoUHHBib256aW5p
 QHJlZGhhdC5jb20ACgkQv/vSX3jHroMmdwf/WM4Su7hXnyp34Z6lcT4Wq25qb397
 Bmv3GSqA94Ex3mUAFPx8+PyF1KVxRGsFuobxZ9KartPt7VwLFONApN6D+Ul1GXMn
 aSZ/eR9K7GCdrjVCKMSEtIX2KSgyrAhNIKVF61DjWCGXXYVXllqbtaaCHAkl012g
 JR5nlCqRTYqODgwhkynoqNtq13gkRokiAO0BMsk3xwzJ9UO6aOIu71TtFy3jsUn5
 ff0Mm4G6SEP9IIAC3L9lbwZvEArnWbJlL7X1j5C1tbid+Gx5b/W5CWDWO84idZZh
 FctkRgCPoVHucQYZh+OdAveWuN24tBLfA1a4zu4vSKNkTKS/SHb5YpSXAA==
 =nIGk
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

Mostly bugfixes, plus a patch to mark accelerator MemoryRegions in "info
mtree" that has been lingering for too long.

# gpg: Signature made Fri 19 Jul 2019 22:45:46 BST
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream:
  target/i386: sev: fix failed message typos
  i386: indicate that 'pconfig' feature was removed intentionally
  build-sys: do not support modules on Windows
  qmp: don't emit the RESET event on wakeup
  hmp: Print if memory section is registered with an accelerator
  test-bitmap: add test for bitmap_set
  scsi-generic: Check sense key before request snooping and patching
  vhost-user-scsi: Call virtio_scsi_common_unrealize() when device realize failed
  vhost-scsi: Call virtio_scsi_common_unrealize() when device realize failed
  virtio-scsi: remove unused argument to virtio_scsi_common_realize
  target/i386: skip KVM_GET/SET_NESTED_STATE if VMX disabled, or for SVM
  target/i386: kvm: Demand nested migration kernel capabilities only when vCPU may have enabled VMX

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2019-07-22 13:20:49 +01:00
commit 9d2e1fcd14
17 changed files with 220 additions and 79 deletions

View file

@ -111,6 +111,13 @@ struct KVMState
/* memory encryption */
void *memcrypt_handle;
int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);
/* For "info mtree -f" to tell if an MR is registered in KVM */
int nr_as;
struct KVMAs {
KVMMemoryListener *ml;
AddressSpace *as;
} *as;
};
KVMState *kvm_state;
@ -1159,6 +1166,14 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
kml->listener.priority = 10;
memory_listener_register(&kml->listener, as);
for (i = 0; i < s->nr_as; ++i) {
if (!s->as[i].as) {
s->as[i].as = as;
s->as[i].ml = kml;
break;
}
}
}
static MemoryListener kvm_io_listener = {
@ -1809,6 +1824,12 @@ static int kvm_init(MachineState *ms)
s->nr_slots = 32;
}
s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
if (s->nr_as <= 1) {
s->nr_as = 1;
}
s->as = g_new0(struct KVMAs, s->nr_as);
kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
if (mc->kvm_type) {
type = mc->kvm_type(ms, kvm_type);
@ -2828,11 +2849,28 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
return r;
}
/*
 * AccelClass::has_memory implementation for KVM.
 *
 * Returns true if the range [start_addr, start_addr + size) of address
 * space @as has a matching KVM memory slot (looked up on the
 * KVMMemoryListener that was registered for @as).  Consumed by
 * "info mtree -f" to tag ranges that are registered with the accelerator.
 */
static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
hwaddr start_addr, hwaddr size)
{
KVMState *kvm = KVM_STATE(ms->accelerator);
int i;
/* Find the listener that was registered for this address space. */
for (i = 0; i < kvm->nr_as; ++i) {
if (kvm->as[i].as == as && kvm->as[i].ml) {
return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
start_addr, size);
}
}
/* @as is not an address space KVM knows about. */
return false;
}
/*
 * QOM class init for the KVM accelerator: wire up the display name, the
 * machine-init hook, the has_memory query used by "info mtree -f", and
 * the kvm_allowed enable flag.
 */
static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "KVM";
ac->init_machine = kvm_init;
ac->has_memory = kvm_accel_has_memory;
ac->allowed = &kvm_allowed;
}

7
configure vendored
View file

@ -1752,7 +1752,7 @@ disabled with --disable-FEATURE, default is enabled if available:
guest-agent build the QEMU Guest Agent
guest-agent-msi build guest agent Windows MSI installation package
pie Position Independent Executables
modules modules support
modules modules support (non-Windows)
debug-tcg TCG debugging (default is disabled)
debug-info debugging information
sparse sparse checker
@ -2007,6 +2007,11 @@ else
QEMU_CFLAGS="$QEMU_CFLAGS -Wno-missing-braces"
fi
# Our module code doesn't support Windows
if test "$modules" = "yes" && test "$mingw32" = "yes" ; then
error_exit "Modules are not available for Windows"
fi
# Static linking is not possible with modules or PIE
if test "$static" = "yes" ; then
if test "$modules" = "yes" ; then

View file

@ -254,24 +254,28 @@ static void scsi_read_complete(void * opaque, int ret)
r->len = -1;
/*
* Check if this is a VPD Block Limits request that
* resulted in sense error but would need emulation.
* In this case, emulate a valid VPD response.
*/
if (s->needs_vpd_bl_emulation && ret == 0 &&
(r->io_header.driver_status & SG_ERR_DRIVER_SENSE) &&
r->req.cmd.buf[0] == INQUIRY &&
(r->req.cmd.buf[1] & 0x01) &&
r->req.cmd.buf[2] == 0xb0) {
if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
SCSISense sense =
scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);
if (sense.key == ILLEGAL_REQUEST) {
/*
* Check if this is a VPD Block Limits request that
* resulted in sense error but would need emulation.
* In this case, emulate a valid VPD response.
*/
if (sense.key == ILLEGAL_REQUEST &&
s->needs_vpd_bl_emulation &&
r->req.cmd.buf[0] == INQUIRY &&
(r->req.cmd.buf[1] & 0x01) &&
r->req.cmd.buf[2] == 0xb0) {
len = scsi_generic_emulate_block_limits(r, s);
/*
* No need to let scsi_read_complete go on and handle an
* It's okay to jump to req_complete: no need to
* let scsi_handle_inquiry_reply handle an
* INQUIRY VPD BL request we created manually.
*/
}
if (sense.key) {
goto req_complete;
}
}

View file

@ -210,7 +210,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
if (err) {
error_propagate(errp, err);
error_free(vsc->migration_blocker);
goto close_fd;
goto free_virtio;
}
}
@ -240,6 +240,8 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
migrate_del_blocker(vsc->migration_blocker);
}
g_free(vsc->dev.vqs);
free_virtio:
virtio_scsi_common_unrealize(dev);
close_fd:
close(vhostfd);
return;
@ -262,7 +264,7 @@ static void vhost_scsi_unrealize(DeviceState *dev, Error **errp)
vhost_dev_cleanup(&vsc->dev);
g_free(vqs);
virtio_scsi_common_unrealize(dev, errp);
virtio_scsi_common_unrealize(dev);
}
static Property vhost_scsi_properties[] = {

View file

@ -87,7 +87,7 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
}
if (!vhost_user_init(&s->vhost_user, &vs->conf.chardev, errp)) {
return;
goto free_virtio;
}
vsc->dev.nvqs = 2 + vs->conf.num_queues;
@ -101,15 +101,21 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
if (ret < 0) {
error_setg(errp, "vhost-user-scsi: vhost initialization failed: %s",
strerror(-ret));
vhost_user_cleanup(&s->vhost_user);
g_free(vqs);
return;
goto free_vhost;
}
/* Channel and lun both are 0 for bootable vhost-user-scsi disk */
vsc->channel = 0;
vsc->lun = 0;
vsc->target = vs->conf.boot_tpgt;
return;
free_vhost:
vhost_user_cleanup(&s->vhost_user);
g_free(vqs);
free_virtio:
virtio_scsi_common_unrealize(dev);
}
static void vhost_user_scsi_unrealize(DeviceState *dev, Error **errp)
@ -125,7 +131,7 @@ static void vhost_user_scsi_unrealize(DeviceState *dev, Error **errp)
vhost_dev_cleanup(&vsc->dev);
g_free(vqs);
virtio_scsi_common_unrealize(dev, errp);
virtio_scsi_common_unrealize(dev);
vhost_user_cleanup(&s->vhost_user);
}

View file

@ -922,7 +922,7 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
virtio_scsi_dataplane_setup(s, errp);
}
void virtio_scsi_common_unrealize(DeviceState *dev, Error **errp)
void virtio_scsi_common_unrealize(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
@ -936,7 +936,7 @@ static void virtio_scsi_device_unrealize(DeviceState *dev, Error **errp)
VirtIOSCSI *s = VIRTIO_SCSI(dev);
qbus_set_hotplug_handler(BUS(&s->bus), NULL, &error_abort);
virtio_scsi_common_unrealize(dev, errp);
virtio_scsi_common_unrealize(dev);
}
static Property virtio_scsi_properties[] = {

View file

@ -145,7 +145,7 @@ void virtio_scsi_common_realize(DeviceState *dev,
VirtIOHandleOutput cmd,
Error **errp);
void virtio_scsi_common_unrealize(DeviceState *dev, Error **errp);
void virtio_scsi_common_unrealize(DeviceState *dev);
bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq);
bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq);
bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq);

View file

@ -25,6 +25,7 @@
#include "qom/object.h"
#include "hw/qdev-properties.h"
#include "exec/hwaddr.h"
typedef struct AccelState {
/*< private >*/
@ -39,6 +40,8 @@ typedef struct AccelClass {
const char *name;
int (*init_machine)(MachineState *ms);
void (*setup_post)(MachineState *ms, AccelState *accel);
bool (*has_memory)(MachineState *ms, AddressSpace *as,
hwaddr start_addr, hwaddr size);
bool *allowed;
/*
* Array of global properties that would be applied when specific

View file

@ -30,7 +30,9 @@
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
//#define DEBUG_UNASSIGNED
@ -2999,6 +3001,8 @@ struct FlatViewInfo {
int counter;
bool dispatch_tree;
bool owner;
AccelClass *ac;
const char *ac_name;
};
static void mtree_print_flatview(gpointer key, gpointer value,
@ -3061,6 +3065,17 @@ static void mtree_print_flatview(gpointer key, gpointer value,
if (fvi->owner) {
mtree_print_mr_owner(mr);
}
if (fvi->ac) {
for (i = 0; i < fv_address_spaces->len; ++i) {
as = g_array_index(fv_address_spaces, AddressSpace*, i);
if (fvi->ac->has_memory(current_machine, as,
int128_get64(range->addr.start),
MR_SIZE(range->addr.size) + 1)) {
qemu_printf(" %s", fvi->ac_name);
}
}
}
qemu_printf("\n");
range++;
}
@ -3101,6 +3116,13 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner)
};
GArray *fv_address_spaces;
GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
AccelClass *ac = ACCEL_GET_CLASS(current_machine->accelerator);
if (ac->has_memory) {
fvi.ac = ac;
fvi.ac_name = current_machine->accel ? current_machine->accel :
object_class_get_name(OBJECT_CLASS(ac));
}
/* Gather all FVs in one table */
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {

View file

@ -1083,7 +1083,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
NULL, NULL, NULL, NULL,
NULL, NULL, "md-clear", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL /* pconfig */, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, "spec-ctrl", "stibp",
NULL, "arch-capabilities", "core-capability", "ssbd",

View file

@ -1877,6 +1877,28 @@ static inline bool cpu_has_vmx(CPUX86State *env)
return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
}
/*
 * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
 * Since it was set, CR4.VMXE must remain set as long as the vCPU is in
 * VMX operation. This is because CR4.VMXE is one of the bits set
 * in MSR_IA32_VMX_CR4_FIXED1.
 *
 * There is one exception to the above statement: when a vCPU enters SMM
 * mode, it temporarily exits VMX operation and
 * may also reset CR4.VMXE during execution in SMM mode.
 * When the vCPU exits SMM mode, vCPU state is restored to be in VMX operation
 * and CR4.VMXE is restored to its original value of being set.
 *
 * Therefore, when the vCPU is not in SMM mode, we can infer whether
 * VMX is being used by examining CR4.VMXE. Otherwise, we cannot
 * know for certain.
 */
static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
{
return cpu_has_vmx(env) &&
((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
}
/* fpu_helper.c */
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);

View file

@ -128,6 +128,11 @@ bool kvm_has_adjust_clock_stable(void)
return (ret == KVM_CLOCK_TSC_STABLE);
}
/*
 * Report whether the kernel supports KVM_CAP_EXCEPTION_PAYLOAD, as cached
 * in the file-scope has_exception_payload flag.
 * NOTE(review): the flag is presumably probed during accelerator init —
 * confirm against kvm_arch_init; the probe is not visible here.
 */
bool kvm_has_exception_payload(void)
{
return has_exception_payload;
}
bool kvm_allows_irq0_override(void)
{
return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
@ -1342,7 +1347,6 @@ static int hyperv_init_vcpu(X86CPU *cpu)
}
static Error *invtsc_mig_blocker;
static Error *nested_virt_mig_blocker;
#define KVM_MAX_CPUID_ENTRIES 100
@ -1653,22 +1657,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
!!(c->ecx & CPUID_EXT_SMX);
}
if (cpu_has_vmx(env) && !nested_virt_mig_blocker &&
((kvm_max_nested_state_length() <= 0) || !has_exception_payload)) {
error_setg(&nested_virt_mig_blocker,
"Kernel do not provide required capabilities for "
"nested virtualization migration. "
"(CAP_NESTED_STATE=%d, CAP_EXCEPTION_PAYLOAD=%d)",
kvm_max_nested_state_length() > 0,
has_exception_payload);
r = migrate_add_blocker(nested_virt_mig_blocker, &local_err);
if (local_err) {
error_report_err(local_err);
error_free(nested_virt_mig_blocker);
return r;
}
}
if (env->mcg_cap & MCG_LMCE_P) {
has_msr_mcg_ext_ctl = has_msr_feature_control = true;
}
@ -1683,7 +1671,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (local_err) {
error_report_err(local_err);
error_free(invtsc_mig_blocker);
goto fail2;
return r;
}
}
}
@ -1723,15 +1711,15 @@ int kvm_arch_init_vcpu(CPUState *cs)
max_nested_state_len = kvm_max_nested_state_length();
if (max_nested_state_len > 0) {
assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
env->nested_state = g_malloc0(max_nested_state_len);
env->nested_state->size = max_nested_state_len;
if (IS_INTEL_CPU(env)) {
struct kvm_vmx_nested_state_hdr *vmx_hdr =
&env->nested_state->hdr.vmx;
if (cpu_has_vmx(env)) {
struct kvm_vmx_nested_state_hdr *vmx_hdr;
env->nested_state = g_malloc0(max_nested_state_len);
env->nested_state->size = max_nested_state_len;
env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
vmx_hdr = &env->nested_state->hdr.vmx;
vmx_hdr->vmxon_pa = -1ull;
vmx_hdr->vmcs12_pa = -1ull;
}
@ -1752,8 +1740,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
fail:
migrate_del_blocker(invtsc_mig_blocker);
fail2:
migrate_del_blocker(nested_virt_mig_blocker);
return r;
}
@ -3529,7 +3515,7 @@ static int kvm_put_nested_state(X86CPU *cpu)
CPUX86State *env = &cpu->env;
int max_nested_state_len = kvm_max_nested_state_length();
if (max_nested_state_len <= 0) {
if (!env->nested_state) {
return 0;
}
@ -3543,7 +3529,7 @@ static int kvm_get_nested_state(X86CPU *cpu)
int max_nested_state_len = kvm_max_nested_state_length();
int ret;
if (max_nested_state_len <= 0) {
if (!env->nested_state) {
return 0;
}

View file

@ -35,6 +35,7 @@
bool kvm_allows_irq0_override(void);
bool kvm_has_smm(void);
bool kvm_has_adjust_clock_stable(void);
bool kvm_has_exception_payload(void);
void kvm_synchronize_all_tsc(void);
void kvm_arch_reset_vcpu(X86CPU *cs);
void kvm_arch_do_init_vcpu(X86CPU *cs);

View file

@ -7,6 +7,7 @@
#include "hw/isa/isa.h"
#include "migration/cpu.h"
#include "hyperv.h"
#include "kvm_i386.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
@ -232,10 +233,25 @@ static int cpu_pre_save(void *opaque)
}
#ifdef CONFIG_KVM
/* Verify we have nested virtualization state from kernel if required */
if (kvm_enabled() && cpu_has_vmx(env) && !env->nested_state) {
error_report("Guest enabled nested virtualization but kernel "
"does not support saving of nested state");
/*
* In case vCPU may have enabled VMX, we need to make sure kernel have
* required capabilities in order to perform migration correctly:
*
* 1) We must be able to extract vCPU nested-state from KVM.
*
* 2) In case vCPU is running in guest-mode and it has a pending exception,
* we must be able to determine if it's in a pending or injected state.
* Note that in case KVM doesn't have the required capability to do so,
* a pending/injected exception will always appear as an
* injected exception.
*/
if (kvm_enabled() && cpu_vmx_maybe_enabled(env) &&
(!env->nested_state ||
(!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) &&
env->exception_injected))) {
error_report("Guest maybe enabled nested virtualization but kernel "
"does not support required capabilities to save vCPU "
"nested state");
return -EINVAL;
}
#endif
@ -1019,31 +1035,13 @@ static const VMStateDescription vmstate_vmx_nested_state = {
}
};
static bool svm_nested_state_needed(void *opaque)
{
struct kvm_nested_state *nested_state = opaque;
return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM);
}
static const VMStateDescription vmstate_svm_nested_state = {
.name = "cpu/kvm_nested_state/svm",
.version_id = 1,
.minimum_version_id = 1,
.needed = svm_nested_state_needed,
.fields = (VMStateField[]) {
VMSTATE_END_OF_LIST()
}
};
static bool nested_state_needed(void *opaque)
{
X86CPU *cpu = opaque;
CPUX86State *env = &cpu->env;
return (env->nested_state &&
(vmx_nested_state_needed(env->nested_state) ||
svm_nested_state_needed(env->nested_state)));
vmx_nested_state_needed(env->nested_state));
}
static int nested_state_post_load(void *opaque, int version_id)
@ -1105,7 +1103,6 @@ static const VMStateDescription vmstate_kvm_nested_state = {
},
.subsections = (const VMStateDescription*[]) {
&vmstate_vmx_nested_state,
&vmstate_svm_nested_state,
NULL
}
};

View file

@ -763,7 +763,7 @@ sev_guest_init(const char *id)
"reduced-phys-bits", NULL);
if (s->reduced_phys_bits < 1) {
error_report("%s: reduced_phys_bits check failed, it should be >=1,"
"' requested '%d'", __func__, s->reduced_phys_bits);
" requested '%d'", __func__, s->reduced_phys_bits);
goto err;
}
@ -783,7 +783,7 @@ sev_guest_init(const char *id)
ret = sev_platform_ioctl(s->sev_fd, SEV_PLATFORM_STATUS, &status,
&fw_error);
if (ret) {
error_report("%s: failed to get platform status ret=%d"
error_report("%s: failed to get platform status ret=%d "
"fw_error='%d: %s'", __func__, ret, fw_error,
fw_error_to_str(fw_error));
goto err;

View file

@ -59,12 +59,67 @@ static void check_bitmap_copy_with_offset(void)
g_free(bmap3);
}
/* Signature shared by bitmap_set() and bitmap_set_atomic(). */
typedef void (*bmap_set_func)(unsigned long *map, long i, long len);
/*
 * Exercise one bitmap "set" implementation: set runs of bits around word
 * boundaries (both-aligned, end-aligned and start-aligned cases) and
 * assert that the requested range is set — the two interior words are
 * all-ones, the first set bit is at the requested start, and the first
 * zero bit after the start is at the end of the requested range.
 */
static void bitmap_set_case(bmap_set_func set_func)
{
unsigned long *bmap;
int offset;
bmap = bitmap_new(BMAP_SIZE);
/* Both Aligned, set bits [BITS_PER_LONG, 3*BITS_PER_LONG] */
set_func(bmap, BITS_PER_LONG, 2 * BITS_PER_LONG);
g_assert_cmpuint(bmap[1], ==, -1ul);
g_assert_cmpuint(bmap[2], ==, -1ul);
g_assert_cmpint(find_first_bit(bmap, BITS_PER_LONG), ==, BITS_PER_LONG);
g_assert_cmpint(find_next_zero_bit(bmap, 3 * BITS_PER_LONG, BITS_PER_LONG),
==, 3 * BITS_PER_LONG);
for (offset = 0; offset <= BITS_PER_LONG; offset++) {
bitmap_clear(bmap, 0, BMAP_SIZE);
/* End Aligned, set bits [BITS_PER_LONG - offset, 3*BITS_PER_LONG] */
set_func(bmap, BITS_PER_LONG - offset, 2 * BITS_PER_LONG + offset);
g_assert_cmpuint(bmap[1], ==, -1ul);
g_assert_cmpuint(bmap[2], ==, -1ul);
g_assert_cmpint(find_first_bit(bmap, BITS_PER_LONG),
==, BITS_PER_LONG - offset);
g_assert_cmpint(find_next_zero_bit(bmap,
3 * BITS_PER_LONG,
BITS_PER_LONG - offset),
==, 3 * BITS_PER_LONG);
}
for (offset = 0; offset <= BITS_PER_LONG; offset++) {
bitmap_clear(bmap, 0, BMAP_SIZE);
/* Start Aligned, set bits [BITS_PER_LONG, 3*BITS_PER_LONG + offset] */
set_func(bmap, BITS_PER_LONG, 2 * BITS_PER_LONG + offset);
g_assert_cmpuint(bmap[1], ==, -1ul);
g_assert_cmpuint(bmap[2], ==, -1ul);
g_assert_cmpint(find_first_bit(bmap, BITS_PER_LONG),
==, BITS_PER_LONG);
g_assert_cmpint(find_next_zero_bit(bmap,
3 * BITS_PER_LONG + offset,
BITS_PER_LONG),
==, 3 * BITS_PER_LONG + offset);
}
g_free(bmap);
}
/* Run the set-bit cases against both the plain and the atomic setter. */
static void check_bitmap_set(void)
{
bitmap_set_case(bitmap_set);
bitmap_set_case(bitmap_set_atomic);
}
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
g_test_add_func("/bitmap/bitmap_copy_with_offset",
check_bitmap_copy_with_offset);
g_test_add_func("/bitmap/bitmap_set",
check_bitmap_set);
g_test_run();

2
vl.c
View file

@ -1550,7 +1550,7 @@ void qemu_system_reset(ShutdownCause reason)
} else {
qemu_devices_reset();
}
if (reason != SHUTDOWN_CAUSE_SUBSYSTEM_RESET) {
if (reason && reason != SHUTDOWN_CAUSE_SUBSYSTEM_RESET) {
qapi_event_send_reset(shutdown_caused_by_guest(reason), reason);
}
cpu_synchronize_all_post_reset();