qemu/monitor/qmp-cmds.c


/*
 * QEMU Management Protocol commands
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/config-file.h"
#include "qemu/uuid.h"
#include "chardev/char.h"
#include "ui/qemu-spice.h"
#include "ui/console.h"
#include "ui/dbus-display.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/runstate-action.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-acpi.h"
#include "qapi/qapi-commands-block.h"
#include "qapi/qapi-commands-control.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-commands-stats.h"
#include "qapi/qapi-commands-ui.h"
#include "qapi/type-helpers.h"
#include "qapi/qmp/qerror.h"
#include "exec/ramlist.h"
#include "hw/mem/memory-device.h"
#include "hw/acpi/acpi_dev_interface.h"
#include "hw/intc/intc.h"
#include "hw/rdma/rdma.h"
#include "monitor/stats.h"
NameInfo *qmp_query_name(Error **errp)
{
    NameInfo *info = g_malloc0(sizeof(*info));

    if (qemu_name) {
        info->has_name = true;
        info->name = g_strdup(qemu_name);
    }

    return info;
}
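
/* Report whether KVM support is compiled in and whether it is in use. */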
KvmInfo *qmp_query_kvm(Error **errp)
{
    KvmInfo *info = g_malloc0(sizeof(*info));

    info->enabled = kvm_enabled();
    info->present = accel_find("kvm");

    return info;
}
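
/* Return the system UUID as a string (all zeroes when none was configured). */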
UuidInfo *qmp_query_uuid(Error **errp)
{
    UuidInfo *info = g_malloc0(sizeof(*info));

    info->UUID = qemu_uuid_unparse_strdup(&qemu_uuid);
    return info;
}
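
/*
 * Terminate QEMU.  The shutdown action is forced to poweroff so that 'quit'
 * takes effect even when a different -action shutdown= behaviour is
 * configured.
 */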
void qmp_quit(Error **errp)
{
    shutdown_action = SHUTDOWN_ACTION_POWEROFF;
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_QMP_QUIT);
}
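
/*
 * Pause guest execution.  During an incoming migration the VM is not running
 * yet, so only clear autostart; otherwise move to RUN_STATE_PAUSED.
 */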
void qmp_stop(Error **errp)
{
    /* If there is a dump running in the background, we have to wait until
     * it has finished. */
    if (qemu_system_dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        autostart = 0;
    } else {
        vm_stop(RUN_STATE_PAUSED);
    }
}

void qmp_system_reset(Error **errp)
{
    qemu_system_reset_request(SHUTDOWN_CAUSE_HOST_QMP_SYSTEM_RESET);
}

void qmp_system_powerdown(Error **errp)
{
    qemu_system_powerdown_request();
}
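
/*
 * Resume guest execution.  Block device and block job I/O status is reset,
 * and block nodes left inactive by a completed outgoing migration are
 * reactivated before the VM is started.
 */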
void qmp_cont(Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    Error *local_err = NULL;

    /* If there is a dump running in the background, we have to wait until
     * it has finished. */
    if (qemu_system_dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    if (runstate_needs_reset()) {
        error_setg(errp, "Resetting the Virtual Machine is required");
        return;
    } else if (runstate_check(RUN_STATE_SUSPENDED)) {
        return;
    } else if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
        error_setg(errp, "Migration is not finalized yet");
        return;
    }

    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        blk_iostatus_reset(blk);
    }

    for (job = block_job_next(NULL); job; job = block_job_next(job)) {
        block_job_iostatus_reset(job);
    }
    /* Continuing after completed migration. Images have been inactivated to
     * allow the destination to take control. Need to get control back now.
     *
     * If there are no inactive block nodes (e.g. because the VM was just
     * paused rather than completing a migration), bdrv_activate_all() simply
     * doesn't do anything. */
    bdrv_activate_all(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        autostart = 1;
    } else {
        vm_start();
    }
}
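
/*
 * Wake the guest up from suspend.  Fails when the guest cannot wake up from
 * suspend at all, or when it is not currently suspended.
 */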
void qmp_system_wakeup(Error **errp)
{
    if (!qemu_wakeup_suspend_enabled()) {
        error_setg(errp,
                   "wake-up from suspend is not supported by this guest");
        return;
    }

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, errp);
}
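
/*
 * Set the password of the SPICE or VNC display.  VNC only supports the
 * default "keep" action for already-connected clients.
 */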
void qmp_set_password(SetPasswordOptions *opts, Error **errp)
{
    int rc;

    if (opts->protocol == DISPLAY_PROTOCOL_SPICE) {
        if (!qemu_using_spice(errp)) {
            return;
        }
        rc = qemu_spice.set_passwd(opts->password,
                opts->connected == SET_PASSWORD_ACTION_FAIL,
                opts->connected == SET_PASSWORD_ACTION_DISCONNECT);
    } else {
        assert(opts->protocol == DISPLAY_PROTOCOL_VNC);
        if (opts->connected != SET_PASSWORD_ACTION_KEEP) {
            /* vnc supports "connected=keep" only */
            error_setg(errp, QERR_INVALID_PARAMETER, "connected");
            return;
        }
        /* Note that setting an empty password will not disable login
         * through this interface. */
        rc = vnc_display_password(opts->u.vnc.display, opts->password);
    }

    if (rc != 0) {
        error_setg(errp, "Could not set password");
    }
}
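
/*
 * Set the password expiry time of the SPICE or VNC display.  @time is "now",
 * "never", "+N" (N seconds from now) or an absolute UNIX timestamp.
 */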
void qmp_expire_password(ExpirePasswordOptions *opts, Error **errp)
{
    time_t when;
    int rc;
    const char *whenstr = opts->time;

    if (strcmp(whenstr, "now") == 0) {
        when = 0;
    } else if (strcmp(whenstr, "never") == 0) {
        when = TIME_MAX;
    } else if (whenstr[0] == '+') {
        when = time(NULL) + strtoull(whenstr + 1, NULL, 10);
    } else {
        when = strtoull(whenstr, NULL, 10);
    }

    if (opts->protocol == DISPLAY_PROTOCOL_SPICE) {
        if (!qemu_using_spice(errp)) {
            return;
        }
        rc = qemu_spice.set_pw_expire(when);
    } else {
        assert(opts->protocol == DISPLAY_PROTOCOL_VNC);
        rc = vnc_display_pw_expire(opts->u.vnc.display, when);
    }

    if (rc != 0) {
        error_setg(errp, "Could not set password expire time");
    }
}

#ifdef CONFIG_VNC
void qmp_change_vnc_password(const char *password, Error **errp)
{
    if (vnc_display_password(NULL, password) < 0) {
        error_setg(errp, "Could not set password");
    }
}
#endif
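
/*
 * Hand a file descriptor from the monitor over to a display server (SPICE,
 * VNC, the D-Bus display) or to a character device.  The fd is closed again
 * on error.
 */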
void qmp_add_client(const char *protocol, const char *fdname,
                    bool has_skipauth, bool skipauth, bool has_tls, bool tls,
                    Error **errp)
{
    Chardev *s;
    int fd;

    fd = monitor_get_fd(monitor_cur(), fdname, errp);
    if (fd < 0) {
        return;
    }

    if (strcmp(protocol, "spice") == 0) {
        if (!qemu_using_spice(errp)) {
            close(fd);
            return;
        }
        skipauth = has_skipauth ? skipauth : false;
        tls = has_tls ? tls : false;
        if (qemu_spice.display_add_client(fd, skipauth, tls) < 0) {
            error_setg(errp, "spice failed to add client");
            close(fd);
        }
        return;
#ifdef CONFIG_VNC
    } else if (strcmp(protocol, "vnc") == 0) {
        skipauth = has_skipauth ? skipauth : false;
        vnc_display_add_client(NULL, fd, skipauth);
        return;
#endif
#ifdef CONFIG_DBUS_DISPLAY
    } else if (strcmp(protocol, "@dbus-display") == 0) {
        if (!qemu_using_dbus_display(errp)) {
            close(fd);
            return;
        }
        if (!qemu_dbus_display.add_client(fd, errp)) {
            close(fd);
            return;
        }
        return;
#endif
    } else if ((s = qemu_chr_find(protocol)) != NULL) {
        if (qemu_chr_add_client(s, fd) < 0) {
            error_setg(errp, "failed to add client");
            close(fd);
            return;
        }
        return;
    }

    error_setg(errp, "protocol '%s' is invalid", protocol);
    close(fd);
}

MemoryDeviceInfoList *qmp_query_memory_devices(Error **errp)
{
    return qmp_memory_device_list();
}
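
/*
 * Collect ACPI OSPM status for hotplugged devices from the machine's ACPI
 * device, if it has one.
 */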
ACPIOSTInfoList *qmp_query_acpi_ospm_status(Error **errp)
{
    bool ambig;
    ACPIOSTInfoList *head = NULL;
    ACPIOSTInfoList **prev = &head;
    Object *obj = object_resolve_path_type("", TYPE_ACPI_DEVICE_IF, &ambig);

    if (obj) {
        AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj);
        AcpiDeviceIf *adev = ACPI_DEVICE_IF(obj);

        adevc->ospm_status(adev, &prev);
    } else {
        error_setg(errp, "command is not supported, missing ACPI device");
    }

    return head;
}
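
/*
 * Summarize the VM's memory: base RAM size plus, when a meaningful value is
 * available, the amount of hotplugged memory.
 */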
MemoryInfo *qmp_query_memory_size_summary(Error **errp)
{
    MemoryInfo *mem_info = g_new0(MemoryInfo, 1);
    MachineState *ms = MACHINE(qdev_get_machine());

    mem_info->base_memory = ms->ram_size;

    mem_info->plugged_memory = get_plugged_memory_size();
    mem_info->has_plugged_memory =
        mem_info->plugged_memory != (uint64_t)-1;

    return mem_info;
}
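
/* Reload display server state at runtime; currently only VNC TLS certs. */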
void qmp_display_reload(DisplayReloadOptions *arg, Error **errp)
{
    switch (arg->type) {
    case DISPLAY_RELOAD_TYPE_VNC:
#ifdef CONFIG_VNC
        if (arg->u.vnc.has_tls_certs && arg->u.vnc.tls_certs) {
            vnc_display_reload_certs(NULL, errp);
        }
#else
        error_setg(errp, "vnc is invalid, missing 'CONFIG_VNC'");
#endif
        break;
    default:
        abort();
    }
}

void qmp_display_update(DisplayUpdateOptions *arg, Error **errp)
{
    switch (arg->type) {
    case DISPLAY_UPDATE_TYPE_VNC:
#ifdef CONFIG_VNC
        vnc_display_update(&arg->u.vnc, errp);
#else
        error_setg(errp, "vnc is invalid, missing 'CONFIG_VNC'");
#endif
        break;
    default:
        abort();
    }
}
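
/*
 * QOM tree walker for x-query-rdma: append the statistics of each RDMA
 * provider to the GString passed in @opaque.
 */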
static int qmp_x_query_rdma_foreach(Object *obj, void *opaque)
{
    RdmaProvider *rdma;
    RdmaProviderClass *k;
    GString *buf = opaque;

    if (object_dynamic_cast(obj, INTERFACE_RDMA_PROVIDER)) {
        rdma = RDMA_PROVIDER(obj);
        k = RDMA_PROVIDER_GET_CLASS(obj);
        if (k->format_statistics) {
            k->format_statistics(rdma, buf);
        } else {
            g_string_append_printf(buf,
                                   "RDMA statistics not available for %s.\n",
                                   object_get_typename(obj));
        }
    }

    return 0;
}

HumanReadableText *qmp_x_query_rdma(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    object_child_foreach_recursive(object_get_root(),
                                   qmp_x_query_rdma_foreach, buf);

    return human_readable_text_from_str(buf);
}

HumanReadableText *qmp_x_query_ramblock(Error **errp)
{
    g_autoptr(GString) buf = ram_block_format();

    return human_readable_text_from_str(buf);
}
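
/*
 * QOM tree walker for x-query-irq: append per-IRQ counters from each
 * interrupt statistics provider to the GString passed in @opaque.
 */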
static int qmp_x_query_irq_foreach(Object *obj, void *opaque)
{
    InterruptStatsProvider *intc;
    InterruptStatsProviderClass *k;
    GString *buf = opaque;

    if (object_dynamic_cast(obj, TYPE_INTERRUPT_STATS_PROVIDER)) {
        intc = INTERRUPT_STATS_PROVIDER(obj);
        k = INTERRUPT_STATS_PROVIDER_GET_CLASS(obj);
        uint64_t *irq_counts;
        unsigned int nb_irqs, i;
        if (k->get_statistics &&
            k->get_statistics(intc, &irq_counts, &nb_irqs)) {
            if (nb_irqs > 0) {
                g_string_append_printf(buf, "IRQ statistics for %s:\n",
                                       object_get_typename(obj));
                for (i = 0; i < nb_irqs; i++) {
                    if (irq_counts[i] > 0) {
                        g_string_append_printf(buf, "%2d: %" PRId64 "\n", i,
                                               irq_counts[i]);
                    }
                }
            }
        } else {
            g_string_append_printf(buf,
                                   "IRQ statistics not available for %s.\n",
                                   object_get_typename(obj));
        }
    }

    return 0;
}

HumanReadableText *qmp_x_query_irq(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    object_child_foreach_recursive(object_get_root(),
                                   qmp_x_query_irq_foreach, buf);

    return human_readable_text_from_str(buf);
}
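
/*
 * Registry of statistics providers: each entry pairs a StatsProvider with
 * the callbacks backing query-stats and query-stats-schemas.
 */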
typedef struct StatsCallbacks {
    StatsProvider provider;
    StatRetrieveFunc *stats_cb;
    SchemaRetrieveFunc *schemas_cb;
    QTAILQ_ENTRY(StatsCallbacks) next;
} StatsCallbacks;

static QTAILQ_HEAD(, StatsCallbacks) stats_callbacks =
    QTAILQ_HEAD_INITIALIZER(stats_callbacks);
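
/*
 * Register a statistics provider.  @stats_fn and @schemas_fn are invoked by
 * query-stats and query-stats-schemas respectively.
 */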
void add_stats_callbacks(StatsProvider provider,
                         StatRetrieveFunc *stats_fn,
                         SchemaRetrieveFunc *schemas_fn)
{
    StatsCallbacks *entry = g_new(StatsCallbacks, 1);
    entry->provider = provider;
    entry->stats_cb = stats_fn;
    entry->schemas_cb = schemas_fn;

    QTAILQ_INSERT_TAIL(&stats_callbacks, entry, next);
}
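
/*
 * Invoke one provider's stats callback, applying the per-provider request
 * (if any) and the vCPU filter.  Returns false only when the callback
 * reported an error; in that case the partial result list is freed.
 */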
static bool invoke_stats_cb(StatsCallbacks *entry,
                            StatsResultList **stats_results,
                            StatsFilter *filter, StatsRequest *request,
                            Error **errp)
{
    strList *targets = NULL;
    strList *names = NULL;
    ERRP_GUARD();

    if (request) {
        if (request->provider != entry->provider) {
            return true;
        }
        if (request->has_names && !request->names) {
            return true;
        }
        names = request->has_names ? request->names : NULL;
    }

    switch (filter->target) {
    case STATS_TARGET_VM:
        break;
    case STATS_TARGET_VCPU:
        if (filter->u.vcpu.has_vcpus) {
            if (!filter->u.vcpu.vcpus) {
                /* No targets allowed?  Return no statistics.  */
                return true;
            }
            targets = filter->u.vcpu.vcpus;
        }
        break;
    default:
        abort();
    }

    entry->stats_cb(stats_results, filter->target, names, targets, errp);
if (*errp) {
qapi_free_StatsResultList(*stats_results);
*stats_results = NULL;
return false;
}
return true;
}
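
/*
 * Implementation of the QMP 'query-stats' command, e.g.
 * { "execute": "query-stats", "arguments": { "target": "vm" } }.
 * Walks the registered stats callbacks and gathers each provider's
 * results into a single StatsResultList.
 */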
StatsResultList *qmp_query_stats(StatsFilter *filter, Error **errp)
{
StatsResultList *stats_results = NULL;
StatsCallbacks *entry;
StatsRequestList *request;
QTAILQ_FOREACH(entry, &stats_callbacks, next) {
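        /*
         * When the filter names specific providers, invoke this callback
         * once per requested provider; otherwise invoke it once with no
         * per-provider request.
         */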
if (filter->has_providers) {
for (request = filter->providers; request; request = request->next) {
if (!invoke_stats_cb(entry, &stats_results, filter,
request->value, errp)) {
break;
}
}
} else {
if (!invoke_stats_cb(entry, &stats_results, filter, NULL, errp)) {
break;
}
}
}
return stats_results;
}
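
/*
 * Implementation of the QMP 'query-stats-schemas' command: collects the
 * schema from every registered provider, or only from @provider when
 * one is given.
 */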
StatsSchemaList *qmp_query_stats_schemas(bool has_provider,
StatsProvider provider,
Error **errp)
{
StatsSchemaList *stats_results = NULL;
StatsCallbacks *entry;
ERRP_GUARD();
QTAILQ_FOREACH(entry, &stats_callbacks, next) {
if (!has_provider || provider == entry->provider) {
entry->schemas_cb(&stats_results, errp);
if (*errp) {
qapi_free_StatsSchemaList(stats_results);
return NULL;
}
}
}
return stats_results;
}
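
/*
 * Helper for provider callbacks: wrap @stats_list in a StatsResult for
 * @provider (tagged with @qom_path when one is supplied) and prepend it
 * to *stats_results.
 *
 * A provider callback would typically build a StatsList for one object
 * and hand it over in a single call, roughly (sketch only, the collection
 * step is provider-specific):
 *
 *     StatsList *stats = NULL;
 *     ...collect the provider's statistics into @stats...
 *     add_stats_entry(stats_results, provider, qom_path, stats);
 */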
void add_stats_entry(StatsResultList **stats_results, StatsProvider provider,
const char *qom_path, StatsList *stats_list)
{
StatsResult *entry = g_new0(StatsResult, 1);
entry->provider = provider;
if (qom_path) {
entry->has_qom_path = true;
entry->qom_path = g_strdup(qom_path);
}
entry->stats = stats_list;
QAPI_LIST_PREPEND(*stats_results, entry);
}
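
/*
 * Helper for provider callbacks: wrap @stats_list in a StatsSchema for
 * @provider and @target and prepend it to *schema_results.
 */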
void add_stats_schema(StatsSchemaList **schema_results,
StatsProvider provider, StatsTarget target,
StatsSchemaValueList *stats_list)
{
StatsSchema *entry = g_new0(StatsSchema, 1);
entry->provider = provider;
entry->target = target;
entry->stats = stats_list;
QAPI_LIST_PREPEND(*schema_results, entry);
}
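
/*
 * Return true if @string matches an entry in @list, or if @list is
 * NULL (an absent filter matches everything).
 */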
bool apply_str_list_filter(const char *string, strList *list)
{
strList *str_list = NULL;
if (!list) {
return true;
}
for (str_list = list; str_list; str_list = str_list->next) {
if (g_str_equal(string, str_list->value)) {
return true;
}
}
return false;
}