Convert vmm_ops calls to IFUNC

There is no need for these to be function pointers since they are
never modified post-module load.

Rename AMD/Intel ops to be more consistent.

Submitted by:	adam_fenn.io
Reviewed by:	markj, grehan
Approved by:	grehan (bhyve)
MFC after:	3 weeks
Differential Revision:	https://reviews.freebsd.org/D27375
This commit is contained in:
Peter Grehan 2020-11-28 01:16:59 +00:00
parent a36179937e
commit 15add60d37
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=368115
7 changed files with 228 additions and 171 deletions

View file

@ -189,32 +189,32 @@ typedef int (*vmi_snapshot_vmcx_t)(void *vmi, struct vm_snapshot_meta *meta,
typedef int (*vmi_restore_tsc_t)(void *vmi, int vcpuid, uint64_t now);
struct vmm_ops {
vmm_init_func_t init; /* module wide initialization */
vmm_cleanup_func_t cleanup;
vmm_resume_func_t resume;
vmm_init_func_t modinit; /* module wide initialization */
vmm_cleanup_func_t modcleanup;
vmm_resume_func_t modresume;
vmi_init_func_t vminit; /* vm-specific initialization */
vmi_run_func_t vmrun;
vmi_cleanup_func_t vmcleanup;
vmi_get_register_t vmgetreg;
vmi_set_register_t vmsetreg;
vmi_get_desc_t vmgetdesc;
vmi_set_desc_t vmsetdesc;
vmi_get_cap_t vmgetcap;
vmi_set_cap_t vmsetcap;
vmi_init_func_t init; /* vm-specific initialization */
vmi_run_func_t run;
vmi_cleanup_func_t cleanup;
vmi_get_register_t getreg;
vmi_set_register_t setreg;
vmi_get_desc_t getdesc;
vmi_set_desc_t setdesc;
vmi_get_cap_t getcap;
vmi_set_cap_t setcap;
vmi_vmspace_alloc vmspace_alloc;
vmi_vmspace_free vmspace_free;
vmi_vlapic_init vlapic_init;
vmi_vlapic_cleanup vlapic_cleanup;
/* checkpoint operations */
vmi_snapshot_t vmsnapshot;
vmi_snapshot_t snapshot;
vmi_snapshot_vmcx_t vmcx_snapshot;
vmi_restore_tsc_t vm_restore_tsc;
vmi_restore_tsc_t restore_tsc;
};
extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;
extern const struct vmm_ops vmm_ops_intel;
extern const struct vmm_ops vmm_ops_amd;
int vm_create(const char *name, struct vm **retvm);
void vm_destroy(struct vm *vm);

View file

@ -132,6 +132,7 @@ static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
static int svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
static __inline int
@ -162,7 +163,7 @@ svm_disable(void *arg __unused)
* Disable SVM on all CPUs.
*/
static int
svm_cleanup(void)
svm_modcleanup(void)
{
smp_rendezvous(NULL, svm_disable, NULL, NULL);
@ -240,7 +241,7 @@ svm_available(void)
}
static int
svm_init(int ipinum)
svm_modinit(int ipinum)
{
int error, cpu;
@ -274,7 +275,7 @@ svm_init(int ipinum)
}
static void
svm_restore(void)
svm_modresume(void)
{
svm_enable(NULL);
@ -551,7 +552,7 @@ vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
* Initialize a virtual machine.
*/
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
svm_init(struct vm *vm, pmap_t pmap)
{
struct svm_softc *svm_sc;
struct svm_vcpu *vcpu;
@ -728,7 +729,7 @@ svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
vis->seg_name = vm_segment_name(s);
}
error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}
@ -1984,7 +1985,7 @@ svm_dr_leave_guest(struct svm_regctx *gctx)
* Start vcpu with specified RIP.
*/
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
svm_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
struct vm_eventinfo *evinfo)
{
struct svm_regctx *gctx;
@ -2137,7 +2138,7 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
}
static void
svm_vmcleanup(void *arg)
svm_cleanup(void *arg)
{
struct svm_softc *sc = arg;
@ -2260,6 +2261,18 @@ svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
return (EINVAL);
}
/*
 * vmm_ops 'getdesc' entry point for AMD SVM: fetch the segment
 * descriptor identified by 'reg' from the vcpu's VMCB.  Thin wrapper
 * around vmcb_getdesc() so the ops table uses a uniformly named
 * svm_* function.  'arg' is the SVM softc (struct svm_softc).
 */
static int
svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
return (vmcb_getdesc(arg, vcpu, reg, desc));
}
/*
 * vmm_ops 'setdesc' entry point for AMD SVM: write the segment
 * descriptor identified by 'reg' into the vcpu's VMCB.  Thin wrapper
 * around vmcb_setdesc() mirroring svm_getdesc().
 */
static int
svm_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
return (vmcb_setdesc(arg, vcpu, reg, desc));
}
#ifdef BHYVE_SNAPSHOT
static int
svm_snapshot_reg(void *arg, int vcpu, int ident,
@ -2347,6 +2360,18 @@ svm_getcap(void *arg, int vcpu, int type, int *retval)
return (error);
}
/*
 * vmm_ops 'vmspace_alloc' entry point for AMD SVM: allocate the
 * nested-page-table backed vmspace for a guest.  Wrapper around
 * svm_npt_alloc() so the ops-table member resolves to an svm_*
 * function with the standard signature.
 */
static struct vmspace *
svm_vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
return (svm_npt_alloc(min, max));
}
/*
 * vmm_ops 'vmspace_free' entry point for AMD SVM: release a vmspace
 * previously obtained from svm_vmspace_alloc().  Wrapper around
 * svm_npt_free().
 */
static void
svm_vmspace_free(struct vmspace *vmspace)
{
svm_npt_free(vmspace);
}
static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
@ -2374,7 +2399,7 @@ svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
#ifdef BHYVE_SNAPSHOT
static int
svm_snapshot_vmi(void *arg, struct vm_snapshot_meta *meta)
svm_snapshot(void *arg, struct vm_snapshot_meta *meta)
{
/* struct svm_softc is AMD's representation for SVM softc */
struct svm_softc *sc;
@ -2527,7 +2552,7 @@ svm_snapshot_vmi(void *arg, struct vm_snapshot_meta *meta)
}
static int
svm_snapshot_vmcx(void *arg, struct vm_snapshot_meta *meta, int vcpu)
svm_vmcx_snapshot(void *arg, struct vm_snapshot_meta *meta, int vcpu)
{
struct vmcb *vmcb;
struct svm_softc *sc;
@ -2672,26 +2697,26 @@ svm_restore_tsc(void *arg, int vcpu, uint64_t offset)
}
#endif
struct vmm_ops vmm_ops_amd = {
const struct vmm_ops vmm_ops_amd = {
.modinit = svm_modinit,
.modcleanup = svm_modcleanup,
.modresume = svm_modresume,
.init = svm_init,
.run = svm_run,
.cleanup = svm_cleanup,
.resume = svm_restore,
.vminit = svm_vminit,
.vmrun = svm_vmrun,
.vmcleanup = svm_vmcleanup,
.vmgetreg = svm_getreg,
.vmsetreg = svm_setreg,
.vmgetdesc = vmcb_getdesc,
.vmsetdesc = vmcb_setdesc,
.vmgetcap = svm_getcap,
.vmsetcap = svm_setcap,
.vmspace_alloc = svm_npt_alloc,
.vmspace_free = svm_npt_free,
.getreg = svm_getreg,
.setreg = svm_setreg,
.getdesc = svm_getdesc,
.setdesc = svm_setdesc,
.getcap = svm_getcap,
.setcap = svm_setcap,
.vmspace_alloc = svm_vmspace_alloc,
.vmspace_free = svm_vmspace_free,
.vlapic_init = svm_vlapic_init,
.vlapic_cleanup = svm_vlapic_cleanup,
#ifdef BHYVE_SNAPSHOT
.vmsnapshot = svm_snapshot_vmi,
.vmcx_snapshot = svm_snapshot_vmcx,
.vm_restore_tsc = svm_restore_tsc,
.snapshot = svm_snapshot,
.vmcx_snapshot = svm_vmcx_snapshot,
.restore_tsc = svm_restore_tsc,
#endif
};

View file

@ -610,7 +610,7 @@ vmx_disable(void *arg __unused)
}
static int
vmx_cleanup(void)
vmx_modcleanup(void)
{
if (pirvec >= 0)
@ -652,7 +652,7 @@ vmx_enable(void *arg __unused)
}
static void
vmx_restore(void)
vmx_modresume(void)
{
if (vmxon_enabled[curcpu])
@ -660,7 +660,7 @@ vmx_restore(void)
}
static int
vmx_init(int ipinum)
vmx_modinit(int ipinum)
{
int error;
uint64_t basic, fixed0, fixed1, feature_control;
@ -668,7 +668,8 @@ vmx_init(int ipinum)
/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
if (!(cpu_feature2 & CPUID2_VMX)) {
printf("vmx_init: processor does not support VMX operation\n");
printf("vmx_modinit: processor does not support VMX "
"operation\n");
return (ENXIO);
}
@ -679,7 +680,7 @@ vmx_init(int ipinum)
feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
(feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
printf("vmx_init: VMX operation disabled by BIOS\n");
printf("vmx_modinit: VMX operation disabled by BIOS\n");
return (ENXIO);
}
@ -689,7 +690,7 @@ vmx_init(int ipinum)
*/
basic = rdmsr(MSR_VMX_BASIC);
if ((basic & (1UL << 54)) == 0) {
printf("vmx_init: processor does not support desired basic "
printf("vmx_modinit: processor does not support desired basic "
"capabilities\n");
return (EINVAL);
}
@ -700,8 +701,8 @@ vmx_init(int ipinum)
PROCBASED_CTLS_ONE_SETTING,
PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
if (error) {
printf("vmx_init: processor does not support desired primary "
"processor-based controls\n");
printf("vmx_modinit: processor does not support desired "
"primary processor-based controls\n");
return (error);
}
@ -714,8 +715,8 @@ vmx_init(int ipinum)
PROCBASED_CTLS2_ONE_SETTING,
PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
if (error) {
printf("vmx_init: processor does not support desired secondary "
"processor-based controls\n");
printf("vmx_modinit: processor does not support desired "
"secondary processor-based controls\n");
return (error);
}
@ -731,8 +732,8 @@ vmx_init(int ipinum)
PINBASED_CTLS_ONE_SETTING,
PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
if (error) {
printf("vmx_init: processor does not support desired "
"pin-based controls\n");
printf("vmx_modinit: processor does not support desired "
"pin-based controls\n");
return (error);
}
@ -742,7 +743,7 @@ vmx_init(int ipinum)
VM_EXIT_CTLS_ZERO_SETTING,
&exit_ctls);
if (error) {
printf("vmx_init: processor does not support desired "
printf("vmx_modinit: processor does not support desired "
"exit controls\n");
return (error);
}
@ -752,7 +753,7 @@ vmx_init(int ipinum)
VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
&entry_ctls);
if (error) {
printf("vmx_init: processor does not support desired "
printf("vmx_modinit: processor does not support desired "
"entry controls\n");
return (error);
}
@ -873,8 +874,9 @@ vmx_init(int ipinum)
&IDTVEC(justreturn));
if (pirvec < 0) {
if (bootverbose) {
printf("vmx_init: unable to allocate "
"posted interrupt vector\n");
printf("vmx_modinit: unable to "
"allocate posted interrupt "
"vector\n");
}
} else {
posted_interrupts = 1;
@ -890,7 +892,7 @@ vmx_init(int ipinum)
/* Initialize EPT */
error = ept_init(ipinum);
if (error) {
printf("vmx_init: ept initialization failed (%d)\n", error);
printf("vmx_modinit: ept initialization failed (%d)\n", error);
return (error);
}
@ -1015,7 +1017,7 @@ vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
#define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
vmx_init(struct vm *vm, pmap_t pmap)
{
uint16_t vpid[VM_MAXCPU];
int i, error;
@ -1083,7 +1085,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
guest_msr_rw(vmx, MSR_EFER) ||
guest_msr_ro(vmx, MSR_TSC) ||
((cap_rdpid || cap_rdtscp) && guest_msr_ro(vmx, MSR_TSC_AUX)))
panic("vmx_vminit: error setting guest msr access");
panic("vmx_init: error setting guest msr access");
vpid_alloc(vpid, VM_MAXCPU);
@ -1100,7 +1102,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
vmcs->identifier = vmx_revision();
error = vmclear(vmcs);
if (error != 0) {
panic("vmx_vminit: vmclear error %d on vcpu %d\n",
panic("vmx_init: vmclear error %d on vcpu %d\n",
error, i);
}
@ -1158,7 +1160,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
vtophys(&vmx->pir_desc[i]));
}
VMCLEAR(vmcs);
KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
KASSERT(error == 0, ("vmx_init: error customizing the vmcs"));
vmx->cap[i].set = 0;
vmx->cap[i].set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0;
@ -3001,7 +3003,7 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
* from a different process than the one that actually runs it.
*
* If the life of a virtual machine was spent entirely in the context
* of a single process we could do this once in vmx_vminit().
* of a single process we could do this once in vmx_init().
*/
vmcs_write(VMCS_HOST_CR3, rcr3());
@ -3177,7 +3179,7 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
}
static void
vmx_vmcleanup(void *arg)
vmx_cleanup(void *arg)
{
int i;
struct vmx *vmx = arg;
@ -3547,7 +3549,7 @@ vmx_setcap(void *arg, int vcpu, int type, int val)
/*
* Choose not to support enabling/disabling
* RDPID/RDTSCP via libvmmapi since, as per the
* discussion in vmx_init(), RDPID/RDTSCP are
* discussion in vmx_modinit(), RDPID/RDTSCP are
* either always enabled or always disabled.
*/
error = EOPNOTSUPP;
@ -3617,6 +3619,18 @@ vmx_setcap(void *arg, int vcpu, int type, int val)
return (0);
}
/*
 * vmm_ops 'vmspace_alloc' entry point for Intel VMX: allocate the
 * EPT-backed vmspace for a guest.  Wrapper around ept_vmspace_alloc()
 * so the ops-table member resolves to a vmx_* function with the
 * standard signature.
 */
static struct vmspace *
vmx_vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
return (ept_vmspace_alloc(min, max));
}
/*
 * vmm_ops 'vmspace_free' entry point for Intel VMX: release a vmspace
 * previously obtained from vmx_vmspace_alloc().  Wrapper around
 * ept_vmspace_free().
 */
static void
vmx_vmspace_free(struct vmspace *vmspace)
{
ept_vmspace_free(vmspace);
}
struct vlapic_vtx {
struct vlapic vlapic;
struct pir_desc *pir_desc;
@ -4032,7 +4046,7 @@ vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
#ifdef BHYVE_SNAPSHOT
static int
vmx_snapshot_vmi(void *arg, struct vm_snapshot_meta *meta)
vmx_snapshot(void *arg, struct vm_snapshot_meta *meta)
{
struct vmx *vmx;
struct vmxctx *vmxctx;
@ -4076,7 +4090,7 @@ vmx_snapshot_vmi(void *arg, struct vm_snapshot_meta *meta)
}
static int
vmx_snapshot_vmcx(void *arg, struct vm_snapshot_meta *meta, int vcpu)
vmx_vmcx_snapshot(void *arg, struct vm_snapshot_meta *meta, int vcpu)
{
struct vmcs *vmcs;
struct vmx *vmx;
@ -4177,26 +4191,26 @@ vmx_restore_tsc(void *arg, int vcpu, uint64_t offset)
}
#endif
struct vmm_ops vmm_ops_intel = {
const struct vmm_ops vmm_ops_intel = {
.modinit = vmx_modinit,
.modcleanup = vmx_modcleanup,
.modresume = vmx_modresume,
.init = vmx_init,
.run = vmx_run,
.cleanup = vmx_cleanup,
.resume = vmx_restore,
.vminit = vmx_vminit,
.vmrun = vmx_run,
.vmcleanup = vmx_vmcleanup,
.vmgetreg = vmx_getreg,
.vmsetreg = vmx_setreg,
.vmgetdesc = vmx_getdesc,
.vmsetdesc = vmx_setdesc,
.vmgetcap = vmx_getcap,
.vmsetcap = vmx_setcap,
.vmspace_alloc = ept_vmspace_alloc,
.vmspace_free = ept_vmspace_free,
.getreg = vmx_getreg,
.setreg = vmx_setreg,
.getdesc = vmx_getdesc,
.setdesc = vmx_setdesc,
.getcap = vmx_getcap,
.setcap = vmx_setcap,
.vmspace_alloc = vmx_vmspace_alloc,
.vmspace_free = vmx_vmspace_free,
.vlapic_init = vmx_vlapic_init,
.vlapic_cleanup = vmx_vlapic_cleanup,
#ifdef BHYVE_SNAPSHOT
.vmsnapshot = vmx_snapshot_vmi,
.vmcx_snapshot = vmx_snapshot_vmcx,
.vm_restore_tsc = vmx_restore_tsc,
.snapshot = vmx_snapshot,
.vmcx_snapshot = vmx_vmcx_snapshot,
.restore_tsc = vmx_restore_tsc,
#endif
};

View file

@ -160,8 +160,8 @@ vmx_have_msr_tsc_aux(struct vmx *vmx)
/*
* Since the values of these bits are uniform across all vCPUs
* (see discussion in vmx_init() and initialization of these bits
* in vmx_vminit()), just always use vCPU-zero's capability set and
* (see discussion in vmx_modinit() and initialization of these bits
* in vmx_init()), just always use vCPU-zero's capability set and
* remove the need to require a vcpuid argument.
*/
return ((vmx->cap[0].set & rdpid_rdtscp_bits) != 0);

View file

@ -67,6 +67,7 @@ __FBSDID("$FreeBSD$");
#include <machine/md_var.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <x86/ifunc.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
@ -184,42 +185,53 @@ struct vm {
static int vmm_initialized;
static struct vmm_ops *ops;
#define VMM_INIT(num) (ops != NULL ? (*ops->init)(num) : 0)
#define VMM_CLEANUP() (ops != NULL ? (*ops->cleanup)() : 0)
#define VMM_RESUME() (ops != NULL ? (*ops->resume)() : 0)
static void vmmops_panic(void);
#define VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define VMRUN(vmi, vcpu, rip, pmap, evinfo) \
(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, evinfo) : ENXIO)
#define VMCLEANUP(vmi) (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define VMSPACE_ALLOC(min, max) \
(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define VMSPACE_FREE(vmspace) \
(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define VMGETREG(vmi, vcpu, num, retval) \
(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETREG(vmi, vcpu, num, val) \
(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define VMGETDESC(vmi, vcpu, num, desc) \
(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMSETDESC(vmi, vcpu, num, desc) \
(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMGETCAP(vmi, vcpu, num, retval) \
(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETCAP(vmi, vcpu, num, val) \
(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define VLAPIC_INIT(vmi, vcpu) \
(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define VLAPIC_CLEANUP(vmi, vlapic) \
(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)
/*
 * Fallback target for the vmmops_* ifuncs: selected by the resolver in
 * DEFINE_VMMOPS_IFUNC when the CPU is neither Intel VT-x nor AMD SVM
 * capable.  Reaching it means a vmm_ops function was called on
 * unsupported hardware, which is a bug — panic with a diagnostic.
 */
static void
vmmops_panic(void)
{
panic("vmm_ops func called when !vmm_is_intel() && !vmm_is_svm()");
}
/*
 * Define a vmmops_<opname> ifunc whose resolver runs once (at load
 * time) and returns the matching member of vmm_ops_intel or
 * vmm_ops_amd depending on the detected CPU vendor, or vmmops_panic
 * (cast to the expected function type) when neither is supported.
 * This replaces the indirect calls through the old 'ops' pointer.
 */
#define DEFINE_VMMOPS_IFUNC(ret_type, opname, args) \
DEFINE_IFUNC(static, ret_type, vmmops_##opname, args) \
{ \
if (vmm_is_intel()) \
return (vmm_ops_intel.opname); \
else if (vmm_is_svm()) \
return (vmm_ops_amd.opname); \
else \
return ((ret_type (*)args)vmmops_panic); \
}
DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
DEFINE_VMMOPS_IFUNC(void, modresume, (void))
DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
DEFINE_VMMOPS_IFUNC(int, run, (void *vmi, int vcpu, register_t rip,
struct pmap *pmap, struct vm_eventinfo *info))
DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
DEFINE_VMMOPS_IFUNC(int, getreg, (void *vmi, int vcpu, int num,
uint64_t *retval))
DEFINE_VMMOPS_IFUNC(int, setreg, (void *vmi, int vcpu, int num,
uint64_t val))
DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vmi, int vcpu, int num,
struct seg_desc *desc))
DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vmi, int vcpu, int num,
struct seg_desc *desc))
DEFINE_VMMOPS_IFUNC(int, getcap, (void *vmi, int vcpu, int num, int *retval))
DEFINE_VMMOPS_IFUNC(int, setcap, (void *vmi, int vcpu, int num, int val))
DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
vm_offset_t max))
DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vmi, int vcpu))
DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (void *vmi, struct vlapic *vlapic))
#ifdef BHYVE_SNAPSHOT
#define VM_SNAPSHOT_VMI(vmi, meta) \
(ops != NULL ? (*ops->vmsnapshot)(vmi, meta) : ENXIO)
#define VM_SNAPSHOT_VMCX(vmi, meta, vcpuid) \
(ops != NULL ? (*ops->vmcx_snapshot)(vmi, meta, vcpuid) : ENXIO)
#define VM_RESTORE_TSC(vmi, vcpuid, offset) \
(ops != NULL ? (*ops->vm_restore_tsc)(vmi, vcpuid, offset) : ENXIO)
DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta
*meta))
DEFINE_VMMOPS_IFUNC(int, vmcx_snapshot, (void *vmi, struct vm_snapshot_meta
*meta, int vcpu))
DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vmi, int vcpuid, uint64_t now))
#endif
#define fpu_start_emulating() load_cr0(rcr0() | CR0_TS)
@ -282,7 +294,7 @@ vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
struct vcpu *vcpu = &vm->vcpu[i];
VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
vmmops_vlapic_cleanup(vm->cookie, vcpu->vlapic);
if (destroy) {
vmm_stat_free(vcpu->stats);
fpu_save_area_free(vcpu->guestfpu);
@ -310,7 +322,7 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
vcpu->tsc_offset = 0;
}
vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
vcpu->vlapic = vmmops_vlapic_init(vm->cookie, vcpu_id);
vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
vcpu->reqidle = 0;
vcpu->exitintinfo = 0;
@ -342,17 +354,14 @@ vm_exitinfo(struct vm *vm, int cpuid)
return (&vcpu->exitinfo);
}
static void
vmm_resume(void)
{
VMM_RESUME();
}
static int
vmm_init(void)
{
int error;
if (!vmm_is_hw_supported())
return (ENXIO);
vmm_host_state_init();
vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
@ -364,16 +373,9 @@ vmm_init(void)
if (error)
return (error);
if (vmm_is_intel())
ops = &vmm_ops_intel;
else if (vmm_is_svm())
ops = &vmm_ops_amd;
else
return (ENXIO);
vmm_resume_p = vmmops_modresume;
vmm_resume_p = vmm_resume;
return (VMM_INIT(vmm_ipinum));
return (vmmops_modinit(vmm_ipinum));
}
static int
@ -383,25 +385,33 @@ vmm_handler(module_t mod, int what, void *arg)
switch (what) {
case MOD_LOAD:
vmmdev_init();
error = vmm_init();
if (error == 0)
vmm_initialized = 1;
if (vmm_is_hw_supported()) {
vmmdev_init();
error = vmm_init();
if (error == 0)
vmm_initialized = 1;
} else {
error = ENXIO;
}
break;
case MOD_UNLOAD:
error = vmmdev_cleanup();
if (error == 0) {
vmm_resume_p = NULL;
iommu_cleanup();
if (vmm_ipinum != IPI_AST)
lapic_ipi_free(vmm_ipinum);
error = VMM_CLEANUP();
/*
* Something bad happened - prevent new
* VMs from being created
*/
if (error)
vmm_initialized = 0;
if (vmm_is_hw_supported()) {
error = vmmdev_cleanup();
if (error == 0) {
vmm_resume_p = NULL;
iommu_cleanup();
if (vmm_ipinum != IPI_AST)
lapic_ipi_free(vmm_ipinum);
error = vmmops_modcleanup();
/*
* Something bad happened - prevent new
* VMs from being created
*/
if (error)
vmm_initialized = 0;
}
} else {
error = 0;
}
break;
default:
@ -431,7 +441,7 @@ vm_init(struct vm *vm, bool create)
{
int i;
vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
vm->iommu = NULL;
vm->vioapic = vioapic_init(vm);
vm->vhpet = vhpet_init(vm);
@ -473,7 +483,7 @@ vm_create(const char *name, struct vm **retvm)
if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
return (EINVAL);
vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
vmspace = vmmops_vmspace_alloc(0, VM_MAXUSER_ADDRESS);
if (vmspace == NULL)
return (ENOMEM);
@ -549,7 +559,7 @@ vm_cleanup(struct vm *vm, bool destroy)
for (i = 0; i < vm->maxcpus; i++)
vcpu_cleanup(vm, i, destroy);
VMCLEANUP(vm->cookie);
vmmops_cleanup(vm->cookie);
/*
* System memory is removed from the guest address space only when
@ -569,7 +579,7 @@ vm_cleanup(struct vm *vm, bool destroy)
for (i = 0; i < VM_MAX_MEMSEGS; i++)
vm_free_memseg(vm, i);
VMSPACE_FREE(vm->vmspace);
vmmops_vmspace_free(vm->vmspace);
vm->vmspace = NULL;
}
}
@ -1033,7 +1043,7 @@ vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
if (reg >= VM_REG_LAST)
return (EINVAL);
return (VMGETREG(vm->cookie, vcpu, reg, retval));
return (vmmops_getreg(vm->cookie, vcpu, reg, retval));
}
int
@ -1048,7 +1058,7 @@ vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
if (reg >= VM_REG_LAST)
return (EINVAL);
error = VMSETREG(vm->cookie, vcpuid, reg, val);
error = vmmops_setreg(vm->cookie, vcpuid, reg, val);
if (error || reg != VM_REG_GUEST_RIP)
return (error);
@ -1102,7 +1112,7 @@ vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
if (!is_segment_register(reg) && !is_descriptor_table(reg))
return (EINVAL);
return (VMGETDESC(vm->cookie, vcpu, reg, desc));
return (vmmops_getdesc(vm->cookie, vcpu, reg, desc));
}
int
@ -1115,7 +1125,7 @@ vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
if (!is_segment_register(reg) && !is_descriptor_table(reg))
return (EINVAL);
return (VMSETDESC(vm->cookie, vcpu, reg, desc));
return (vmmops_setdesc(vm->cookie, vcpu, reg, desc));
}
static void
@ -1333,7 +1343,7 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
* software events that would cause this vcpu to wakeup.
*
* These interrupts/events could have happened after the
* vcpu returned from VMRUN() and before it acquired the
* vcpu returned from vmmops_run() and before it acquired the
* vcpu lock above.
*/
if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
@ -1730,7 +1740,7 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
restore_guest_fpustate(vcpu);
vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
error = vmmops_run(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
save_guest_fpustate(vcpu);
@ -1819,9 +1829,9 @@ vm_restart_instruction(void *arg, int vcpuid)
} else if (state == VCPU_FROZEN) {
/*
* When a vcpu is "frozen" it is outside the critical section
* around VMRUN() and 'nextrip' points to the next instruction.
* Thus instruction restart is achieved by setting 'nextrip'
* to the vcpu's %rip.
* around vmmops_run() and 'nextrip' points to the next
* instruction. Thus instruction restart is achieved by setting
* 'nextrip' to the vcpu's %rip.
*/
error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
KASSERT(!error, ("%s: error %d getting rip", __func__, error));
@ -2226,7 +2236,7 @@ vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
if (type < 0 || type >= VM_CAP_MAX)
return (EINVAL);
return (VMGETCAP(vm->cookie, vcpu, type, retval));
return (vmmops_getcap(vm->cookie, vcpu, type, retval));
}
int
@ -2238,7 +2248,7 @@ vm_set_capability(struct vm *vm, int vcpu, int type, int val)
if (type < 0 || type >= VM_CAP_MAX)
return (EINVAL);
return (VMSETCAP(vm->cookie, vcpu, type, val));
return (vmmops_setcap(vm->cookie, vcpu, type, val));
}
struct vlapic *
@ -2824,7 +2834,7 @@ vm_snapshot_vmcx(struct vm *vm, struct vm_snapshot_meta *meta)
error = 0;
for (i = 0; i < VM_MAXCPU; i++) {
error = VM_SNAPSHOT_VMCX(vm->cookie, meta, i);
error = vmmops_vmcx_snapshot(vm->cookie, meta, i);
if (error != 0) {
printf("%s: failed to snapshot vmcs/vmcb data for "
"vCPU: %d; error: %d\n", __func__, i, error);
@ -2846,7 +2856,7 @@ vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta)
switch (meta->dev_req) {
case STRUCT_VMX:
ret = VM_SNAPSHOT_VMI(vm->cookie, meta);
ret = vmmops_snapshot(vm->cookie, meta);
break;
case STRUCT_VMCX:
ret = vm_snapshot_vmcx(vm, meta);
@ -2913,7 +2923,8 @@ vm_restore_time(struct vm *vm)
for (i = 0; i < nitems(vm->vcpu); i++) {
vcpu = &vm->vcpu[i];
error = VM_RESTORE_TSC(vm->cookie, i, vcpu->tsc_offset - now);
error = vmmops_restore_tsc(vm->cookie, i, vcpu->tsc_offset -
now);
if (error)
return (error);
}

View file

@ -38,6 +38,12 @@ __FBSDID("$FreeBSD$");
#include "vmm_util.h"
/*
 * Report whether the host CPU supports hardware virtualization usable
 * by vmm: true if either Intel VT-x (vmm_is_intel()) or AMD SVM
 * (vmm_is_svm()) is present.
 */
bool
vmm_is_hw_supported(void)
{
return (vmm_is_intel() || vmm_is_svm());
}
bool
vmm_is_intel(void)
{

View file

@ -33,6 +33,7 @@
struct trapframe;
bool vmm_is_hw_supported(void);
bool vmm_is_intel(void);
bool vmm_is_svm(void);
bool vmm_supports_1G_pages(void);