vmm: Lookup vcpu pointers in vmmdev_ioctl.

Centralize mapping vCPU IDs to struct vcpu objects in vmmdev_ioctl and
pass vcpu pointers to the routines in vmm.c.  For operations that can
act either on a single vCPU or on all vCPUs, pass pointers to both the
VM and the vCPU, using a NULL vCPU pointer to request the global
action.

Reviewed by:	corvink, markj
Differential Revision:	https://reviews.freebsd.org/D37168
Author:	John Baldwin
Date:	2022-11-18 10:03:52 -08:00
commit 3f0f4b1598
parent 0cbc39d53d
12 changed files with 245 additions and 277 deletions
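
Every hunk below applies one pattern: the integer vCPU ID is validated and
mapped to a struct vcpu pointer once, at the ioctl boundary, and the pointer
is what flows into vmm.c.  A minimal user-space sketch of that shape, with
stand-in types rather than the real vmm structures:

#include <stddef.h>

/* Stand-in types; the real struct vm and struct vcpu live in the vmm code. */
struct vcpu {
	int vcpuid;
};

struct vm {
	int maxcpus;
	struct vcpu cpus[16];
};

/* One place maps an ID to a pointer and performs the range check... */
static struct vcpu *
lookup_vcpu(struct vm *vm, int vcpuid)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (NULL);
	return (&vm->cpus[vcpuid]);
}

/* ...so per-vCPU routines take the pointer and need no check of their own. */
static int
inject_nmi(struct vcpu *vcpu)
{
	(void)vcpu;	/* set nmi_pending and notify in the real code */
	return (0);
}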

sys/amd64/include/vmm.h

@@ -264,14 +264,14 @@ int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vcpu *vcpu, int reg,
struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
int vm_set_seg_desc(struct vcpu *vcpu, int reg,
struct seg_desc *desc);
int vm_run(struct vm *vm, struct vm_run *vmrun);
int vm_run(struct vcpu *vcpu, struct vm_exit *vme_user);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vm *vm, int vcpu);
int vm_inject_nmi(struct vcpu *vcpu);
int vm_nmi_pending(struct vcpu *vcpu);
void vm_nmi_clear(struct vcpu *vcpu);
int vm_inject_extint(struct vm *vm, int vcpu);
int vm_inject_extint(struct vcpu *vcpu);
int vm_extint_pending(struct vcpu *vcpu);
void vm_extint_clear(struct vcpu *vcpu);
int vcpu_vcpuid(struct vcpu *vcpu);
@@ -280,14 +280,14 @@ struct vcpu *vm_vcpu(struct vm *vm, int cpu);
struct vlapic *vm_lapic(struct vcpu *vcpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_get_capability(struct vcpu *vcpu, int type, int *val);
int vm_set_capability(struct vcpu *vcpu, int type, int val);
int vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
int vm_suspend_cpu(struct vm *vm, int vcpu);
int vm_resume_cpu(struct vm *vm, int vcpu);
int vm_activate_cpu(struct vcpu *vcpu);
int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_restart_instruction(struct vcpu *vcpu);
struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
void vm_exit_suspended(struct vcpu *vcpu, uint64_t rip);
@@ -361,8 +361,7 @@ enum vcpu_state {
VCPU_SLEEPING,
};
int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
bool from_idle);
int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
static int __inline
@@ -383,7 +382,7 @@ vcpu_should_yield(struct vcpu *vcpu)
#endif
void *vcpu_stats(struct vcpu *vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
void vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
@@ -429,7 +428,7 @@ int vm_exit_intinfo(struct vcpu *vcpu, uint64_t intinfo);
*/
int vm_entry_intinfo(struct vcpu *vcpu, uint64_t *info);
int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
int vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2);
/*
* Function used to keep track of the guest's TSC offset. The

sys/amd64/vmm/io/vatpic.c

@@ -262,7 +262,7 @@ vatpic_notify_intr(struct vatpic *vatpic)
* interrupt.
*/
atpic->intr_raised = true;
lapic_set_local_intr(vatpic->vm, -1, APIC_LVT_LINT0);
lapic_set_local_intr(vatpic->vm, NULL, APIC_LVT_LINT0);
vioapic_pulse_irq(vatpic->vm, 0);
} else {
VATPIC_CTR3(vatpic, "atpic master no eligible interrupts "

sys/amd64/vmm/io/vlapic.c

@@ -460,13 +460,13 @@ vlapic_fire_lvt(struct vlapic *vlapic, u_int lvt)
return (0);
}
if (vlapic_set_intr_ready(vlapic, vec, false))
vcpu_notify_event(vlapic->vm, vlapic->vcpuid, true);
vcpu_notify_event(vlapic->vcpu, true);
break;
case APIC_LVT_DM_NMI:
vm_inject_nmi(vlapic->vm, vlapic->vcpuid);
vm_inject_nmi(vlapic->vcpu);
break;
case APIC_LVT_DM_EXTINT:
vm_inject_extint(vlapic->vm, vlapic->vcpuid);
vm_inject_extint(vlapic->vcpu);
break;
default:
// Other modes ignored
@@ -680,10 +680,10 @@ vlapic_trigger_lvt(struct vlapic *vlapic, int vector)
*/
switch (vector) {
case APIC_LVT_LINT0:
vm_inject_extint(vlapic->vm, vlapic->vcpuid);
vm_inject_extint(vlapic->vcpu);
break;
case APIC_LVT_LINT1:
vm_inject_nmi(vlapic->vm, vlapic->vcpuid);
vm_inject_nmi(vlapic->vcpu);
break;
default:
break;
@@ -1040,6 +1040,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
uint64_t icrval;
uint32_t dest, vec, mode, shorthand;
struct vlapic *vlapic2;
struct vcpu *vcpu;
struct vm_exit *vmexit;
struct LAPIC *lapic;
@@ -1100,7 +1101,8 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
}
CPU_FOREACH_ISSET(i, &dmask) {
lapic_intr_edge(vlapic->vm, i, vec);
vcpu = vm_vcpu(vlapic->vm, i);
lapic_intr_edge(vcpu, vec);
vmm_stat_array_incr(vlapic->vcpu, IPIS_SENT, i, 1);
VLAPIC_CTR2(vlapic,
"vlapic sending ipi %d to vcpuid %d", vec, i);
@@ -1109,7 +1111,8 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
break;
case APIC_DELMODE_NMI:
CPU_FOREACH_ISSET(i, &dmask) {
vm_inject_nmi(vlapic->vm, i);
vcpu = vm_vcpu(vlapic->vm, i);
vm_inject_nmi(vcpu);
VLAPIC_CTR1(vlapic,
"vlapic sending ipi nmi to vcpuid %d", i);
}
@@ -1130,7 +1133,8 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
* requires that the boot state is set to SIPI
* here.
*/
vlapic2 = vm_lapic(vm_vcpu(vlapic->vm, i));
vcpu = vm_vcpu(vlapic->vm, i);
vlapic2 = vm_lapic(vcpu);
vlapic2->boot_state = BS_SIPI;
break;
}
@@ -1154,7 +1158,8 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
/*
* Ignore SIPIs in any state other than wait-for-SIPI
*/
vlapic2 = vm_lapic(vm_vcpu(vlapic->vm, i));
vcpu = vm_vcpu(vlapic->vm, i);
vlapic2 = vm_lapic(vcpu);
if (vlapic2->boot_state != BS_SIPI)
break;
vlapic2->boot_state = BS_RUNNING;
@@ -1169,7 +1174,8 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
}
CPU_FOREACH_ISSET(i, &dmask) {
vlapic2 = vm_lapic(vm_vcpu(vlapic->vm, i));
vcpu = vm_vcpu(vlapic->vm, i);
vlapic2 = vm_lapic(vcpu);
/*
* Ignore SIPIs in any state other than wait-for-SIPI
@@ -1235,7 +1241,7 @@ vlapic_self_ipi_handler(struct vlapic *vlapic, uint64_t val)
KASSERT(x2apic(vlapic), ("SELF_IPI does not exist in xAPIC mode"));
vec = val & 0xff;
lapic_intr_edge(vlapic->vm, vlapic->vcpuid, vec);
lapic_intr_edge(vlapic->vcpu, vec);
vmm_stat_array_incr(vlapic->vcpu, IPIS_SENT, vlapic->vcpuid, 1);
VLAPIC_CTR1(vlapic, "vlapic self-ipi %d", vec);
}
@@ -1696,6 +1702,7 @@ void
vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
int delmode, int vec)
{
struct vcpu *vcpu;
bool lowprio;
int vcpuid;
cpuset_t dmask;
@@ -1716,10 +1723,11 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
vlapic_calcdest(vm, &dmask, dest, phys, lowprio, false);
CPU_FOREACH_ISSET(vcpuid, &dmask) {
vcpu = vm_vcpu(vm, vcpuid);
if (delmode == IOART_DELEXINT) {
vm_inject_extint(vm, vcpuid);
vm_inject_extint(vcpu);
} else {
lapic_set_intr(vm, vcpuid, vec, level);
lapic_set_intr(vcpu, vec, level);
}
}
}

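The ICR and interrupt-delivery loops above now resolve a vcpu pointer per
target before calling the pointer-based APIs.  A compilable user-space
analog of that loop, with stub types and a plain bitmask standing in for
cpuset_t and vm_vcpu():

#include <stdint.h>
#include <stdio.h>

struct vcpu {
	int vcpuid;
};

struct vm {
	struct vcpu cpus[8];
};

static struct vcpu *
get_vcpu(struct vm *vm, int i)		/* analog of vm_vcpu() */
{
	return (&vm->cpus[i]);
}

static void
intr_edge(struct vcpu *vcpu, int vec)
{
	printf("vector %d -> vcpu %d\n", vec, vcpu->vcpuid);
}

/*
 * Analog of the new ICR delivery loop: each set bit in the destination
 * mask is converted to a pointer once, and only pointers flow onward.
 */
static void
deliver_ipi(struct vm *vm, uint8_t dmask, int vec)
{
	for (int i = 0; i < 8; i++) {
		if (dmask & (1u << i))
			intr_edge(get_vcpu(vm, i), vec);
	}
}

int
main(void)
{
	struct vm vm = { .cpus = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}} };

	deliver_ipi(&vm, 0x05, 32);	/* targets vCPUs 0 and 2 */
	return (0);
}
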
sys/amd64/vmm/vmm.c

@@ -309,12 +309,6 @@ vcpu_state2str(enum vcpu_state state)
}
#endif
static __inline void *
vcpu_cookie(struct vm *vm, int i)
{
return (vm->vcpu[i].cookie);
}
static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
@@ -354,7 +348,7 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
vcpu->cookie = vmmops_vcpu_init(vm->cookie, vcpu, vcpu_id);
vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
vcpu->reqidle = 0;
vcpu->exitintinfo = 0;
vcpu->nmi_pending = 0;
@@ -1172,16 +1166,13 @@ vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
}
int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
struct seg_desc *desc)
vm_set_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
{
if (vcpu < 0 || vcpu >= vm->maxcpus)
return (EINVAL);
if (!is_segment_register(reg) && !is_descriptor_table(reg))
return (EINVAL);
return (vmmops_setdesc(vcpu_cookie(vm, vcpu), reg, desc));
return (vmmops_setdesc(vcpu->cookie, reg, desc));
}
static void
@@ -1228,13 +1219,11 @@ save_guest_fpustate(struct vcpu *vcpu)
static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
static int
vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
bool from_idle)
{
struct vcpu *vcpu;
int error;
vcpu = &vm->vcpu[vcpuid];
vcpu_assert_locked(vcpu);
/*
@@ -1246,7 +1235,7 @@ vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
while (vcpu->state != VCPU_IDLE) {
vcpu->reqidle = 1;
vcpu_notify_event_locked(vcpu, false);
VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
VMM_CTR1(vcpu, "vcpu state change from %s to "
"idle requested", vcpu_state2str(vcpu->state));
msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
}
@@ -1286,7 +1275,7 @@ vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
if (error)
return (EBUSY);
VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s",
VMM_CTR2(vcpu, "vcpu state changed from %s to %s",
vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
vcpu->state = newstate;
@@ -1302,20 +1291,20 @@ vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
}
static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
{
int error;
if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
if ((error = vcpu_set_state(vcpu, newstate, false)) != 0)
panic("Error %d setting state to %d\n", error, newstate);
}
static void
vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
int error;
if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
panic("Error %d setting state to %d", error, newstate);
}
@@ -1366,21 +1355,21 @@ vm_handle_rendezvous(struct vcpu *vcpu)
* Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
*/
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
vm_handle_hlt(struct vcpu *vcpu, bool intr_disabled, bool *retu)
{
struct vcpu *vcpu;
struct vm *vm = vcpu->vm;
const char *wmesg;
struct thread *td;
int error, t, vcpu_halted, vm_halted;
int error, t, vcpuid, vcpu_halted, vm_halted;
KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
vcpu = &vm->vcpu[vcpuid];
vcpuid = vcpu->vcpuid;
vcpu_halted = 0;
vm_halted = 0;
error = 0;
td = curthread;
KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
vcpu_lock(vcpu);
while (1) {
/*
@@ -1418,7 +1407,7 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
*/
if (intr_disabled) {
wmesg = "vmhalt";
VCPU_CTR0(vm, vcpuid, "Halted");
VMM_CTR0(vcpu, "Halted");
if (!vcpu_halted && halt_detection_enabled) {
vcpu_halted = 1;
CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
@@ -1432,13 +1421,13 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
}
t = ticks;
vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
/*
* XXX msleep_spin() cannot be interrupted by signals so
* wake up periodically to check pending signals.
*/
msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
vcpu_require_state_locked(vcpu, VCPU_FROZEN);
vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
if (td_ast_pending(td, TDA_SUSPEND)) {
vcpu_unlock(vcpu);
@@ -1466,14 +1455,13 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
}
static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
vm_handle_paging(struct vcpu *vcpu, bool *retu)
{
struct vm *vm = vcpu->vm;
int rv, ftype;
struct vm_map *map;
struct vcpu *vcpu;
struct vm_exit *vme;
vcpu = &vm->vcpu[vcpuid];
vme = &vcpu->exitinfo;
KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
@@ -1488,7 +1476,7 @@ vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
vme->u.paging.gpa, ftype);
if (rv == 0) {
VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx",
ftype == VM_PROT_READ ? "accessed" : "dirty",
vme->u.paging.gpa);
goto done;
@@ -1498,7 +1486,7 @@ vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
map = &vm->vmspace->vm_map;
rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, "
"ftype = %d", rv, vme->u.paging.gpa, ftype);
if (rv != KERN_SUCCESS)
@@ -1508,10 +1496,9 @@ vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
}
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
vm_handle_inst_emul(struct vcpu *vcpu, bool *retu)
{
struct vie *vie;
struct vcpu *vcpu;
struct vm_exit *vme;
uint64_t gla, gpa, cs_base;
struct vm_guest_paging *paging;
@@ -1520,7 +1507,6 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
enum vm_cpu_mode cpu_mode;
int cs_d, error, fault;
vcpu = &vm->vcpu[vcpuid];
vme = &vcpu->exitinfo;
KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
@@ -1534,7 +1520,7 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
paging = &vme->u.inst_emul.paging;
cpu_mode = paging->cpu_mode;
VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);
VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa);
/* Fetch, decode and emulate the faulting instruction */
if (vie->num_valid == 0) {
@@ -1550,7 +1536,7 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
return (error);
if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) {
VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
VMM_CTR1(vcpu, "Error decoding instruction at %#lx",
vme->rip + cs_base);
*retu = true; /* dump instruction bytes in userspace */
return (0);
@@ -1561,8 +1547,8 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
*/
vme->inst_length = vie->num_processed;
vcpu->nextrip += vie->num_processed;
VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction "
"decoding", vcpu->nextrip);
VMM_CTR1(vcpu, "nextrip updated to %#lx after instruction decoding",
vcpu->nextrip);
/* return to userland unless this is an in-kernel emulated device */
if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
@@ -1586,17 +1572,16 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
}
static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
vm_handle_suspend(struct vcpu *vcpu, bool *retu)
{
struct vm *vm = vcpu->vm;
int error, i;
struct vcpu *vcpu;
struct thread *td;
error = 0;
vcpu = &vm->vcpu[vcpuid];
td = curthread;
CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);
CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus);
/*
* Wait until all 'active_cpus' have suspended themselves.
@@ -1608,22 +1593,22 @@ vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
vcpu_lock(vcpu);
while (error == 0) {
if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
VMM_CTR0(vcpu, "All vcpus suspended");
break;
}
if (vm->rendezvous_func == NULL) {
VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
VMM_CTR0(vcpu, "Sleeping during suspend");
vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
vcpu_require_state_locked(vcpu, VCPU_FROZEN);
if (td_ast_pending(td, TDA_SUSPEND)) {
vcpu_unlock(vcpu);
error = thread_check_susp(td, false);
vcpu_lock(vcpu);
}
} else {
VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
VMM_CTR0(vcpu, "Rendezvous during suspend");
vcpu_unlock(vcpu);
error = vm_handle_rendezvous(vcpu);
vcpu_lock(vcpu);
@@ -1636,7 +1621,7 @@ vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
*/
for (i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &vm->suspended_cpus)) {
vcpu_notify_event(vm, i, false);
vcpu_notify_event(vm_vcpu(vm, i), false);
}
}
@@ -1645,10 +1630,8 @@ vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
}
static int
vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu)
vm_handle_reqidle(struct vcpu *vcpu, bool *retu)
{
struct vcpu *vcpu = &vm->vcpu[vcpuid];
vcpu_lock(vcpu);
KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
vcpu->reqidle = 0;
@@ -1678,7 +1661,7 @@ vm_suspend(struct vm *vm, enum vm_suspend_how how)
*/
for (i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &vm->active_cpus))
vcpu_notify_event(vm, i, false);
vcpu_notify_event(vm_vcpu(vm, i), false);
}
return (0);
@@ -1751,21 +1734,18 @@ vm_exit_astpending(struct vcpu *vcpu, uint64_t rip)
}
int
vm_run(struct vm *vm, struct vm_run *vmrun)
vm_run(struct vcpu *vcpu, struct vm_exit *vme_user)
{
struct vm *vm = vcpu->vm;
struct vm_eventinfo evinfo;
int error, vcpuid;
struct vcpu *vcpu;
struct pcb *pcb;
uint64_t tscval;
struct vm_exit *vme;
bool retu, intr_disabled;
pmap_t pmap;
vcpuid = vmrun->cpuid;
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
vcpuid = vcpu->vcpuid;
if (!CPU_ISSET(vcpuid, &vm->active_cpus))
return (EINVAL);
@@ -1774,7 +1754,6 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
return (EINVAL);
pmap = vmspace_pmap(vm->vmspace);
vcpu = &vm->vcpu[vcpuid];
vme = &vcpu->exitinfo;
evinfo.rptr = &vm->rendezvous_func;
evinfo.sptr = &vm->suspend;
@@ -1792,9 +1771,9 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
restore_guest_fpustate(vcpu);
vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
vcpu_require_state(vcpu, VCPU_RUNNING);
error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
vcpu_require_state(vcpu, VCPU_FROZEN);
save_guest_fpustate(vcpu);
@@ -1807,10 +1786,10 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
vcpu->nextrip = vme->rip + vme->inst_length;
switch (vme->exitcode) {
case VM_EXITCODE_REQIDLE:
error = vm_handle_reqidle(vm, vcpuid, &retu);
error = vm_handle_reqidle(vcpu, &retu);
break;
case VM_EXITCODE_SUSPENDED:
error = vm_handle_suspend(vm, vcpuid, &retu);
error = vm_handle_suspend(vcpu, &retu);
break;
case VM_EXITCODE_IOAPIC_EOI:
vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector);
@@ -1820,17 +1799,17 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
break;
case VM_EXITCODE_HLT:
intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
error = vm_handle_hlt(vcpu, intr_disabled, &retu);
break;
case VM_EXITCODE_PAGING:
error = vm_handle_paging(vm, vcpuid, &retu);
error = vm_handle_paging(vcpu, &retu);
break;
case VM_EXITCODE_INST_EMUL:
error = vm_handle_inst_emul(vm, vcpuid, &retu);
error = vm_handle_inst_emul(vcpu, &retu);
break;
case VM_EXITCODE_INOUT:
case VM_EXITCODE_INOUT_STR:
error = vm_handle_inout(vm, vcpuid, vme, &retu);
error = vm_handle_inout(vcpu, vme, &retu);
break;
case VM_EXITCODE_MONITOR:
case VM_EXITCODE_MWAIT:
@@ -1856,10 +1835,10 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
goto restart;
vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1);
VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);
VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode);
/* copy the exit information */
bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
*vme_user = *vme;
return (error);
}
@@ -2071,14 +2050,8 @@ vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo)
}
int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
{
struct vcpu *vcpu;
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
vcpu = &vm->vcpu[vcpuid];
*info1 = vcpu->exitintinfo;
*info2 = vcpu_exception_intinfo(vcpu);
return (0);
@@ -2168,17 +2141,11 @@ vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2)
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
int
vm_inject_nmi(struct vm *vm, int vcpuid)
vm_inject_nmi(struct vcpu *vcpu)
{
struct vcpu *vcpu;
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
vcpu = &vm->vcpu[vcpuid];
vcpu->nmi_pending = 1;
vcpu_notify_event(vm, vcpuid, false);
vcpu_notify_event(vcpu, false);
return (0);
}
@@ -2201,17 +2168,11 @@ vm_nmi_clear(struct vcpu *vcpu)
static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
int
vm_inject_extint(struct vm *vm, int vcpuid)
vm_inject_extint(struct vcpu *vcpu)
{
struct vcpu *vcpu;
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
vcpu = &vm->vcpu[vcpuid];
vcpu->extint_pending = 1;
vcpu_notify_event(vm, vcpuid, false);
vcpu_notify_event(vcpu, false);
return (0);
}
@@ -2232,27 +2193,21 @@ vm_extint_clear(struct vcpu *vcpu)
}
int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
vm_get_capability(struct vcpu *vcpu, int type, int *retval)
{
if (vcpu < 0 || vcpu >= vm->maxcpus)
return (EINVAL);
if (type < 0 || type >= VM_CAP_MAX)
return (EINVAL);
return (vmmops_getcap(vcpu_cookie(vm, vcpu), type, retval));
return (vmmops_getcap(vcpu->cookie, type, retval));
}
int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
vm_set_capability(struct vcpu *vcpu, int type, int val)
{
if (vcpu < 0 || vcpu >= vm->maxcpus)
return (EINVAL);
if (type < 0 || type >= VM_CAP_MAX)
return (EINVAL);
return (vmmops_setcap(vcpu_cookie(vm, vcpu), type, val));
return (vmmops_setcap(vcpu->cookie, type, val));
}
struct vm *
@@ -2343,19 +2298,12 @@ vm_iommu_domain(struct vm *vm)
}
int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
bool from_idle)
vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
{
int error;
struct vcpu *vcpu;
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
panic("vm_set_run_state: invalid vcpuid %d", vcpuid);
vcpu = &vm->vcpu[vcpuid];
vcpu_lock(vcpu);
error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
error = vcpu_set_state_locked(vcpu, newstate, from_idle);
vcpu_unlock(vcpu);
return (error);
@@ -2376,58 +2324,48 @@ vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
}
int
vm_activate_cpu(struct vm *vm, int vcpuid)
vm_activate_cpu(struct vcpu *vcpu)
{
struct vm *vm = vcpu->vm;
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
if (CPU_ISSET(vcpuid, &vm->active_cpus))
if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
return (EBUSY);
VCPU_CTR0(vm, vcpuid, "activated");
CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
VMM_CTR0(vcpu, "activated");
CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
return (0);
}
int
vm_suspend_cpu(struct vm *vm, int vcpuid)
vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
{
int i;
if (vcpuid < -1 || vcpuid >= vm->maxcpus)
return (EINVAL);
if (vcpuid == -1) {
if (vcpu == NULL) {
vm->debug_cpus = vm->active_cpus;
for (i = 0; i < vm->maxcpus; i++) {
for (int i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &vm->active_cpus))
vcpu_notify_event(vm, i, false);
vcpu_notify_event(vm_vcpu(vm, i), false);
}
} else {
if (!CPU_ISSET(vcpuid, &vm->active_cpus))
if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
return (EINVAL);
CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus);
vcpu_notify_event(vm, vcpuid, false);
CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
vcpu_notify_event(vcpu, false);
}
return (0);
}
int
vm_resume_cpu(struct vm *vm, int vcpuid)
vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
{
if (vcpuid < -1 || vcpuid >= vm->maxcpus)
return (EINVAL);
if (vcpuid == -1) {
if (vcpu == NULL) {
CPU_ZERO(&vm->debug_cpus);
} else {
if (!CPU_ISSET(vcpuid, &vm->debug_cpus))
if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
return (EINVAL);
CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus);
CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
}
return (0);
}
@@ -2468,28 +2406,19 @@ vcpu_stats(struct vcpu *vcpu)
}
int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
{
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
*state = vm->vcpu[vcpuid].x2apic_state;
*state = vcpu->x2apic_state;
return (0);
}
int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
{
struct vcpu *vcpu;
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
if (state >= X2APIC_STATE_LAST)
return (EINVAL);
vcpu = &vm->vcpu[vcpuid];
vcpu->x2apic_state = state;
vlapic_set_x2apic_state(vcpu, state);
@@ -2536,10 +2465,8 @@ vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
}
void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
{
struct vcpu *vcpu = &vm->vcpu[vcpuid];
vcpu_lock(vcpu);
vcpu_notify_event_locked(vcpu, lapic_intr);
vcpu_unlock(vcpu);
@@ -2578,7 +2505,7 @@ vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
if (vm->rendezvous_func != NULL) {
/*
* If a rendezvous is already in progress then we need to
* call the rendezvous handler in case this 'vcpuid' is one
* call the rendezvous handler in case this 'vcpu' is one
* of the targets of the rendezvous.
*/
VMM_CTR0(vcpu, "Rendezvous already in progress");
@@ -2604,7 +2531,7 @@ vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
*/
for (i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &dest))
vcpu_notify_event(vm, i, false);
vcpu_notify_event(vm_vcpu(vm, i), false);
}
return (vm_handle_rendezvous(vcpu));
@@ -2751,22 +2678,22 @@ VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);
static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
{
if (vcpu == 0) {
vmm_stat_set(vm_vcpu(vm, vcpu), VMM_MEM_RESIDENT,
PAGE_SIZE * vmspace_resident_count(vm->vmspace));
if (vcpu->vcpuid == 0) {
vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
vmspace_resident_count(vcpu->vm->vmspace));
}
}
static void
vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
{
if (vcpu == 0) {
vmm_stat_set(vm_vcpu(vm, vcpu), VMM_MEM_WIRED,
PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
if (vcpu->vcpuid == 0) {
vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
}
}

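Within vmm.c itself, handlers that previously took a (vm, vcpuid) pair now
recover both from the vcpu, since struct vcpu carries a back-pointer to its
VM.  A sketch of that recovery, assuming the stub layout shown:

struct vm;

/* Stub layout; the real struct vcpu in vmm.c carries this back-pointer. */
struct vcpu {
	struct vm *vm;
	int vcpuid;
};

/*
 * Handlers that used to take a (vm, vcpuid) pair now recover both from
 * the vcpu itself, the way vm_handle_hlt() and vm_handle_suspend() do.
 */
static int
handle_exit(struct vcpu *vcpu)
{
	struct vm *vm = vcpu->vm;
	int vcpuid = vcpu->vcpuid;

	(void)vm;
	(void)vcpuid;
	return (0);
}
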
sys/amd64/vmm/vmm_dev.c

@@ -131,22 +131,24 @@ vcpu_lock_one(struct vmmdev_softc *sc, int vcpu)
if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vm))
return (EINVAL);
error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
error = vcpu_set_state(vm_vcpu(sc->vm, vcpu), VCPU_FROZEN, true);
return (error);
}
static void
vcpu_unlock_one(struct vmmdev_softc *sc, int vcpu)
vcpu_unlock_one(struct vmmdev_softc *sc, int vcpuid)
{
struct vcpu *vcpu;
enum vcpu_state state;
state = vcpu_get_state(vm_vcpu(sc->vm, vcpu), NULL);
vcpu = vm_vcpu(sc->vm, vcpuid);
state = vcpu_get_state(vcpu, NULL);
if (state != VCPU_FROZEN) {
panic("vcpu %s(%d) has invalid state %d", vm_name(sc->vm),
vcpu, state);
vcpuid, state);
}
vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
vcpu_set_state(vcpu, VCPU_IDLE, false);
}
static int
@@ -366,6 +368,15 @@ vm_set_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
return (error);
}
static struct vcpu *
lookup_vcpu(struct vm *vm, int vcpuid)
{
if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm))
return (NULL);
return (vm_vcpu(vm, vcpuid));
}
static int
vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
struct thread *td)
@@ -389,7 +400,6 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
struct vm_pptdev_mmio *pptmmio;
struct vm_pptdev_msi *pptmsi;
struct vm_pptdev_msix *pptmsix;
struct vm_nmi *vmnmi;
#ifdef COMPAT_FREEBSD13
struct vm_stats_old *vmstats_old;
#endif
@@ -399,7 +409,6 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
struct vm_gpa_pte *gpapte;
struct vm_suspend *vmsuspend;
struct vm_gla2gpa *gg;
struct vm_activate_cpu *vac;
struct vm_cpuset *vm_cpuset;
struct vm_intinfo *vmii;
struct vm_rtc_time *rtctime;
@@ -427,7 +436,13 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
state_changed = 0;
/*
* Some VMM ioctls can operate only on vcpus that are not running.
* For VMM ioctls that operate on a single vCPU, lookup the
* vcpu. For VMM ioctls which require one or more vCPUs to
* not be running, lock necessary vCPUs.
*
* XXX fragile, handle with care
* Most of these assume that the first field of the ioctl data
* is the vcpuid.
*/
switch (cmd) {
case VM_RUN:
@@ -450,8 +465,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
case VM_GET_INTINFO:
case VM_RESTART_INSTRUCTION:
/*
* XXX fragile, handle with care
* Assumes that the first field of the ioctl data is the vcpu.
* ioctls that can operate only on vcpus that are not running.
*/
vcpuid = *(int *)data;
error = vcpu_lock_one(sc, vcpuid);
@@ -498,6 +512,42 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
state_changed = 1;
break;
#ifdef COMPAT_FREEBSD13
case VM_STATS_OLD:
#endif
case VM_STATS:
case VM_INJECT_NMI:
case VM_LAPIC_IRQ:
case VM_GET_X2APIC_STATE:
/*
* These do not need the vCPU locked but do operate on
* a specific vCPU.
*/
vcpuid = *(int *)data;
vcpu = lookup_vcpu(sc->vm, vcpuid);
if (vcpu == NULL) {
error = EINVAL;
goto done;
}
break;
case VM_LAPIC_LOCAL_IRQ:
case VM_SUSPEND_CPU:
case VM_RESUME_CPU:
/*
* These can either operate on all CPUs via a vcpuid of
* -1 or on a specific vCPU.
*/
vcpuid = *(int *)data;
if (vcpuid == -1)
break;
vcpu = lookup_vcpu(sc->vm, vcpuid);
if (vcpu == NULL) {
error = EINVAL;
goto done;
}
break;
default:
break;
}
@@ -505,7 +555,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
switch(cmd) {
case VM_RUN:
vmrun = (struct vm_run *)data;
error = vm_run(sc->vm, vmrun);
error = vm_run(vcpu, &vmrun->vm_exit);
break;
case VM_SUSPEND:
vmsuspend = (struct vm_suspend *)data;
@@ -524,7 +574,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
case VM_STATS_OLD:
vmstats_old = (struct vm_stats_old *)data;
getmicrotime(&vmstats_old->tv);
error = vmm_stat_copy(sc->vm, vmstats_old->cpuid, 0,
error = vmm_stat_copy(vcpu, 0,
nitems(vmstats_old->statbuf),
&vmstats_old->num_entries,
vmstats_old->statbuf);
@@ -533,7 +583,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
case VM_STATS: {
vmstats = (struct vm_stats *)data;
getmicrotime(&vmstats->tv);
error = vmm_stat_copy(sc->vm, vmstats->cpuid, vmstats->index,
error = vmm_stat_copy(vcpu, vmstats->index,
nitems(vmstats->statbuf),
&vmstats->num_entries, vmstats->statbuf);
break;
@@ -586,17 +636,15 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
vmexc->restart_instruction);
break;
case VM_INJECT_NMI:
vmnmi = (struct vm_nmi *)data;
error = vm_inject_nmi(sc->vm, vmnmi->cpuid);
error = vm_inject_nmi(vcpu);
break;
case VM_LAPIC_IRQ:
vmirq = (struct vm_lapic_irq *)data;
error = lapic_intr_edge(sc->vm, vmirq->cpuid, vmirq->vector);
error = lapic_intr_edge(vcpu, vmirq->vector);
break;
case VM_LAPIC_LOCAL_IRQ:
vmirq = (struct vm_lapic_irq *)data;
error = lapic_set_local_intr(sc->vm, vmirq->cpuid,
vmirq->vector);
error = lapic_set_local_intr(sc->vm, vcpu, vmirq->vector);
break;
case VM_LAPIC_MSI:
vmmsi = (struct vm_lapic_msi *)data;
@@ -721,7 +769,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
break;
case VM_SET_SEGMENT_DESCRIPTOR:
vmsegdesc = (struct vm_seg_desc *)data;
error = vm_set_seg_desc(sc->vm, vmsegdesc->cpuid,
error = vm_set_seg_desc(vcpu,
vmsegdesc->regnum,
&vmsegdesc->desc);
break;
@@ -775,25 +823,23 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
break;
case VM_GET_CAPABILITY:
vmcap = (struct vm_capability *)data;
error = vm_get_capability(sc->vm, vmcap->cpuid,
error = vm_get_capability(vcpu,
vmcap->captype,
&vmcap->capval);
break;
case VM_SET_CAPABILITY:
vmcap = (struct vm_capability *)data;
error = vm_set_capability(sc->vm, vmcap->cpuid,
error = vm_set_capability(vcpu,
vmcap->captype,
vmcap->capval);
break;
case VM_SET_X2APIC_STATE:
x2apic = (struct vm_x2apic *)data;
error = vm_set_x2apic_state(sc->vm,
x2apic->cpuid, x2apic->state);
error = vm_set_x2apic_state(vcpu, x2apic->state);
break;
case VM_GET_X2APIC_STATE:
x2apic = (struct vm_x2apic *)data;
error = vm_get_x2apic_state(sc->vm,
x2apic->cpuid, &x2apic->state);
error = vm_get_x2apic_state(vcpu, &x2apic->state);
break;
case VM_GET_GPA_PMAP:
gpapte = (struct vm_gpa_pte *)data;
@@ -823,8 +869,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
("%s: vm_gla2gpa unknown error %d", __func__, error));
break;
case VM_ACTIVATE_CPU:
vac = (struct vm_activate_cpu *)data;
error = vm_activate_cpu(sc->vm, vac->vcpuid);
error = vm_activate_cpu(vcpu);
break;
case VM_GET_CPUS:
error = 0;
@@ -848,12 +893,10 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
free(cpuset, M_TEMP);
break;
case VM_SUSPEND_CPU:
vac = (struct vm_activate_cpu *)data;
error = vm_suspend_cpu(sc->vm, vac->vcpuid);
error = vm_suspend_cpu(sc->vm, vcpu);
break;
case VM_RESUME_CPU:
vac = (struct vm_activate_cpu *)data;
error = vm_resume_cpu(sc->vm, vac->vcpuid);
error = vm_resume_cpu(sc->vm, vcpu);
break;
case VM_SET_INTINFO:
vmii = (struct vm_intinfo *)data;
@@ -861,8 +904,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
break;
case VM_GET_INTINFO:
vmii = (struct vm_intinfo *)data;
error = vm_get_intinfo(sc->vm, vmii->vcpuid, &vmii->info1,
&vmii->info2);
error = vm_get_intinfo(vcpu, &vmii->info1, &vmii->info2);
break;
case VM_RTC_WRITE:
rtcdata = (struct vm_rtc_data *)data;

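The new dispatch in vmmdev_ioctl() sorts commands into three groups: ioctls
that must freeze the target vCPU, ioctls that run against a live vCPU, and
ioctls that accept -1 to mean every vCPU.  A compilable sketch of that
classification, with hypothetical command names in place of the real VM_*
constants:

#include <stddef.h>

struct vcpu {
	int vcpuid;
};

struct vm {
	int maxcpus;
	struct vcpu cpus[16];
};

enum { CMD_STATS, CMD_SUSPEND_CPU };

static struct vcpu *
lookup(struct vm *vm, int vcpuid)	/* analog of lookup_vcpu() above */
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (NULL);
	return (&vm->cpus[vcpuid]);
}

/*
 * Dispatch shape of the new vmmdev_ioctl(): per-vCPU commands must
 * resolve a valid pointer; commands that also have a global form accept
 * -1 and leave vcpu NULL to request the action on every vCPU.
 */
static int
prepare(struct vm *vm, int cmd, int vcpuid, struct vcpu **vcpup)
{
	*vcpup = NULL;
	switch (cmd) {
	case CMD_STATS:			/* strictly per-vCPU */
		*vcpup = lookup(vm, vcpuid);
		return (*vcpup == NULL ? -1 : 0);
	case CMD_SUSPEND_CPU:		/* -1 selects all vCPUs */
		if (vcpuid == -1)
			return (0);
		*vcpup = lookup(vm, vcpuid);
		return (*vcpup == NULL ? -1 : 0);
	default:
		return (0);
	}
}
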
sys/amd64/vmm/vmm_ioport.c

@@ -100,8 +100,7 @@ inout_instruction(struct vm_exit *vmexit)
#endif /* KTR */
static int
emulate_inout_port(struct vm *vm, int vcpuid, struct vm_exit *vmexit,
bool *retu)
emulate_inout_port(struct vcpu *vcpu, struct vm_exit *vmexit, bool *retu)
{
ioport_handler_func_t handler;
uint32_t mask, val;
@@ -122,8 +121,8 @@ emulate_inout_port(struct vm *vm, int vcpuid, struct vm_exit *vmexit,
val = vmexit->u.inout.eax & mask;
}
error = (*handler)(vm, vmexit->u.inout.in, vmexit->u.inout.port,
vmexit->u.inout.bytes, &val);
error = (*handler)(vcpu_vm(vcpu), vmexit->u.inout.in,
vmexit->u.inout.port, vmexit->u.inout.bytes, &val);
if (error) {
/*
* The value returned by this function is also the return value
@@ -138,7 +137,7 @@ emulate_inout_port(struct vm *vm, int vcpuid, struct vm_exit *vmexit,
if (vmexit->u.inout.in) {
vmexit->u.inout.eax &= ~mask;
vmexit->u.inout.eax |= val & mask;
error = vm_set_register(vm_vcpu(vm, vcpuid), VM_REG_GUEST_RAX,
error = vm_set_register(vcpu, VM_REG_GUEST_RAX,
vmexit->u.inout.eax);
KASSERT(error == 0, ("emulate_ioport: error %d setting guest "
"rax register", error));
@@ -148,14 +147,14 @@ emulate_inout_port(struct vm *vm, int vcpuid, struct vm_exit *vmexit,
}
static int
emulate_inout_str(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
emulate_inout_str(struct vcpu *vcpu, struct vm_exit *vmexit, bool *retu)
{
*retu = true;
return (0); /* Return to userspace to finish emulation */
}
int
vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
vm_handle_inout(struct vcpu *vcpu, struct vm_exit *vmexit, bool *retu)
{
int bytes __diagused, error;
@@ -164,11 +163,11 @@ vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
("vm_handle_inout: invalid operand size %d", bytes));
if (vmexit->u.inout.string)
error = emulate_inout_str(vm, vcpuid, vmexit, retu);
error = emulate_inout_str(vcpu, vmexit, retu);
else
error = emulate_inout_port(vm, vcpuid, vmexit, retu);
error = emulate_inout_port(vcpu, vmexit, retu);
VCPU_CTR4(vm, vcpuid, "%s%s 0x%04x: %s",
VCPU_CTR4(vcpu_vm(vcpu), vcpu_vcpuid(vcpu), "%s%s 0x%04x: %s",
vmexit->u.inout.rep ? "rep " : "",
inout_instruction(vmexit),
vmexit->u.inout.port,

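vmm_ioport.c no longer receives a struct vm at all; where it still needs
VM-scoped state (the port handler table) or the numeric ID (the trace
macro), it goes through the vcpu_vm() and vcpu_vcpuid() accessors seen in
the hunks above, keeping callers independent of the struct layout.  A
sketch of those accessors over a stub layout:

struct vm;

/* Stub layout; the real accessors live with the definition in vmm.c. */
struct vcpu {
	struct vm *vm;
	int vcpuid;
};

/* Analogs of vcpu_vm() and vcpu_vcpuid(): callers get what they need
 * without depending on how struct vcpu is laid out. */
static struct vm *
get_vm(struct vcpu *vcpu)
{
	return (vcpu->vm);
}

static int
get_vcpuid(struct vcpu *vcpu)
{
	return (vcpu->vcpuid);
}
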
sys/amd64/vmm/vmm_ioport.h

@@ -34,6 +34,6 @@
typedef int (*ioport_handler_func_t)(struct vm *vm,
bool in, int port, int bytes, uint32_t *val);
int vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vme, bool *retu);
int vm_handle_inout(struct vcpu *vcpu, struct vm_exit *vme, bool *retu);
#endif /* _VMM_IOPORT_H_ */

sys/amd64/vmm/vmm_lapic.c

@@ -52,13 +52,10 @@ __FBSDID("$FreeBSD$");
#define MSI_X86_ADDR_LOG 0x00000004 /* Destination Mode */
int
lapic_set_intr(struct vm *vm, int cpu, int vector, bool level)
lapic_set_intr(struct vcpu *vcpu, int vector, bool level)
{
struct vlapic *vlapic;
if (cpu < 0 || cpu >= vm_get_maxcpus(vm))
return (EINVAL);
/*
* According to section "Maskable Hardware Interrupts" in Intel SDM
* vectors 16 through 255 can be delivered through the local APIC.
@@ -66,32 +63,31 @@ lapic_set_intr(struct vm *vm, int cpu, int vector, bool level)
if (vector < 16 || vector > 255)
return (EINVAL);
vlapic = vm_lapic(vm_vcpu(vm, cpu));
vlapic = vm_lapic(vcpu);
if (vlapic_set_intr_ready(vlapic, vector, level))
vcpu_notify_event(vm, cpu, true);
vcpu_notify_event(vcpu, true);
return (0);
}
int
lapic_set_local_intr(struct vm *vm, int cpu, int vector)
lapic_set_local_intr(struct vm *vm, struct vcpu *vcpu, int vector)
{
struct vlapic *vlapic;
cpuset_t dmask;
int error;
int cpu, error;
if (cpu < -1 || cpu >= vm_get_maxcpus(vm))
return (EINVAL);
if (cpu == -1)
if (vcpu == NULL) {
error = 0;
dmask = vm_active_cpus(vm);
else
CPU_SETOF(cpu, &dmask);
error = 0;
CPU_FOREACH_ISSET(cpu, &dmask) {
vlapic = vm_lapic(vm_vcpu(vm, cpu));
CPU_FOREACH_ISSET(cpu, &dmask) {
vlapic = vm_lapic(vm_vcpu(vm, cpu));
error = vlapic_trigger_lvt(vlapic, vector);
if (error)
break;
}
} else {
vlapic = vm_lapic(vcpu);
error = vlapic_trigger_lvt(vlapic, vector);
if (error)
break;
}
return (error);

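lapic_set_local_intr() shows the NULL convention on the implementation
side: rather than folding the single-CPU case into a one-bit cpuset and
looping, the broadcast and single-vCPU paths are now separate branches.
A user-space analog, with a plain bitmask standing in for the active-CPU
set:

#include <stddef.h>
#include <stdint.h>

struct vcpu {
	int vcpuid;
};

struct vm {
	int maxcpus;
	struct vcpu cpus[8];
	uint8_t active;		/* bitmask analog of vm_active_cpus() */
};

static int
trigger_lvt(struct vcpu *vcpu, int vector)
{
	(void)vcpu;
	(void)vector;
	return (0);
}

/*
 * Analog of the new lapic_set_local_intr(): a NULL vcpu broadcasts to
 * every active vCPU and stops at the first error; otherwise only the
 * named vCPU is triggered.
 */
static int
set_local_intr(struct vm *vm, struct vcpu *vcpu, int vector)
{
	int error;

	if (vcpu == NULL) {
		error = 0;
		for (int i = 0; i < vm->maxcpus; i++) {
			if ((vm->active & (1u << i)) == 0)
				continue;
			error = trigger_lvt(&vm->cpus[i], vector);
			if (error)
				break;
		}
	} else
		error = trigger_lvt(vcpu, vector);
	return (error);
}
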
sys/amd64/vmm/vmm_lapic.h

@@ -47,29 +47,29 @@ int lapic_mmio_write(struct vcpu *vcpu, uint64_t gpa,
* Signals to the LAPIC that an interrupt at 'vector' needs to be generated
* to the 'cpu', the state is recorded in IRR.
*/
int lapic_set_intr(struct vm *vm, int cpu, int vector, bool trig);
int lapic_set_intr(struct vcpu *vcpu, int vector, bool trig);
#define LAPIC_TRIG_LEVEL true
#define LAPIC_TRIG_EDGE false
static __inline int
lapic_intr_level(struct vm *vm, int cpu, int vector)
lapic_intr_level(struct vcpu *vcpu, int vector)
{
return (lapic_set_intr(vm, cpu, vector, LAPIC_TRIG_LEVEL));
return (lapic_set_intr(vcpu, vector, LAPIC_TRIG_LEVEL));
}
static __inline int
lapic_intr_edge(struct vm *vm, int cpu, int vector)
lapic_intr_edge(struct vcpu *vcpu, int vector)
{
return (lapic_set_intr(vm, cpu, vector, LAPIC_TRIG_EDGE));
return (lapic_set_intr(vcpu, vector, LAPIC_TRIG_EDGE));
}
/*
* Triggers the LAPIC local interrupt (LVT) 'vector' on 'cpu'. 'cpu' can
* be set to -1 to trigger the interrupt on all CPUs.
*/
int lapic_set_local_intr(struct vm *vm, int cpu, int vector);
int lapic_set_local_intr(struct vm *vm, struct vcpu *vcpu, int vector);
int lapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg);

sys/amd64/vmm/vmm_stat.c

@@ -82,16 +82,13 @@ vmm_stat_register(void *arg)
}
int
vmm_stat_copy(struct vm *vm, int vcpu, int index, int count, int *num_stats,
vmm_stat_copy(struct vcpu *vcpu, int index, int count, int *num_stats,
uint64_t *buf)
{
struct vmm_stat_type *vst;
uint64_t *stats;
int i, tocopy;
if (vcpu < 0 || vcpu >= vm_get_maxcpus(vm))
return (EINVAL);
if (index < 0 || count < 0)
return (EINVAL);
@@ -109,11 +106,11 @@ vmm_stat_copy(struct vm *vm, int vcpu, int index, int count, int *num_stats,
for (i = 0; i < vst_num_types; i++) {
vst = vsttab[i];
if (vst->func != NULL)
(*vst->func)(vm, vcpu, vst);
(*vst->func)(vcpu, vst);
}
/* Copy over the stats */
stats = vcpu_stats(vm_vcpu(vm, vcpu));
stats = vcpu_stats(vcpu);
memcpy(buf, stats + index, tocopy * sizeof(stats[0]));
*num_stats = tocopy;
return (0);

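The stat refresh callbacks (vmm_stat_func_t) now take only the vcpu, and
VM-wide counters are refreshed when the callback fires for vCPU 0, as
vm_get_rescnt() and vm_get_wiredcnt() do in the vmm.c hunks.  A sketch
under stub types:

#include <stdint.h>

/* Stub types; the real vmm_stat_func_t in the diff takes only the vcpu. */
struct vm {
	uint64_t resident_pages;
};

struct vcpu {
	struct vm *vm;
	int vcpuid;
};

typedef void (*stat_func_t)(struct vcpu *vcpu);

/*
 * VM-wide counters are refreshed only when the callback fires for
 * vCPU 0, mirroring vm_get_rescnt() above.
 */
static void
get_rescnt(struct vcpu *vcpu)
{
	if (vcpu->vcpuid == 0)
		(void)vcpu->vm->resident_pages;	/* record the VM-wide count */
}
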
sys/amd64/vmm/vmm_stat.h

@@ -45,7 +45,7 @@ enum vmm_stat_scope {
};
struct vmm_stat_type;
typedef void (*vmm_stat_func_t)(struct vm *vm, int vcpu,
typedef void (*vmm_stat_func_t)(struct vcpu *vcpu,
struct vmm_stat_type *stat);
struct vmm_stat_type {
@@ -87,7 +87,7 @@ void *vmm_stat_alloc(void);
void vmm_stat_init(void *vp);
void vmm_stat_free(void *vp);
int vmm_stat_copy(struct vm *vm, int vcpu, int index, int count,
int vmm_stat_copy(struct vcpu *vcpu, int index, int count,
int *num_stats, uint64_t *buf);
int vmm_stat_desc_copy(int index, char *buf, int buflen);

sys/amd64/vmm/x86.c

@@ -204,7 +204,7 @@ x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
regs[2] &= ~AMDID2_MWAITX;
/* Advertise RDTSCP if it is enabled. */
error = vm_get_capability(vm, vcpu_id,
error = vm_get_capability(vcpu,
VM_CAP_RDTSCP, &enable_rdtscp);
if (error == 0 && enable_rdtscp)
regs[3] |= AMDID_RDTSCP;
@@ -311,7 +311,7 @@ x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
case CPUID_0000_0001:
do_cpuid(1, regs);
error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
error = vm_get_x2apic_state(vcpu, &x2apic_state);
if (error) {
panic("x86_emulate_cpuid: error %d "
"fetching x2apic state", error);
@@ -456,13 +456,13 @@ x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
regs[3] &= CPUID_STDEXT3_MD_CLEAR;
/* Advertise RDPID if it is enabled. */
error = vm_get_capability(vm, vcpu_id,
VM_CAP_RDPID, &enable_rdpid);
error = vm_get_capability(vcpu, VM_CAP_RDPID,
&enable_rdpid);
if (error == 0 && enable_rdpid)
regs[2] |= CPUID_STDEXT2_RDPID;
/* Advertise INVPCID if it is enabled. */
error = vm_get_capability(vm, vcpu_id,
error = vm_get_capability(vcpu,
VM_CAP_ENABLE_INVPCID, &enable_invpcid);
if (error == 0 && enable_invpcid)
regs[1] |= CPUID_STDEXT_INVPCID;
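
In the CPUID emulation, capability checks become per-vCPU queries on the
pointer x86_emulate_cpuid() already holds.  A sketch of that usage; the
capability index and feature-bit position are placeholders, not the real
VM_CAP_RDTSCP or AMDID_RDTSCP values:

#include <errno.h>

/* Stub per-vCPU capability store; the bound of 8 is illustrative. */
struct vcpu {
	int caps[8];
};

static int
get_capability(struct vcpu *vcpu, int type, int *retval)
{
	if (type < 0 || type >= 8)
		return (EINVAL);
	*retval = vcpu->caps[type];
	return (0);
}

/*
 * Usage in CPUID emulation: advertise a feature bit only when the
 * capability is enabled for this particular vCPU.
 */
static void
advertise_rdtscp(struct vcpu *vcpu, unsigned int *edx)
{
	int enable;

	if (get_capability(vcpu, 0, &enable) == 0 && enable)
		*edx |= (1u << 27);	/* placeholder feature bit */
}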