vmm: Dynamically allocate a couple of per-CPU state save areas

This avoids bloating the BSS when MAXCPU is large.

No functional change intended.

PR:		269572
Reviewed by:	corvink, rew
Tested by:	rew
MFC after:	2 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D39805
commit 74ac712f72
parent e185d83fc4
Author:	Mark Johnston
Date:	2023-04-26 10:08:42 -04:00

2 changed files with 14 additions and 9 deletions
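
For illustration (not part of the commit): both files apply the same pattern, replacing a static per-CPU array sized by the compile-time constant MAXCPU with a single pointer plus a boot-time allocation sized by the runtime CPU count. A minimal sketch of that pattern follows, using the hypothetical name percpu_page in place of the real hsave/vmxon_region:

/*
 * Hedged sketch of the allocation pattern used in this commit;
 * percpu_page is a hypothetical stand-in for hsave/vmxon_region.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>		/* M_WAITOK, M_ZERO */
#include <sys/smp.h>		/* mp_maxid */

#include <vm/vm.h>
#include <vm/vm_extern.h>	/* kmem_malloc(), kmem_free() */

/* Before: MAXCPU pages of BSS, even when few CPUs are present. */
/* static uint8_t percpu_page[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); */

/* After: a single pointer in BSS; pages are allocated at module init. */
static uint8_t *percpu_page;

static void
percpu_page_init(void)
{
	/*
	 * mp_maxid is the highest CPU ID present, so (mp_maxid + 1)
	 * pages cover every possible curcpu value.  A multi-page
	 * kmem_malloc() allocation is page aligned, which the per-page
	 * indexing below depends on.
	 */
	percpu_page = kmem_malloc((mp_maxid + 1) * PAGE_SIZE,
	    M_WAITOK | M_ZERO);
}

static void
percpu_page_fini(void)
{
	kmem_free(percpu_page, (mp_maxid + 1) * PAGE_SIZE);
}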

diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c

@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sysctl.h>
 
 #include <vm/vm.h>
+#include <vm/vm_extern.h>
 #include <vm/pmap.h>
 
 #include <machine/cpufunc.h>
@@ -123,10 +124,8 @@ SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
 /* Current ASID generation for each host cpu */
 static struct asid asid[MAXCPU];
 
-/*
- * SVM host state saved area of size 4KB for each core.
- */
-static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
+/* SVM host state saved area of size 4KB for each physical core. */
+static uint8_t *hsave;
 
 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
@@ -167,6 +166,7 @@ svm_modcleanup(void)
 {
 
 	smp_rendezvous(NULL, svm_disable, NULL, NULL);
+	kmem_free(hsave, (mp_maxid + 1) * PAGE_SIZE);
 	return (0);
 }
@@ -214,7 +214,7 @@ svm_enable(void *arg __unused)
 	efer |= EFER_SVM;
 	wrmsr(MSR_EFER, efer);
 
-	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
+	wrmsr(MSR_VM_HSAVE_PA, vtophys(&hsave[curcpu * PAGE_SIZE]));
 }
 
 /*
@@ -269,6 +269,7 @@ svm_modinit(int ipinum)
 	svm_npt_init(ipinum);
 
 	/* Enable SVM on all CPUs */
+	hsave = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO);
 	smp_rendezvous(NULL, svm_enable, NULL, NULL);
 
 	return (0);

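A note on the svm.c hunks above: MSR_VM_HSAVE_PA takes the physical address of a page-aligned 4KB host save area. Each CPU's area is exactly one page of the allocation, so vtophys() of that page's first byte suffices, and the per-CPU pages need not be physically contiguous with one another. A minimal sketch of the addressing rule, using a hypothetical helper cpu_page() that is not in the commit:

/*
 * Hypothetical helper (not in the commit): CPU n's save area is
 * page n of the kmem_malloc()ed allocation.
 */
static inline uint8_t *
cpu_page(uint8_t *base, u_int cpu)
{

	return (&base[(size_t)cpu * PAGE_SIZE]);
}

/* e.g.: wrmsr(MSR_VM_HSAVE_PA, vtophys(cpu_page(hsave, curcpu))); */
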
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c

@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sysctl.h>
 
 #include <vm/vm.h>
+#include <vm/vm_extern.h>
 #include <vm/pmap.h>
 
 #include <machine/psl.h>
@@ -134,7 +135,7 @@ SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
     NULL);
 
 int vmxon_enabled[MAXCPU];
-static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
+static uint8_t *vmxon_region;
 
 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
 static uint32_t exit_ctls, entry_ctls;
@@ -618,6 +619,7 @@ vmx_modcleanup(void)
 		nmi_flush_l1d_sw = 0;
 
 	smp_rendezvous(NULL, vmx_disable, NULL, NULL);
+	kmem_free(vmxon_region, (mp_maxid + 1) * PAGE_SIZE);
 
 	return (0);
 }
@@ -638,8 +640,8 @@ vmx_enable(void *arg __unused)
 	load_cr4(rcr4() | CR4_VMXE);
 
-	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
-	error = vmxon(vmxon_region[curcpu]);
+	*(uint32_t *)&vmxon_region[curcpu * PAGE_SIZE] = vmx_revision();
+	error = vmxon(&vmxon_region[curcpu * PAGE_SIZE]);
 	if (error == 0)
 		vmxon_enabled[curcpu] = 1;
 }
@@ -649,7 +651,7 @@ vmx_modresume(void)
 {
 
 	if (vmxon_enabled[curcpu])
-		vmxon(vmxon_region[curcpu]);
+		vmxon(&vmxon_region[curcpu * PAGE_SIZE]);
 }
 
 static int
@@ -953,6 +955,8 @@ vmx_modinit(int ipinum)
 	vmx_msr_init();
 
 	/* enable VMX operation */
+	vmxon_region = kmem_malloc((mp_maxid + 1) * PAGE_SIZE,
+	    M_WAITOK | M_ZERO);
 	smp_rendezvous(NULL, vmx_enable, NULL, NULL);
 
 	vmx_initialized = 1;
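
For context on the vmx.c hunks: VMXON requires a page-aligned 4KB region whose first 32 bits hold the VMCS revision identifier (reported by the IA32_VMX_BASIC MSR), which is why each CPU stamps its page with vmx_revision() before calling vmxon(). A hedged sketch of that per-CPU enable step, reusing the identifiers from the diff under a hypothetical function name (the commit's own function is vmx_enable()):

static void
vmx_enable_cpu(void)
{
	uint8_t *region;
	int error;

	/* CPU curcpu owns page curcpu of the vmxon_region allocation. */
	region = &vmxon_region[curcpu * PAGE_SIZE];

	/* VMXON requires the revision ID in the region's first 32 bits. */
	*(uint32_t *)region = vmx_revision();
	error = vmxon(region);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}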