From 74ac712f72cfd6d7b3db3c9d3b72ccf2824aa183 Mon Sep 17 00:00:00 2001
From: Mark Johnston <markj@FreeBSD.org>
Date: Wed, 26 Apr 2023 10:08:42 -0400
Subject: [PATCH] vmm: Dynamically allocate a couple of per-CPU state save
 areas

This avoids bloating the BSS when MAXCPU is large.  No functional
change intended.

PR:		269572
Reviewed by:	corvink, rew
Tested by:	rew
MFC after:	2 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D39805
---
 sys/amd64/vmm/amd/svm.c   | 11 ++++++-----
 sys/amd64/vmm/intel/vmx.c | 12 ++++++++----
 2 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index d995b68ebdc6..0af7342128f2 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
+#include <vm/vm_extern.h>
 
 #include <machine/cpufunc.h>
 #include <machine/psl.h>
@@ -123,10 +124,8 @@ SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
 /* Current ASID generation for each host cpu */
 static struct asid asid[MAXCPU];
 
-/*
- * SVM host state saved area of size 4KB for each core.
- */
-static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
+/* SVM host state saved area of size 4KB for each physical core. */
+static uint8_t *hsave;
 
 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
@@ -167,6 +166,7 @@ svm_modcleanup(void)
 {
 
 	smp_rendezvous(NULL, svm_disable, NULL, NULL);
+	kmem_free(hsave, (mp_maxid + 1) * PAGE_SIZE);
 	return (0);
 }
 
@@ -214,7 +214,7 @@ svm_enable(void *arg __unused)
 	efer |= EFER_SVM;
 	wrmsr(MSR_EFER, efer);
 
-	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
+	wrmsr(MSR_VM_HSAVE_PA, vtophys(&hsave[curcpu * PAGE_SIZE]));
 }
 
 /*
@@ -269,6 +269,7 @@ svm_modinit(int ipinum)
 	svm_npt_init(ipinum);
 
 	/* Enable SVM on all CPUs */
+	hsave = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO);
 	smp_rendezvous(NULL, svm_enable, NULL, NULL);
 
 	return (0);
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 32e53de4e8ee..55ed5fdf1a00 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
+#include <vm/vm_extern.h>
 
 #include <machine/psl.h>
 #include <machine/cpufunc.h>
@@ -134,7 +135,7 @@ SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
     NULL);
 
 int vmxon_enabled[MAXCPU];
-static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
+static uint8_t *vmxon_region;
 
 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
 static uint32_t exit_ctls, entry_ctls;
@@ -618,6 +619,7 @@ vmx_modcleanup(void)
 		nmi_flush_l1d_sw = 0;
 
 	smp_rendezvous(NULL, vmx_disable, NULL, NULL);
+	kmem_free(vmxon_region, (mp_maxid + 1) * PAGE_SIZE);
 	return (0);
 }
 
@@ -638,8 +640,8 @@ vmx_enable(void *arg __unused)
 
 	load_cr4(rcr4() | CR4_VMXE);
 
-	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
-	error = vmxon(vmxon_region[curcpu]);
+	*(uint32_t *)&vmxon_region[curcpu * PAGE_SIZE] = vmx_revision();
+	error = vmxon(&vmxon_region[curcpu * PAGE_SIZE]);
 	if (error == 0)
 		vmxon_enabled[curcpu] = 1;
 }
@@ -649,7 +651,7 @@ vmx_modresume(void)
 {
 
 	if (vmxon_enabled[curcpu])
-		vmxon(vmxon_region[curcpu]);
+		vmxon(&vmxon_region[curcpu * PAGE_SIZE]);
 }
 
 static int
@@ -953,6 +955,8 @@ vmx_modinit(int ipinum)
 	vmx_msr_init();
 
 	/* enable VMX operation */
+	vmxon_region = kmem_malloc((mp_maxid + 1) * PAGE_SIZE,
+	    M_WAITOK | M_ZERO);
 	smp_rendezvous(NULL, vmx_enable, NULL, NULL);
 
 	vmx_initialized = 1;