Change vm_malloc() to map pages in the guest physical address space in
4KB chunks. This breaks the assumption that the entire memory segment is
contiguously allocated in the host physical address space.

This also paves the way to satisfy the 4KB page allocations by requesting
free pages from the VM subsystem as opposed to hard-partitioning host memory
at boot time.
Neel Natu 2012-10-04 02:27:14 +00:00
parent fce9b857d6
commit f7d51510f1
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/bhyve/; revision=241178
6 changed files with 55 additions and 22 deletions
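
Because a segment is no longer backed by a single contiguous host range
after this change, code that used to compute a host address as "base hpa
plus offset" must translate guest-physical to host-physical one 4KB page
at a time. A minimal sketch of that pattern, using the vm_gpa2hpa()
interface seen in the diff below (the helper itself is hypothetical and
not part of this commit):

/*
 * Hypothetical helper: visit each host page backing a segment.
 * vm_gpa2hpa() returns (vm_paddr_t)-1 for an unmapped guest page.
 */
static int
walk_seg_pages(struct vm *vm, struct vm_memory_segment *seg)
{
	size_t off;
	vm_paddr_t hpa;

	for (off = 0; off < seg->len; off += PAGE_SIZE) {
		hpa = vm_gpa2hpa(vm, seg->gpa + off, PAGE_SIZE);
		if (hpa == (vm_paddr_t)-1)
			return (EFAULT);
		/* operate on the single host page [hpa, hpa + PAGE_SIZE) */
	}
	return (0);
}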

View file

@@ -111,9 +111,10 @@ vm_destroy(struct vmctx *vm)
 {
 	assert(vm != NULL);
 
-	DESTROY(vm->name);
 	if (vm->fd >= 0)
 		close(vm->fd);
+	DESTROY(vm->name);
+
 	free(vm);
 }
 
@@ -151,7 +152,6 @@ vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa,
 	bzero(&seg, sizeof(seg));
 	seg.gpa = gpa;
 	error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
-	*ret_hpa = seg.hpa;
 	*ret_len = seg.len;
 	return (error);
 }

View file

@@ -35,7 +35,6 @@ void	vmmdev_cleanup(void);
 #endif
 
 struct vm_memory_segment {
-	vm_paddr_t	hpa;	/* out */
 	vm_paddr_t	gpa;	/* in */
 	size_t		len;	/* in */
 };

View file

@@ -356,7 +356,6 @@ ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
 	if (error == 0) {
 		seg->gpa = gpa;
 		seg->len = len;
-		seg->hpa = hpa;
 	}
 	return (error);
 }

View file

@@ -275,6 +275,28 @@ vm_create(const char *name)
 	return (vm);
 }
 
+static void
+vm_free_mem_seg(struct vm *vm, struct vm_memory_segment *seg)
+{
+	size_t len;
+	vm_paddr_t hpa;
+
+	len = 0;
+	while (len < seg->len) {
+		hpa = vm_gpa2hpa(vm, seg->gpa + len, PAGE_SIZE);
+		if (hpa == (vm_paddr_t)-1) {
+			panic("vm_free_mem_segs: cannot free hpa "
+			      "associated with gpa 0x%016lx", seg->gpa + len);
+		}
+
+		vmm_mem_free(hpa, PAGE_SIZE);
+
+		len += PAGE_SIZE;
+	}
+
+	bzero(seg, sizeof(struct vm_memory_segment));
+}
+
 void
 vm_destroy(struct vm *vm)
 {
@@ -283,7 +305,9 @@ vm_destroy(struct vm *vm)
 	ppt_unassign_all(vm);
 
 	for (i = 0; i < vm->num_mem_segs; i++)
-		vmm_mem_free(vm->mem_segs[i].hpa, vm->mem_segs[i].len);
+		vm_free_mem_seg(vm, &vm->mem_segs[i]);
+
+	vm->num_mem_segs = 0;
 
 	for (i = 0; i < VM_MAXCPU; i++)
 		vcpu_cleanup(&vm->vcpu[i]);
@@ -345,6 +369,7 @@ int
 vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
 {
 	int error, available, allocated;
+	struct vm_memory_segment *seg;
 	vm_paddr_t g, hpa;
 	const boolean_t spok = TRUE;	/* superpage mappings are ok */
 
@@ -380,22 +405,32 @@ vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
 	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
 		return (E2BIG);
 
-	hpa = vmm_mem_alloc(len);
-	if (hpa == 0)
-		return (ENOMEM);
+	seg = &vm->mem_segs[vm->num_mem_segs];
 
-	error = VMMMAP_SET(vm->cookie, gpa, hpa, len, VM_MEMATTR_WRITE_BACK,
-			   VM_PROT_ALL, spok);
-	if (error) {
-		vmm_mem_free(hpa, len);
+	seg->gpa = gpa;
+	seg->len = 0;
+	while (seg->len < len) {
+		hpa = vmm_mem_alloc(PAGE_SIZE);
+		if (hpa == 0) {
+			error = ENOMEM;
+			break;
+		}
+
+		error = VMMMAP_SET(vm->cookie, gpa + seg->len, hpa, PAGE_SIZE,
+				   VM_MEMATTR_WRITE_BACK, VM_PROT_ALL, spok);
+		if (error)
+			break;
+
+		iommu_create_mapping(vm->iommu, gpa + seg->len, hpa, PAGE_SIZE);
+
+		seg->len += PAGE_SIZE;
+	}
+
+	if (seg->len != len) {
+		vm_free_mem_seg(vm, seg);
 		return (error);
 	}
 
-	iommu_create_mapping(vm->iommu, gpa, hpa, len);
-
-	vm->mem_segs[vm->num_mem_segs].gpa = gpa;
-	vm->mem_segs[vm->num_mem_segs].hpa = hpa;
-	vm->mem_segs[vm->num_mem_segs].len = len;
 	vm->num_mem_segs++;
 
 	return (0);

View file

@@ -299,7 +299,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
 		break;
 	case VM_GET_MEMORY_SEG:
 		seg = (struct vm_memory_segment *)data;
-		seg->hpa = seg->len = 0;
+		seg->len = 0;
 		(void)vm_gpabase2memseg(sc->vm, seg->gpa, seg);
 		error = 0;
 		break;

View file

@@ -318,9 +318,9 @@ vmm_mem_alloc(size_t size)
 	int i;
 	vm_paddr_t addr;
 
-	if ((size & PDRMASK) != 0) {
+	if ((size & PAGE_MASK) != 0) {
 		panic("vmm_mem_alloc: size 0x%0lx must be "
-		      "aligned on a 0x%0x boundary\n", size, NBPDR);
+		      "aligned on a 0x%0x boundary\n", size, PAGE_SIZE);
 	}
 
 	addr = 0;
@@ -373,9 +373,9 @@ vmm_mem_free(vm_paddr_t base, size_t length)
 {
 	int i;
 
-	if ((base & PDRMASK) != 0 || (length & PDRMASK) != 0) {
+	if ((base & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
 		panic("vmm_mem_free: base 0x%0lx and length 0x%0lx must be "
-		      "aligned on a 0x%0x boundary\n", base, length, NBPDR);
+		      "aligned on a 0x%0x boundary\n", base, length, PAGE_SIZE);
 	}
 
 	mtx_lock(&vmm_mem_mtx);
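
The second paragraph of the commit message points at the follow-on step:
once everything is mapped in 4KB chunks, vmm_mem_alloc() can satisfy
each allocation from the general-purpose VM subsystem instead of a host
memory range hard-partitioned at boot. A hypothetical sketch of that
direction (not part of this commit; vmm_mem_alloc() here still carves
pages out of the boot-time partition):

/*
 * Hypothetical: back a 4KB guest page with a wired page obtained
 * from the FreeBSD VM subsystem. Returns the physical address, or 0
 * on failure, matching the vmm_mem_alloc() convention used above.
 */
static vm_paddr_t
vmm_mem_alloc_vmpage(void)
{
	vm_page_t m;

	m = vm_page_alloc(NULL, 0,
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m == NULL)
		return (0);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);	/* VM_ALLOC_ZERO is only a hint */
	return (VM_PAGE_TO_PHYS(m));
}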