Revert "subr_pctrie: use ilog2(x) instead of fls(x)-1"

This reverts commit 574ef65069.
Author: Doug Moore
Date:   2024-06-03 13:07:42 -05:00
Commit: e3537f9235
Parent: 574ef65069

6 changed files with 69 additions and 104 deletions
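Background: as the diff below shows, the reverted commit both respelled the idiom fls(x) - 1 as ilog2(x) in subr_pctrie.c and reworked how vm_phys tracks the pool field of free pages. For the bit-twiddling half, the two spellings agree: fls() returns the one-based index of the most significant set bit (0 for 0), so fls(x) - 1 == floor(log2(x)) for any nonzero x. A minimal userland sketch of that equivalence, using a compiler builtin as a stand-in for the kernel's fls()/flsll():

#include <stdio.h>

/*
 * Stand-in for the kernel's fls(): one-based index of the most
 * significant set bit, or 0 if no bits are set.
 */
static int
my_fls(unsigned int x)
{
	return (x == 0 ? 0 : 32 - __builtin_clz(x));
}

int
main(void)
{
	for (unsigned int x = 1; x < 1000; x++) {
		/* floor(log2(x)) computed by repeated shifting. */
		int lg = 0;
		for (unsigned int t = x; (t >>= 1) != 0;)
			lg++;
		if (my_fls(x) - 1 != lg) {
			printf("mismatch at %u\n", x);
			return (1);
		}
	}
	printf("fls(x) - 1 == floor(log2(x)) for all tested x\n");
	return (0);
}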

@@ -350,7 +350,7 @@ pctrie_insert_node(void *parentp, struct pctrie_node *parent, uint64_t *val)
 	    "uint64 too wide");
 	_Static_assert(sizeof(uint64_t) * NBBY <=
 	    (1 << (sizeof(parent->pn_clev) * NBBY)), "pn_clev too narrow");
-	parent->pn_clev = rounddown(ilog2(index ^ newind), PCTRIE_WIDTH);
+	parent->pn_clev = rounddown(flsll(index ^ newind) - 1, PCTRIE_WIDTH);
 	parent->pn_owner = PCTRIE_COUNT;
 	parent->pn_owner = index & -(parent->pn_owner << parent->pn_clev);
@@ -546,14 +546,14 @@ pctrie_lookup_le(struct pctrie *ptree, uint64_t index)
 		KASSERT((pred->pn_popmap & ((1 << slot) - 1)) != 0,
 		    ("%s: no popmap siblings before slot %d in node %p",
 		    __func__, slot, pred));
-		slot = ilog2(pred->pn_popmap & ((1 << slot) - 1));
+		slot = fls(pred->pn_popmap & ((1 << slot) - 1)) - 1;
 		pred = pctrie_node_load(&pred->pn_child[slot], NULL,
 		    PCTRIE_LOCKED);
 	}
 	while (!pctrie_isleaf(pred)) {
 		KASSERT(pred->pn_popmap != 0,
 		    ("%s: no popmap children in node %p", __func__, pred));
-		slot = ilog2(pred->pn_popmap);
+		slot = fls(pred->pn_popmap) - 1;
 		pred = pctrie_node_load(&pred->pn_child[slot], NULL,
 		    PCTRIE_LOCKED);
 	}

@@ -51,8 +51,6 @@ struct vm_freelist {
 	int lcnt;
 };
 
-typedef struct vm_freelist vm_freelist_tbl[VM_NFREEPOOL][VM_NFREEORDER_MAX];
-
 struct vm_phys_seg {
 	vm_paddr_t	start;
 	vm_paddr_t	end;
@@ -64,7 +62,7 @@ struct vm_phys_seg {
 	void *md_first;
 #endif
 	int	domain;
-	vm_freelist_tbl *free_queues;
+	struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
 };
 
 extern struct vm_phys_seg vm_phys_segs[];

@@ -509,7 +509,7 @@ vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
 	m->psind = 0;
 	m->segind = segind;
 	m->order = VM_NFREEORDER;
-	m->pool = VM_NFREEPOOL;
+	m->pool = VM_FREEPOOL_DEFAULT;
 	m->valid = m->dirty = 0;
 	pmap_page_init(m);
 }
@@ -785,8 +785,7 @@ vm_page_startup(vm_offset_t vaddr)
 		m = seg->first_page + atop(startp - seg->start);
 		vmd = VM_DOMAIN(seg->domain);
 		vm_domain_free_lock(vmd);
-		vm_phys_enqueue_contig(m, VM_FREEPOOL_DEFAULT,
-		    pagecount);
+		vm_phys_enqueue_contig(m, pagecount);
 		vm_domain_free_unlock(vmd);
 		vm_domain_freecnt_inc(vmd, pagecount);
 		vm_cnt.v_page_count += (u_int)pagecount;

@@ -669,7 +669,6 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
     int tail)
 {
 	vm_page_t m_buddy;
-	int pool = m->pool;
 
 	while (oind > order) {
 		oind--;
@@ -677,10 +676,6 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
 		KASSERT(m_buddy->order == VM_NFREEORDER,
 		    ("vm_phys_split_pages: page %p has unexpected order %d",
 		    m_buddy, m_buddy->order));
-		KASSERT(m_buddy->pool == VM_NFREEPOOL,
-		    ("vm_phys_split_pages: page %p has unexpected pool %d",
-		    m_buddy, m_buddy->pool));
-		m_buddy->pool = pool;
 		vm_freelist_add(fl, m_buddy, oind, tail);
 	}
 }
@@ -698,8 +693,7 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
  * The physical page m's buddy must not be free.
  */
 static void
-vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
-    int tail)
+vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
 {
 	int order;
@@ -715,7 +709,6 @@ vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
 		order = fls(npages) - 1;
 		KASSERT(order < VM_NFREEORDER,
 		    ("%s: order %d is out of range", __func__, order));
-		m->pool = pool;
 		vm_freelist_add(fl, m, order, tail);
 		m += 1 << order;
 		npages -= 1 << order;
@@ -736,8 +729,7 @@ vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
  * parameter m. Otherwise, the physical page m's buddy must not be free.
  */
 static vm_page_t
-vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
-    int tail)
+vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
 {
 	int order;
@@ -753,7 +745,6 @@ vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
 		order = ffs(npages) - 1;
 		KASSERT(order < VM_NFREEORDER,
 		    ("vm_phys_enq_range: order %d is out of range", order));
-		m->pool = pool;
 		vm_freelist_add(fl, m, order, tail);
 		m += 1 << order;
 		npages -= 1 << order;
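The two enqueue helpers touched above decompose an arbitrary npages run into power-of-two blocks from opposite directions: vm_phys_enq_beg() peels off the largest remaining block first (order = fls(npages) - 1), while vm_phys_enq_range() lets the block size grow with the alignment of the run's low end (order = ffs(npages) - 1). A standalone sketch of both decompositions; my_fls() is a portable stand-in for BSD fls():

#include <stdio.h>
#include <strings.h>	/* ffs(); fls() is also here on FreeBSD */

static int
my_fls(unsigned int x)
{
	return (x == 0 ? 0 : 32 - __builtin_clz(x));
}

int
main(void)
{
	unsigned int npages;

	/* vm_phys_enq_beg(): largest block first. */
	printf("enq_beg 13 ->");
	for (npages = 13; npages != 0; npages -= 1u << (my_fls(npages) - 1))
		printf(" 2^%d", my_fls(npages) - 1);

	/* vm_phys_enq_range(): block size grows with low-end alignment. */
	printf("\nenq_range 13 ->");
	for (npages = 13; npages != 0; npages -= 1u << (ffs((int)npages) - 1))
		printf(" 2^%d", ffs((int)npages) - 1);
	printf("\n");
	return (0);
}

For npages = 13 this prints "2^3 2^2 2^0" for the first loop and "2^0 2^2 2^3" for the second, the same blocks in opposite order.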
@@ -761,6 +752,18 @@ vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
 	return (m);
 }
 
+/*
+ * Set the pool for a contiguous, power of two-sized set of physical pages.
+ */
+static void
+vm_phys_set_pool(int pool, vm_page_t m, int order)
+{
+	vm_page_t m_tmp;
+
+	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
+		m_tmp->pool = pool;
+}
+
 /*
  * Tries to allocate the specified number of pages from the specified pool
  * within the specified domain. Returns the actual number of allocated pages
@@ -769,8 +772,7 @@ vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
  * The returned pages may not be physically contiguous. However, in contrast
  * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
  * calling this function once to allocate the desired number of pages will
- * avoid wasted time in vm_phys_split_pages(). Sets the pool field for
- * every allocated page.
+ * avoid wasted time in vm_phys_split_pages().
  *
  * The free page queues for the specified domain must be locked.
 */
@@ -799,18 +801,14 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
 			vm_freelist_rem(fl, m, oind);
 			avail = i + (1 << oind);
 			end = imin(npages, avail);
-			ma[i++] = m++;
-			while (i < end) {
-				m->pool = pool;
+			while (i < end)
 				ma[i++] = m++;
-			}
 			if (i == npages) {
 				/*
 				 * Return excess pages to fl. Its order
 				 * [0, oind) queues are empty.
 				 */
-				vm_phys_enq_range(m, avail - i, fl,
-				    pool, 1);
+				vm_phys_enq_range(m, avail - i, fl, 1);
 				return (npages);
 			}
 		}
@@ -821,12 +819,11 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
 			while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
 			    NULL) {
 				vm_freelist_rem(alt, m, oind);
+				vm_phys_set_pool(pool, m, oind);
 				avail = i + (1 << oind);
 				end = imin(npages, avail);
-				do {
-					m->pool = pool;
+				while (i < end)
 					ma[i++] = m++;
-				} while (i < end);
 				if (i == npages) {
 					/*
 					 * Return excess pages to fl.
@@ -834,7 +831,7 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
 					 * are empty.
 					 */
 					vm_phys_enq_range(m, avail - i,
-					    fl, pool, 1);
+					    fl, 1);
 					return (npages);
 				}
 			}
@@ -846,7 +843,7 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
 
 /*
  * Allocate a contiguous, power of two-sized set of physical pages
- * from the free lists.  Sets the pool field in the first page only.
+ * from the free lists.
  *
  * The free page queues must be locked.
 */
@@ -867,8 +864,7 @@ vm_phys_alloc_pages(int domain, int pool, int order)
 /*
  * Allocate a contiguous, power of two-sized set of physical pages from the
  * specified free list. The free list must be specified using one of the
- * manifest constants VM_FREELIST_*.  Sets the pool field in the first page
- * only.
+ * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
@@ -919,7 +915,7 @@ vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
 		m = TAILQ_FIRST(&alt[oind].pl);
 		if (m != NULL) {
 			vm_freelist_rem(alt, m, oind);
-			m->pool = pool;
+			vm_phys_set_pool(pool, m, oind);
 			/* The order [order, oind) queues are empty. */
 			vm_phys_split_pages(m, oind, fl, order, 1);
 			return (m);
@@ -1126,8 +1122,7 @@ vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
 }
 
 /*
- * Free a contiguous, power of two-sized set of physical pages. Assumes that
- * only the first page has a valid pool field.
+ * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
@@ -1138,19 +1133,18 @@ vm_phys_free_pages(vm_page_t m, int order)
 	struct vm_phys_seg *seg;
 	vm_paddr_t pa;
 	vm_page_t m_buddy;
-	int pool = m->pool;
 
 	KASSERT(m->order == VM_NFREEORDER,
 	    ("vm_phys_free_pages: page %p has unexpected order %d",
 	    m, m->order));
-	KASSERT(pool < VM_NFREEPOOL,
-	    ("vm_phys_free_pages: page %p has unexpected pool %d", m, pool));
+	KASSERT(m->pool < VM_NFREEPOOL,
+	    ("vm_phys_free_pages: page %p has unexpected pool %d",
+	    m, m->pool));
 	KASSERT(order < VM_NFREEORDER,
 	    ("vm_phys_free_pages: order %d is out of range", order));
 	seg = &vm_phys_segs[m->segind];
 	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
 	if (order < VM_NFREEORDER - 1) {
-		vm_page_t m_start = m;
 		pa = VM_PAGE_TO_PHYS(m);
 		do {
 			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
@@ -1161,28 +1155,25 @@ vm_phys_free_pages(vm_page_t m, int order)
 				break;
 			fl = (*seg->free_queues)[m_buddy->pool];
 			vm_freelist_rem(fl, m_buddy, order);
-			m_buddy->pool = VM_NFREEPOOL;
+			if (m_buddy->pool != m->pool)
+				vm_phys_set_pool(m->pool, m_buddy, order);
 			order++;
 			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
 			m = &seg->first_page[atop(pa - seg->start)];
 		} while (order < VM_NFREEORDER - 1);
-		if (m != m_start) {
-			m_start->pool = VM_NFREEPOOL;
-			m->pool = pool;
-		}
 	}
-	fl = (*seg->free_queues)[pool];
+	fl = (*seg->free_queues)[m->pool];
 	vm_freelist_add(fl, m, order, 1);
 }
 
 /*
- * Free a contiguous, arbitrarily sized set of physical pages, without merging
- * across set boundaries.  Assumes no pages have a valid pool field.
+ * Free a contiguous, arbitrarily sized set of physical pages, without
+ * merging across set boundaries.
  *
  * The free page queues must be locked.
  */
 void
-vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages)
+vm_phys_enqueue_contig(vm_page_t m, u_long npages)
 {
 	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
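A note on the coalescing loop in vm_phys_free_pages() above: the buddy of a 2^order-page block differs from it in exactly one physical-address bit, PAGE_SHIFT + order, so an XOR locates the buddy and masking off the low bits yields the base of the merged, doubled block. A standalone illustration of that arithmetic, assuming 4 KB pages (PAGE_SHIFT == 12):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MY_PAGE_SHIFT	12	/* assumption: 4 KB pages, as on x86 */

int
main(void)
{
	uint64_t pa = UINT64_C(0x3000) << MY_PAGE_SHIFT; /* block base */
	int order = 3;					 /* 8-page block */

	/* The buddy differs only in bit (MY_PAGE_SHIFT + order). */
	uint64_t buddy = pa ^ ((uint64_t)1 << (MY_PAGE_SHIFT + order));

	/* The merged block starts at the pair's common, rounded-down base. */
	uint64_t merged =
	    pa & ~(((uint64_t)1 << (MY_PAGE_SHIFT + order + 1)) - 1);

	printf("block %#" PRIx64 " buddy %#" PRIx64 " merged %#" PRIx64 "\n",
	    pa, buddy, merged);
	return (0);
}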
@@ -1196,15 +1187,14 @@ vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages)
 	 */
 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
 	seg = &vm_phys_segs[m->segind];
-	fl = (*seg->free_queues)[pool];
+	fl = (*seg->free_queues)[m->pool];
 	m_end = m + npages;
 	/* Free blocks of increasing size. */
 	lo = atop(VM_PAGE_TO_PHYS(m));
 	if (m < m_end &&
 	    (diff = lo ^ (lo + npages - 1)) != 0) {
 		order = min(flsll(diff) - 1, VM_NFREEORDER - 1);
-		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl,
-		    pool, 1);
+		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
 	}
 
 	/* Free blocks of maximum size. */
@@ -1213,17 +1203,15 @@ vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages)
 		KASSERT(seg == &vm_phys_segs[m->segind],
 		    ("%s: page range [%p,%p) spans multiple segments",
 		    __func__, m_end - npages, m));
-		m->pool = pool;
 		vm_freelist_add(fl, m, order, 1);
 		m += 1 << order;
 	}
 	/* Free blocks of diminishing size. */
-	vm_phys_enq_beg(m, m_end - m, fl, pool, 1);
+	vm_phys_enq_beg(m, m_end - m, fl, 1);
 }
 
 /*
  * Free a contiguous, arbitrarily sized set of physical pages.
- * Assumes that every page has the same, valid, pool field value.
 *
 * The free page queues must be locked.
 */
@@ -1233,22 +1221,17 @@ vm_phys_free_contig(vm_page_t m, u_long npages)
 	vm_paddr_t lo;
 	vm_page_t m_start, m_end;
 	unsigned max_order, order_start, order_end;
-	int pool = m->pool;
-
-	KASSERT(pool < VM_NFREEPOOL,
-	    ("%s: pool %d is out of range", __func__, pool));
 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
 	lo = atop(VM_PAGE_TO_PHYS(m));
 	max_order = min(flsll(lo ^ (lo + npages)) - 1, VM_NFREEORDER - 1);
-	m_end = m + npages;
-	for (m_start = m; m < m_end; m++)
-		m->pool = VM_NFREEPOOL;
-	m = m_start;
+
+	m_start = m;
 	order_start = ffsll(lo) - 1;
 	if (order_start < max_order)
 		m_start += 1 << order_start;
+	m_end = m + npages;
 	order_end = ffsll(lo + npages) - 1;
 	if (order_end < max_order)
 		m_end -= 1 << order_end;
@@ -1257,15 +1240,11 @@ vm_phys_free_contig(vm_page_t m, u_long npages)
 	 * end of the range last.
 	 */
 	if (m_start < m_end)
-		vm_phys_enqueue_contig(m_start, pool, m_end - m_start);
-	if (order_start < max_order) {
-		m->pool = pool;
+		vm_phys_enqueue_contig(m_start, m_end - m_start);
+	if (order_start < max_order)
 		vm_phys_free_pages(m, order_start);
-	}
-	if (order_end < max_order) {
-		m_end->pool = pool;
+	if (order_end < max_order)
 		vm_phys_free_pages(m_end, order_end);
-	}
 }
 
 /*
@@ -1313,7 +1292,7 @@ vm_phys_unfree_page(vm_page_t m)
 	struct vm_phys_seg *seg;
 	vm_paddr_t pa, pa_half;
 	vm_page_t m_set, m_tmp;
-	int order, pool;
+	int order;
 
 	/*
 	 * First, find the contiguous, power of two-sized set of free
@@ -1345,8 +1324,7 @@ vm_phys_unfree_page(vm_page_t m)
 	 * is larger than a page, shrink "m_set" by returning the half
 	 * of "m_set" that does not contain "m" to the free lists.
 	 */
-	pool = m_set->pool;
-	fl = (*seg->free_queues)[pool];
+	fl = (*seg->free_queues)[m_set->pool];
 	order = m_set->order;
 	vm_freelist_rem(fl, m_set, order);
 	while (order > 0) {
@@ -1358,10 +1336,8 @@ vm_phys_unfree_page(vm_page_t m)
 			m_tmp = m_set;
 			m_set = &seg->first_page[atop(pa_half - seg->start)];
 		}
-		m_tmp->pool = pool;
 		vm_freelist_add(fl, m_tmp, order, 0);
 	}
-	m_set->pool = pool;
 	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
 	return (true);
 }
@@ -1501,8 +1477,7 @@ vm_phys_find_queues_contig(
 * alignment of the first physical page in the set. If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value. Both
- * "alignment" and "boundary" must be a power of two.  Sets the pool
- * field in every allocated page.
+ * "alignment" and "boundary" must be a power of two.
 */
 vm_page_t
 vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
@@ -1561,16 +1536,14 @@ vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
 		fl = (*queues)[m->pool];
 		oind = m->order;
 		vm_freelist_rem(fl, m, oind);
+		if (m->pool != VM_FREEPOOL_DEFAULT)
+			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
 	}
 	/* Return excess pages to the free lists. */
 	fl = (*queues)[VM_FREEPOOL_DEFAULT];
-	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl,
-	    VM_FREEPOOL_DEFAULT, 0);
+	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
 
 	/* Return page verified to satisfy conditions of request. */
-	for (m = m_run; m < &m_run[npages]; m++)
-		m->pool = VM_FREEPOOL_DEFAULT;
 	pa_start = VM_PAGE_TO_PHYS(m_run);
 	KASSERT(low <= pa_start,
 	    ("memory allocated below minimum requested range"));

@@ -66,7 +66,7 @@ vm_page_t vm_phys_alloc_freelist_pages(int domain, int freelist, int pool,
 int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
 vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
 int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
-void vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages);
+void vm_phys_enqueue_contig(vm_page_t m, u_long npages);
 int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
     vm_memattr_t memattr);
 void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end);

@@ -889,35 +889,30 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
 static void
 vm_reserv_break(vm_reserv_t rv)
 {
-	int pool, pos, pos0, pos1;
+	int hi, lo, pos;
 
 	vm_reserv_assert_locked(rv);
 	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
 	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
 	vm_reserv_remove(rv);
 	rv->pages->psind = 0;
-	pool = rv->pages->pool;
-	rv->pages->pool = VM_NFREEPOOL;
-	pos0 = bit_test(rv->popmap, 0) ? -1 : 0;
-	pos1 = -1 - pos0;
-	for (pos = 0; pos < VM_LEVEL_0_NPAGES; ) {
-		/* Find the first different bit after pos. */
-		bit_ff_at(rv->popmap, pos + 1, VM_LEVEL_0_NPAGES,
-		    pos1 < pos0, &pos);
-		if (pos == -1)
-			pos = VM_LEVEL_0_NPAGES;
-		if (pos0 <= pos1) {
-			/* Set pool for pages from pos1 to pos. */
-			pos0 = pos1;
-			while (pos0 < pos)
-				rv->pages[pos0++].pool = pool;
+	hi = lo = -1;
+	pos = 0;
+	for (;;) {
+		bit_ff_at(rv->popmap, pos, VM_LEVEL_0_NPAGES, lo != hi, &pos);
+		if (lo == hi) {
+			if (pos == -1)
+				break;
+			lo = pos;
 			continue;
 		}
-		/* Free unused pages from pos0 to pos. */
-		pos1 = pos;
+		if (pos == -1)
+			pos = VM_LEVEL_0_NPAGES;
+		hi = pos;
 		vm_domain_free_lock(VM_DOMAIN(rv->domain));
-		vm_phys_enqueue_contig(&rv->pages[pos0], pool, pos1 - pos0);
+		vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
 		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
+		lo = hi;
 	}
 	bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
 	rv->popcnt = 0;
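The restored vm_reserv_break() loop makes one pass over the reservation's popmap, alternating between searching for a clear bit (the start of a run of unused pages) and the set bit that ends the run, then hands each maximal run to vm_phys_enqueue_contig(). A userland sketch of the same scan; find_bit() is a hypothetical stand-in for bit_ff_at() over a single 64-bit popmap:

#include <stdint.h>
#include <stdio.h>

#define NPAGES	64	/* assumption: a 64-page reservation */

/*
 * Hypothetical stand-in for bit_ff_at(): first position >= pos whose
 * bit equals "value", or -1 if there is none.
 */
static int
find_bit(uint64_t map, int pos, int value)
{
	for (; pos < NPAGES; pos++)
		if (((map >> pos) & 1) == (uint64_t)value)
			return (pos);
	return (-1);
}

int
main(void)
{
	uint64_t popmap = UINT64_C(0x00ff00f0ff000f00); /* 1 = still in use */
	int hi, lo, pos;

	hi = lo = -1;
	pos = 0;
	for (;;) {
		/* Alternate: seek a clear bit, then the set bit ending the run. */
		pos = find_bit(popmap, pos, lo != hi);
		if (lo == hi) {
			if (pos == -1)
				break;
			lo = pos;	/* a free run starts here */
			continue;
		}
		if (pos == -1)
			pos = NPAGES;
		hi = pos;		/* the free run ends here */
		printf("free pages [%d, %d)\n", lo, hi);
		lo = hi;
	}
	return (0);
}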