Every path in a radix trie ends with a leaf or a NULL. By replacing
NULL (non-leaf) pointers with NULL leaves, a NULL test can be removed
from every iteration of an index-based search loop.

This speeds up radix trie searches by a few percent. Any radix trie
that is not initialized with its init() function, but instead relies
on zeroed memory being proper initialization, will be broken by this
change.

Reviewed by:	alc, kib
Tested by:	pho (as part of a larger change)
Differential Revision:	https://reviews.freebsd.org/D41171
Doug Moore 2023-07-28 11:39:52 -05:00
parent dd24d475d5
commit 2d2bcba7ba
6 changed files with 235 additions and 213 deletions
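
Before the per-file diffs, a minimal standalone sketch of the idea may help. It is
illustrative only: the toy_node, SENTINEL, and toy_lookup names and the fanout-of-2
layout are invented here and are not the pctrie/vm_radix code. An empty child slot
holds a tagged leaf that carries no value, so the search loop needs only the leaf-bit
test; a missing key falls out through the same path as a mismatched key.

/*
 * Minimal sketch, assuming a tagged-pointer child slot: either an
 * interior node, a leaf (value pointer with bit 0 set), or SENTINEL,
 * a "leaf with no value".  Because empty slots are leaves too, the
 * search loop tests only the leaf bit; with plain NULL in empty slots
 * it would also need a "node == NULL" test on every iteration.
 */
#include <stdint.h>
#include <stdio.h>

#define ISLEAF   0x1u
#define SENTINEL ((struct toy_node *)(uintptr_t)ISLEAF) /* leaf carrying no value */

struct toy_node {
        struct toy_node *child[2];      /* toy fanout of 2 */
};

static int
isleaf(const struct toy_node *p)
{
        return (((uintptr_t)p & ISLEAF) != 0);
}

static uint64_t *
toval(struct toy_node *p)
{
        /* Strip the flag bit; SENTINEL decodes to NULL. */
        return ((uint64_t *)((uintptr_t)p & ~(uintptr_t)ISLEAF));
}

static struct toy_node *
toleaf(uint64_t *val)
{
        return ((struct toy_node *)((uintptr_t)val | ISLEAF));
}

/*
 * Descend 'levels' bits of the key, high bit first.  A miss surfaces
 * as a leaf whose decoded value is NULL (SENTINEL) or holds the wrong
 * key, so the loop never needs a separate NULL test.
 */
static uint64_t *
toy_lookup(struct toy_node *root, uint64_t index, int levels)
{
        struct toy_node *node;
        uint64_t *m;

        node = root;
        for (;;) {
                if (isleaf(node)) {
                        if ((m = toval(node)) != NULL && *m == index)
                                return (m);
                        return (NULL);
                }
                node = node->child[(index >> --levels) & 1];
        }
}

int
main(void)
{
        uint64_t v2 = 2, v3 = 3;
        struct toy_node n1, root;

        /* Two-level toy trie over 2-bit keys; it holds 2 (10b) and 3 (11b). */
        n1.child[0] = toleaf(&v2);
        n1.child[1] = toleaf(&v3);
        root.child[0] = SENTINEL;       /* empty subtree: a leaf with no value */
        root.child[1] = &n1;

        /*
         * SENTINEL is nonzero, which is why a trie embedded in zeroed
         * memory is no longer valid until its init() function runs.
         */
        printf("lookup 2: %s\n", toy_lookup(&root, 2, 2) ? "hit" : "miss");
        printf("lookup 0: %s\n", toy_lookup(&root, 0, 2) ? "hit" : "miss");
        return (0);
}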

@@ -79,9 +79,8 @@ typedef uint32_t pn_popmap_t;
_Static_assert(sizeof(pn_popmap_t) <= sizeof(int),
"pn_popmap_t too wide");
/* Flag bits stored in node pointers. */
#define PCTRIE_ISLEAF 0x1
#define PCTRIE_FLAGS 0x1
/* Set of all flag bits stored in node pointers. */
#define PCTRIE_FLAGS (PCTRIE_ISLEAF)
#define PCTRIE_PAD PCTRIE_FLAGS
/* Returns one unit associated with specified level. */
@@ -140,7 +139,7 @@ pctrie_node_get(struct pctrie *ptree, pctrie_alloc_t allocfn, uint64_t index,
*/
if (node->pn_popmap != 0) {
pctrie_node_store(&node->pn_child[ffs(node->pn_popmap) - 1],
NULL, PCTRIE_UNSERIALIZED);
PCTRIE_NULL, PCTRIE_UNSERIALIZED);
node->pn_popmap = 0;
}
node->pn_owner = pctrie_trimkey(index, clevel + 1);
@@ -165,7 +164,8 @@ pctrie_node_put(struct pctrie *ptree, struct pctrie_node *node,
if ((node->pn_popmap & (1 << slot)) != 0)
continue;
KASSERT(smr_unserialized_load(&node->pn_child[slot], true) ==
NULL, ("pctrie_node_put: node %p has a child", node));
PCTRIE_NULL,
("pctrie_node_put: node %p has a child", node));
}
#endif
freefn(ptree, node);
@@ -320,12 +320,13 @@ pctrie_reclaim_allnodes_int(struct pctrie *ptree, struct pctrie_node *node,
slot = ffs(node->pn_popmap) - 1;
child = pctrie_node_load(&node->pn_child[slot], NULL,
PCTRIE_UNSERIALIZED);
KASSERT(child != NULL, ("%s: bad popmap slot %d in node %p",
KASSERT(child != PCTRIE_NULL,
("%s: bad popmap slot %d in node %p",
__func__, slot, node));
if (!pctrie_isleaf(child))
pctrie_reclaim_allnodes_int(ptree, child, freefn);
node->pn_popmap ^= 1 << slot;
pctrie_node_store(&node->pn_child[slot], NULL,
pctrie_node_store(&node->pn_child[slot], PCTRIE_NULL,
PCTRIE_UNSERIALIZED);
}
pctrie_node_put(ptree, node, freefn);
@@ -341,7 +342,9 @@ pctrie_zone_init(void *mem, int size __unused, int flags __unused)
node = mem;
node->pn_popmap = 0;
memset(node->pn_child, 0, sizeof(node->pn_child));
for (int i = 0; i < nitems(node->pn_child); i++)
pctrie_node_store(&node->pn_child[i], PCTRIE_NULL,
PCTRIE_UNSERIALIZED);
return (0);
}
@@ -360,7 +363,7 @@ int
pctrie_insert(struct pctrie *ptree, uint64_t *val, pctrie_alloc_t allocfn)
{
uint64_t index, newind;
struct pctrie_node *leaf, *node, *tmp;
struct pctrie_node *leaf, *node, *parent;
smr_pctnode_t *parentp;
int slot;
uint16_t clev;
@@ -373,29 +376,32 @@ pctrie_insert(struct pctrie *ptree, uint64_t *val, pctrie_alloc_t allocfn)
* will never be used.
*/
node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
if (node == NULL) {
ptree->pt_root = (uintptr_t)leaf;
return (0);
}
for (parentp = (smr_pctnode_t *)&ptree->pt_root;; node = tmp) {
parent = NULL;
for (;;) {
if (pctrie_isleaf(node)) {
if (node == PCTRIE_NULL) {
if (parent == NULL)
ptree->pt_root = leaf;
else
pctrie_addnode(parent, index,
parent->pn_clev, leaf,
PCTRIE_LOCKED);
return (0);
}
newind = *pctrie_toval(node);
if (newind == index)
panic("%s: key %jx is already present",
__func__, (uintmax_t)index);
break;
} else if (pctrie_keybarr(node, index)) {
}
if (pctrie_keybarr(node, index)) {
newind = node->pn_owner;
break;
}
slot = pctrie_slot(index, node->pn_clev);
parentp = &node->pn_child[slot];
tmp = pctrie_node_load(parentp, NULL, PCTRIE_LOCKED);
if (tmp == NULL) {
pctrie_addnode(node, index, node->pn_clev, leaf,
PCTRIE_LOCKED);
return (0);
}
parent = node;
node = pctrie_node_load(&node->pn_child[slot], NULL,
PCTRIE_LOCKED);
}
/*
@@ -403,15 +409,17 @@ pctrie_insert(struct pctrie *ptree, uint64_t *val, pctrie_alloc_t allocfn)
* Setup the new intermediate node and add the 2 children: the
* new object and the older edge or object.
*/
parentp = (parent != NULL) ? &parent->pn_child[slot]:
(smr_pctnode_t *)&ptree->pt_root;
clev = pctrie_keydiff(newind, index);
tmp = pctrie_node_get(ptree, allocfn, index, clev);
if (tmp == NULL)
parent = pctrie_node_get(ptree, allocfn, index, clev);
if (parent == NULL)
return (ENOMEM);
/* These writes are not yet visible due to ordering. */
pctrie_addnode(tmp, index, clev, leaf, PCTRIE_UNSERIALIZED);
pctrie_addnode(tmp, newind, clev, node, PCTRIE_UNSERIALIZED);
pctrie_addnode(parent, index, clev, leaf, PCTRIE_UNSERIALIZED);
pctrie_addnode(parent, newind, clev, node, PCTRIE_UNSERIALIZED);
/* Synchronize to make the above visible. */
pctrie_node_store(parentp, tmp, PCTRIE_LOCKED);
pctrie_node_store(parentp, parent, PCTRIE_LOCKED);
return (0);
}
@@ -428,10 +436,9 @@ _pctrie_lookup(struct pctrie *ptree, uint64_t index, smr_t smr,
int slot;
node = pctrie_root_load(ptree, smr, access);
while (node != NULL) {
for (;;) {
if (pctrie_isleaf(node)) {
m = pctrie_toval(node);
if (*m == index)
if ((m = pctrie_toval(node)) != NULL && *m == index)
return (m);
break;
}
@@ -499,10 +506,9 @@ pctrie_lookup_ge(struct pctrie *ptree, uint64_t index)
*/
node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
succ = NULL;
while (node != NULL) {
for (;;) {
if (pctrie_isleaf(node)) {
m = pctrie_toval(node);
if (*m >= index)
if ((m = pctrie_toval(node)) != NULL && *m >= index)
return (m);
break;
}
@@ -580,10 +586,9 @@ pctrie_lookup_le(struct pctrie *ptree, uint64_t index)
*/
node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
pred = NULL;
while (node != NULL) {
for (;;) {
if (pctrie_isleaf(node)) {
m = pctrie_toval(node);
if (*m <= index)
if ((m = pctrie_toval(node)) != NULL && *m <= index)
return (m);
break;
}
@@ -626,66 +631,54 @@ pctrie_lookup_le(struct pctrie *ptree, uint64_t index)
void
pctrie_remove(struct pctrie *ptree, uint64_t index, pctrie_free_t freefn)
{
struct pctrie_node *node, *parent, *tmp;
struct pctrie_node *child, *node, *parent;
uint64_t *m;
int slot;
node = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
if (pctrie_isleaf(node)) {
m = pctrie_toval(node);
if (*m != index)
panic("%s: invalid key found", __func__);
pctrie_root_store(ptree, NULL, PCTRIE_LOCKED);
node = NULL;
child = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
for (;;) {
if (pctrie_isleaf(child))
break;
parent = node;
node = child;
slot = pctrie_slot(index, node->pn_clev);
child = pctrie_node_load(&node->pn_child[slot], NULL,
PCTRIE_LOCKED);
}
if ((m = pctrie_toval(child)) == NULL || *m != index)
panic("%s: key not found", __func__);
if (node == NULL) {
pctrie_root_store(ptree, PCTRIE_NULL, PCTRIE_LOCKED);
return;
}
parent = NULL;
for (;;) {
if (node == NULL)
panic("pctrie_remove: impossible to locate the key");
slot = pctrie_slot(index, node->pn_clev);
tmp = pctrie_node_load(&node->pn_child[slot], NULL,
KASSERT((node->pn_popmap & (1 << slot)) != 0,
("%s: bad popmap slot %d in node %p",
__func__, slot, node));
node->pn_popmap ^= 1 << slot;
pctrie_node_store(&node->pn_child[slot], PCTRIE_NULL, PCTRIE_LOCKED);
if (!powerof2(node->pn_popmap))
return;
KASSERT(node->pn_popmap != 0, ("%s: bad popmap all zeroes", __func__));
slot = ffs(node->pn_popmap) - 1;
child = pctrie_node_load(&node->pn_child[slot], NULL, PCTRIE_LOCKED);
KASSERT(child != PCTRIE_NULL,
("%s: bad popmap slot %d in node %p", __func__, slot, node));
if (parent == NULL)
pctrie_root_store(ptree, child, PCTRIE_LOCKED);
else {
slot = pctrie_slot(index, parent->pn_clev);
KASSERT(node ==
pctrie_node_load(&parent->pn_child[slot], NULL,
PCTRIE_LOCKED), ("%s: invalid child value", __func__));
pctrie_node_store(&parent->pn_child[slot], child,
PCTRIE_LOCKED);
if (pctrie_isleaf(tmp)) {
m = pctrie_toval(tmp);
if (*m != index)
panic("%s: invalid key found", __func__);
KASSERT((node->pn_popmap & (1 << slot)) != 0,
("%s: bad popmap slot %d in node %p",
__func__, slot, node));
node->pn_popmap ^= 1 << slot;
pctrie_node_store(&node->pn_child[slot], NULL,
PCTRIE_LOCKED);
if (!powerof2(node->pn_popmap))
break;
KASSERT(node->pn_popmap != 0,
("%s: bad popmap all zeroes", __func__));
slot = ffs(node->pn_popmap) - 1;
tmp = pctrie_node_load(&node->pn_child[slot],
NULL, PCTRIE_LOCKED);
KASSERT(tmp != NULL,
("%s: bad popmap slot %d in node %p",
__func__, slot, node));
if (parent == NULL)
pctrie_root_store(ptree, tmp, PCTRIE_LOCKED);
else {
slot = pctrie_slot(index, parent->pn_clev);
KASSERT(pctrie_node_load(
&parent->pn_child[slot], NULL,
PCTRIE_LOCKED) == node,
("%s: invalid child value", __func__));
pctrie_node_store(&parent->pn_child[slot], tmp,
PCTRIE_LOCKED);
}
/*
* The child is still valid and we can not zero the
* pointer until all SMR references are gone.
*/
pctrie_node_put(ptree, node, freefn);
break;
}
parent = node;
node = tmp;
}
/*
* The child is still valid and we can not zero the
* pointer until all SMR references are gone.
*/
pctrie_node_put(ptree, node, freefn);
}
/*
@@ -699,9 +692,9 @@ pctrie_reclaim_allnodes(struct pctrie *ptree, pctrie_free_t freefn)
struct pctrie_node *root;
root = pctrie_root_load(ptree, NULL, PCTRIE_LOCKED);
if (root == NULL)
if (root == PCTRIE_NULL)
return;
pctrie_root_store(ptree, NULL, PCTRIE_UNSERIALIZED);
pctrie_root_store(ptree, PCTRIE_NULL, PCTRIE_UNSERIALIZED);
if (!pctrie_isleaf(root))
pctrie_reclaim_allnodes_int(ptree, root, freefn);
}

@@ -33,11 +33,16 @@
#ifndef __SYS_PCTRIE_H_
#define __SYS_PCTRIE_H_
/*
* Radix tree node.
*/
struct pctrie_node;
/*
* Radix tree root.
*/
struct pctrie {
uintptr_t pt_root;
struct pctrie_node *pt_root;
};
#endif /* !__SYS_PCTRIE_H_ */

@@ -135,18 +135,24 @@ void pctrie_remove(struct pctrie *ptree, uint64_t key,
size_t pctrie_node_size(void);
int pctrie_zone_init(void *mem, int size, int flags);
/*
* Each search path in the trie terminates at a leaf, which is a pointer to a
* value marked with a set 1-bit. A leaf may be associated with a null pointer
* to indicate no value there.
*/
#define PCTRIE_ISLEAF 0x1
#define PCTRIE_NULL (struct pctrie_node *)PCTRIE_ISLEAF
static __inline void
pctrie_init(struct pctrie *ptree)
{
ptree->pt_root = 0;
ptree->pt_root = PCTRIE_NULL;
}
static __inline bool
pctrie_is_empty(struct pctrie *ptree)
{
return (ptree->pt_root == 0);
return (ptree->pt_root == PCTRIE_NULL);
}
/*

@@ -33,11 +33,16 @@
#ifndef __VM_RADIX_H_
#define __VM_RADIX_H_
/*
* Radix tree node.
*/
struct vm_radix_node;
/*
* Radix tree root.
*/
struct vm_radix {
uintptr_t rt_root;
struct vm_radix_node *rt_root;
};
#endif /* !__VM_RADIX_H_ */

@@ -103,9 +103,8 @@ typedef uint32_t rn_popmap_t;
_Static_assert(sizeof(rn_popmap_t) <= sizeof(int),
"rn_popmap_t too wide");
/* Flag bits stored in node pointers. */
#define VM_RADIX_ISLEAF 0x1
#define VM_RADIX_FLAGS 0x1
/* Set of all flag bits stored in node pointers. */
#define VM_RADIX_FLAGS (VM_RADIX_ISLEAF)
#define VM_RADIX_PAD VM_RADIX_FLAGS
/* Returns one unit associated with specified level. */
@@ -165,7 +164,7 @@ vm_radix_node_get(vm_pindex_t index, uint16_t clevel)
*/
if (rnode->rn_popmap != 0) {
vm_radix_node_store(&rnode->rn_child[ffs(rnode->rn_popmap) - 1],
NULL, UNSERIALIZED);
VM_RADIX_NULL, UNSERIALIZED);
rnode->rn_popmap = 0;
}
rnode->rn_owner = vm_radix_trimkey(index, clevel + 1);
@@ -189,7 +188,8 @@ vm_radix_node_put(struct vm_radix_node *rnode)
if ((rnode->rn_popmap & (1 << slot)) != 0)
continue;
KASSERT(smr_unserialized_load(&rnode->rn_child[slot], true) ==
NULL, ("vm_radix_node_put: rnode %p has a child", rnode));
VM_RADIX_NULL,
("vm_radix_node_put: rnode %p has a child", rnode));
}
#endif
uma_zfree_smr(vm_radix_node_zone, rnode);
@@ -344,17 +344,34 @@ vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
slot = ffs(rnode->rn_popmap) - 1;
child = vm_radix_node_load(&rnode->rn_child[slot],
UNSERIALIZED);
KASSERT(child != NULL, ("%s: bad popmap slot %d in rnode %p",
KASSERT(child != VM_RADIX_NULL,
("%s: bad popmap slot %d in rnode %p",
__func__, slot, rnode));
if (!vm_radix_isleaf(child))
vm_radix_reclaim_allnodes_int(child);
rnode->rn_popmap ^= 1 << slot;
vm_radix_node_store(&rnode->rn_child[slot], NULL,
vm_radix_node_store(&rnode->rn_child[slot], VM_RADIX_NULL,
UNSERIALIZED);
}
vm_radix_node_put(rnode);
}
/*
* radix node zone initializer.
*/
static int
vm_radix_zone_init(void *mem, int size, int flags)
{
struct vm_radix_node *rnode;
rnode = mem;
rnode->rn_popmap = 0;
for (int i = 0; i < nitems(rnode->rn_child); i++)
vm_radix_node_store(&rnode->rn_child[i], VM_RADIX_NULL,
UNSERIALIZED);
return (0);
}
#ifndef UMA_MD_SMALL_ALLOC
void vm_radix_reserve_kva(void);
/*
@@ -387,8 +404,8 @@ vm_radix_zinit(void)
{
vm_radix_node_zone = uma_zcreate("RADIX NODE",
sizeof(struct vm_radix_node), NULL, NULL, NULL, NULL,
VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_SMR | UMA_ZONE_ZINIT);
sizeof(struct vm_radix_node), NULL, NULL, vm_radix_zone_init, NULL,
VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_SMR);
vm_radix_smr = uma_zone_get_smr(vm_radix_node_zone);
}
@@ -400,7 +417,7 @@ int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
vm_pindex_t index, newind;
struct vm_radix_node *leaf, *rnode, *tmp;
struct vm_radix_node *leaf, *parent, *rnode;
smrnode_t *parentp;
int slot;
uint16_t clev;
@@ -413,29 +430,30 @@ vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
* will never be used.
*/
rnode = vm_radix_root_load(rtree, LOCKED);
if (rnode == NULL) {
rtree->rt_root = (uintptr_t)leaf;
return (0);
}
for (parentp = (smrnode_t *)&rtree->rt_root;; rnode = tmp) {
parent = NULL;
for (;;) {
if (vm_radix_isleaf(rnode)) {
if (rnode == VM_RADIX_NULL) {
if (parent == NULL)
rtree->rt_root = leaf;
else
vm_radix_addnode(parent, index,
parent->rn_clev, leaf, LOCKED);
return (0);
}
newind = vm_radix_topage(rnode)->pindex;
if (newind == index)
panic("%s: key %jx is already present",
__func__, (uintmax_t)index);
break;
} else if (vm_radix_keybarr(rnode, index)) {
}
if (vm_radix_keybarr(rnode, index)) {
newind = rnode->rn_owner;
break;
}
slot = vm_radix_slot(index, rnode->rn_clev);
parentp = &rnode->rn_child[slot];
tmp = vm_radix_node_load(parentp, LOCKED);
if (tmp == NULL) {
vm_radix_addnode(rnode, index, rnode->rn_clev, leaf,
LOCKED);
return (0);
}
parent = rnode;
rnode = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
}
/*
@@ -443,15 +461,17 @@ vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
* Setup the new intermediate node and add the 2 children: the
* new object and the older edge or object.
*/
parentp = (parent != NULL) ? &parent->rn_child[slot]:
(smrnode_t *)&rtree->rt_root;
clev = vm_radix_keydiff(newind, index);
tmp = vm_radix_node_get(index, clev);
if (tmp == NULL)
parent = vm_radix_node_get(index, clev);
if (parent == NULL)
return (ENOMEM);
/* These writes are not yet visible due to ordering. */
vm_radix_addnode(tmp, index, clev, leaf, UNSERIALIZED);
vm_radix_addnode(tmp, newind, clev, rnode, UNSERIALIZED);
vm_radix_addnode(parent, index, clev, leaf, UNSERIALIZED);
vm_radix_addnode(parent, newind, clev, rnode, UNSERIALIZED);
/* Serializing write to make the above visible. */
vm_radix_node_store(parentp, tmp, LOCKED);
vm_radix_node_store(parentp, parent, LOCKED);
return (0);
}
@@ -468,10 +488,10 @@ _vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index,
int slot;
rnode = vm_radix_root_load(rtree, access);
while (rnode != NULL) {
for (;;) {
if (vm_radix_isleaf(rnode)) {
m = vm_radix_topage(rnode);
if (m->pindex == index)
if ((m = vm_radix_topage(rnode)) != NULL &&
m->pindex == index)
return (m);
break;
}
@@ -540,10 +560,10 @@ vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
*/
rnode = vm_radix_root_load(rtree, LOCKED);
succ = NULL;
while (rnode != NULL) {
for (;;) {
if (vm_radix_isleaf(rnode)) {
m = vm_radix_topage(rnode);
if (m->pindex >= index)
if ((m = vm_radix_topage(rnode)) != NULL &&
m->pindex >= index)
return (m);
break;
}
@@ -619,10 +639,10 @@ vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
*/
rnode = vm_radix_root_load(rtree, LOCKED);
pred = NULL;
while (rnode != NULL) {
for (;;) {
if (vm_radix_isleaf(rnode)) {
m = vm_radix_topage(rnode);
if (m->pindex <= index)
if ((m = vm_radix_topage(rnode)) != NULL &&
m->pindex <= index)
return (m);
break;
}
@@ -662,63 +682,52 @@ vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
struct vm_radix_node *rnode, *parent, *tmp;
struct vm_radix_node *child, *parent, *rnode;
vm_page_t m;
int slot;
rnode = vm_radix_root_load(rtree, LOCKED);
if (vm_radix_isleaf(rnode)) {
m = vm_radix_topage(rnode);
if (m->pindex != index)
return (NULL);
vm_radix_root_store(rtree, NULL, LOCKED);
rnode = NULL;
child = vm_radix_root_load(rtree, LOCKED);
for (;;) {
if (vm_radix_isleaf(child))
break;
parent = rnode;
rnode = child;
slot = vm_radix_slot(index, rnode->rn_clev);
child = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
}
if ((m = vm_radix_topage(child)) == NULL || m->pindex != index)
return (NULL);
if (rnode == NULL) {
vm_radix_root_store(rtree, VM_RADIX_NULL, LOCKED);
return (m);
}
parent = NULL;
for (;;) {
if (rnode == NULL)
return (NULL);
slot = vm_radix_slot(index, rnode->rn_clev);
tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
if (vm_radix_isleaf(tmp)) {
m = vm_radix_topage(tmp);
if (m->pindex != index)
return (NULL);
KASSERT((rnode->rn_popmap & (1 << slot)) != 0,
("%s: bad popmap slot %d in rnode %p",
__func__, slot, rnode));
rnode->rn_popmap ^= 1 << slot;
vm_radix_node_store(
&rnode->rn_child[slot], NULL, LOCKED);
if (!powerof2(rnode->rn_popmap))
return (m);
KASSERT(rnode->rn_popmap != 0,
("%s: bad popmap all zeroes", __func__));
slot = ffs(rnode->rn_popmap) - 1;
tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
KASSERT(tmp != NULL,
("%s: bad popmap slot %d in rnode %p",
__func__, slot, rnode));
if (parent == NULL)
vm_radix_root_store(rtree, tmp, LOCKED);
else {
slot = vm_radix_slot(index, parent->rn_clev);
KASSERT(vm_radix_node_load(
&parent->rn_child[slot], LOCKED) == rnode,
("%s: invalid child value", __func__));
vm_radix_node_store(&parent->rn_child[slot],
tmp, LOCKED);
}
/*
* The child is still valid and we can not zero the
* pointer until all smr references are gone.
*/
vm_radix_node_put(rnode);
return (m);
}
parent = rnode;
rnode = tmp;
KASSERT((rnode->rn_popmap & (1 << slot)) != 0,
("%s: bad popmap slot %d in rnode %p", __func__, slot, rnode));
rnode->rn_popmap ^= 1 << slot;
vm_radix_node_store(&rnode->rn_child[slot], VM_RADIX_NULL, LOCKED);
if (!powerof2(rnode->rn_popmap))
return (m);
KASSERT(rnode->rn_popmap != 0, ("%s: bad popmap all zeroes", __func__));
slot = ffs(rnode->rn_popmap) - 1;
child = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
KASSERT(child != VM_RADIX_NULL,
("%s: bad popmap slot %d in rnode %p", __func__, slot, rnode));
if (parent == NULL)
vm_radix_root_store(rtree, child, LOCKED);
else {
slot = vm_radix_slot(index, parent->rn_clev);
KASSERT(rnode ==
vm_radix_node_load(&parent->rn_child[slot], LOCKED),
("%s: invalid child value", __func__));
vm_radix_node_store(&parent->rn_child[slot], child, LOCKED);
}
/*
* The child is still valid and we can not zero the
* pointer until all smr references are gone.
*/
vm_radix_node_put(rnode);
return (m);
}
/*
@@ -732,9 +741,9 @@ vm_radix_reclaim_allnodes(struct vm_radix *rtree)
struct vm_radix_node *root;
root = vm_radix_root_load(rtree, LOCKED);
if (root == NULL)
if (root == VM_RADIX_NULL)
return;
vm_radix_root_store(rtree, NULL, UNSERIALIZED);
vm_radix_root_store(rtree, VM_RADIX_NULL, UNSERIALIZED);
if (!vm_radix_isleaf(root))
vm_radix_reclaim_allnodes_int(root);
}
@@ -746,36 +755,34 @@ vm_radix_reclaim_allnodes(struct vm_radix *rtree)
vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
struct vm_radix_node *rnode, *tmp;
struct vm_radix_node *leaf, *parent, *rnode;
vm_page_t m;
vm_pindex_t index;
int slot;
leaf = vm_radix_toleaf(newpage);
index = newpage->pindex;
rnode = vm_radix_root_load(rtree, LOCKED);
if (rnode == NULL)
panic("%s: replacing page on an empty trie", __func__);
if (vm_radix_isleaf(rnode)) {
m = vm_radix_topage(rnode);
if (m->pindex != index)
panic("%s: original replacing root key not found",
__func__);
rtree->rt_root = (uintptr_t)vm_radix_toleaf(newpage);
return (m);
}
parent = NULL;
for (;;) {
slot = vm_radix_slot(index, rnode->rn_clev);
tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
if (vm_radix_isleaf(tmp)) {
m = vm_radix_topage(tmp);
if (m->pindex != index)
break;
vm_radix_node_store(&rnode->rn_child[slot],
vm_radix_toleaf(newpage), LOCKED);
return (m);
} else if (tmp == NULL || vm_radix_keybarr(tmp, index))
if (vm_radix_isleaf(rnode)) {
if ((m = vm_radix_topage(rnode)) != NULL &&
m->pindex == index) {
if (parent == NULL)
rtree->rt_root = leaf;
else
vm_radix_node_store(
&parent->rn_child[slot], leaf,
LOCKED);
return (m);
}
break;
rnode = tmp;
}
if (vm_radix_keybarr(rnode, index))
break;
slot = vm_radix_slot(index, rnode->rn_clev);
parent = rnode;
rnode = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
}
panic("%s: original replacing page not found", __func__);
}

@@ -48,18 +48,24 @@ vm_page_t vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index);
vm_page_t vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage);
void vm_radix_zinit(void);
/*
* Each search path in the trie terminates at a leaf, which is a pointer to a
* page marked with a set 1-bit. A leaf may be associated with a null pointer
* to indicate no page there.
*/
#define VM_RADIX_ISLEAF 0x1
#define VM_RADIX_NULL (struct vm_radix_node *)VM_RADIX_ISLEAF
static __inline void
vm_radix_init(struct vm_radix *rtree)
{
rtree->rt_root = 0;
rtree->rt_root = VM_RADIX_NULL;
}
static __inline bool
vm_radix_is_empty(struct vm_radix *rtree)
{
return (rtree->rt_root == 0);
return (rtree->rt_root == VM_RADIX_NULL);
}
#endif /* _KERNEL */