nilfs2: optimize calculation of min/max number of btree node children

The nilfs_btree_node_nchildren_max() and nilfs_btree_node_nchildren_min()
functions switch their return value depending on whether the target node
is the root or an ordinary node block.  At most call sites, however, the
node type is fixed, and moreover the same calculation is repeatedly
performed inside a loop.

This unfolds these functions depending on the calling context and moves
the calculation outside loops wherever possible.
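
Illustration (not part of the patch): the change is an instance of hoisting
a loop-invariant computation.  The child-count bound depends only on the
node size of the tree, so it can be evaluated once before the loop instead
of on every iteration.  The self-contained C sketch below uses made-up
helper names and sizes (node_nchildren_max(), NODE_HEADER_SIZE, and so on)
purely to show the shape of the transformation; it is not the nilfs2 code.

#include <stdio.h>

/*
 * Hypothetical stand-ins for the nilfs2 helpers; the header and entry
 * sizes are invented and only serve to illustrate the pattern.
 */
#define NODE_HEADER_SIZE        16
#define CHILD_ENTRY_SIZE        16      /* one key plus one pointer */

/* Upper bound on the number of children; depends only on the node size. */
static int node_nchildren_max(int nodesize)
{
        return (nodesize - NODE_HEADER_SIZE) / CHILD_ENTRY_SIZE;
}

int main(void)
{
        int nchildren[4] = { 120, 255, 255, 7 };        /* sample counts */
        int nodesize = 4096;                            /* fixed per tree */
        int level, ncmax;

        /*
         * The bound is invariant across the loop below, so compute it once
         * up front; this mirrors the ncmax/ncmin hoisting in the patch.
         */
        ncmax = node_nchildren_max(nodesize);

        for (level = 0; level < 4; level++) {
                if (nchildren[level] < ncmax)
                        printf("level %d: room to insert\n", level);
                else
                        printf("level %d: full, would carry or split\n",
                               level);
        }
        return 0;
}

Running the sketch prints, for each level, whether there is still room to
insert, which mirrors how nilfs_btree_prepare_insert() chooses between
nilfs_btree_do_insert and a carry or split operation.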

Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Author: Ryusuke Konishi
Date:   2010-07-13 23:33:52 +09:00
parent 364ec2d700
commit ea64ab87cd

@@ -473,7 +473,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
 {
         struct nilfs_btree_node *node;
         __u64 ptr;
-        int level, index, found, ret;
+        int level, index, found, ncmax, ret;
 
         node = nilfs_btree_get_root(btree);
         level = nilfs_btree_node_get_level(node);
@@ -485,6 +485,8 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
         path[level].bp_bh = NULL;
         path[level].bp_index = index;
 
+        ncmax = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree));
+
         for (level--; level >= minlevel; level--) {
                 ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
                 if (ret < 0)
@@ -496,9 +498,9 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
                         found = nilfs_btree_node_lookup(node, key, &index);
                 else
                         index = 0;
-                if (index < nilfs_btree_node_nchildren_max(node, btree))
+                if (index < ncmax) {
                         ptr = nilfs_btree_node_get_ptr(btree, node, index);
-                else {
+                } else {
                         WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
                         /* insert */
                         ptr = NILFS_BMAP_INVALID_PTR;
@@ -921,7 +923,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
         struct buffer_head *bh;
         struct nilfs_btree_node *node, *parent, *sib;
         __u64 sibptr;
-        int pindex, level, ret;
+        int pindex, level, ncmax, ret;
         struct inode *dat = NULL;
 
         stats->bs_nblocks = 0;
@@ -938,12 +940,13 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
         if (ret < 0)
                 goto err_out_data;
 
+        ncmax = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree));
+
         for (level = NILFS_BTREE_LEVEL_NODE_MIN;
              level < nilfs_btree_height(btree) - 1;
              level++) {
                 node = nilfs_btree_get_nonroot_node(path, level);
-                if (nilfs_btree_node_get_nchildren(node) <
-                    nilfs_btree_node_nchildren_max(node, btree)) {
+                if (nilfs_btree_node_get_nchildren(node) < ncmax) {
                         path[level].bp_op = nilfs_btree_do_insert;
                         stats->bs_nblocks++;
                         goto out;
@@ -960,8 +963,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
                         if (ret < 0)
                                 goto err_out_child_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
-                        if (nilfs_btree_node_get_nchildren(sib) <
-                            nilfs_btree_node_nchildren_max(sib, btree)) {
+                        if (nilfs_btree_node_get_nchildren(sib) < ncmax) {
                                 path[level].bp_sib_bh = bh;
                                 path[level].bp_op = nilfs_btree_carry_left;
                                 stats->bs_nblocks++;
@@ -979,8 +981,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
                         if (ret < 0)
                                 goto err_out_child_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
-                        if (nilfs_btree_node_get_nchildren(sib) <
-                            nilfs_btree_node_nchildren_max(sib, btree)) {
+                        if (nilfs_btree_node_get_nchildren(sib) < ncmax) {
                                 path[level].bp_sib_bh = bh;
                                 path[level].bp_op = nilfs_btree_carry_right;
                                 stats->bs_nblocks++;
@@ -1014,7 +1015,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
         /* root */
         node = nilfs_btree_get_root(btree);
         if (nilfs_btree_node_get_nchildren(node) <
-            nilfs_btree_node_nchildren_max(node, btree)) {
+            NILFS_BTREE_ROOT_NCHILDREN_MAX) {
                 path[level].bp_op = nilfs_btree_do_insert;
                 stats->bs_nblocks++;
                 goto out;
@@ -1281,10 +1282,12 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
         struct buffer_head *bh;
         struct nilfs_btree_node *node, *parent, *sib;
         __u64 sibptr;
-        int pindex, level, ret;
+        int pindex, level, ncmin, ret;
 
         ret = 0;
         stats->bs_nblocks = 0;
+        ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
+
         for (level = NILFS_BTREE_LEVEL_NODE_MIN;
              level < nilfs_btree_height(btree) - 1;
              level++) {
@@ -1297,8 +1300,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
                 if (ret < 0)
                         goto err_out_child_node;
 
-                if (nilfs_btree_node_get_nchildren(node) >
-                    nilfs_btree_node_nchildren_min(node, btree)) {
+                if (nilfs_btree_node_get_nchildren(node) > ncmin) {
                         path[level].bp_op = nilfs_btree_do_delete;
                         stats->bs_nblocks++;
                         goto out;
@@ -1315,8 +1317,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
                         if (ret < 0)
                                 goto err_out_curr_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
-                        if (nilfs_btree_node_get_nchildren(sib) >
-                            nilfs_btree_node_nchildren_min(sib, btree)) {
+                        if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
                                 path[level].bp_sib_bh = bh;
                                 path[level].bp_op = nilfs_btree_borrow_left;
                                 stats->bs_nblocks++;
@@ -1336,8 +1337,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
                         if (ret < 0)
                                 goto err_out_curr_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
-                        if (nilfs_btree_node_get_nchildren(sib) >
-                            nilfs_btree_node_nchildren_min(sib, btree)) {
+                        if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
                                 path[level].bp_sib_bh = bh;
                                 path[level].bp_op = nilfs_btree_borrow_right;
                                 stats->bs_nblocks++;