xfs: create slab caches for frequently-used deferred items

Create slab caches for the high-level structures that coordinate
deferred intent items, since they're used fairly heavily.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Chandan Babu R <chandan.babu@oracle.com>
Author: Darrick J. Wong
Date:   2021-10-12 14:11:01 -07:00
parent 9e253954ac
commit f3c799c22c

12 changed files with 154 additions and 16 deletions
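Every file in this patch applies the same pattern: declare a named slab cache for the intent structure, create it once at init time, switch allocation sites from kmem_alloc(..., KM_NOFS) to kmem_cache_alloc()/kmem_cache_zalloc() with GFP_NOFS | __GFP_NOFAIL, switch the matching frees to kmem_cache_free(), and destroy the cache at teardown. The sketch below is an illustration of that pattern only, not part of the patch; "xfs_foo_intent" and every identifier containing "foo" are made-up names.

/* Illustration only: "xfs_foo_intent" is a hypothetical deferred item type. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

struct xfs_foo_intent {
	struct list_head	fi_list;
	int			fi_type;
};

static struct kmem_cache	*xfs_foo_intent_cache;

/* Create the named cache once, at module init. */
int __init
xfs_foo_intent_init_cache(void)
{
	xfs_foo_intent_cache = kmem_cache_create("xfs_foo_intent",
			sizeof(struct xfs_foo_intent), 0, 0, NULL);
	return xfs_foo_intent_cache != NULL ? 0 : -ENOMEM;
}

/* Allocation sites switch from kmem_alloc(..., KM_NOFS) to the cache. */
static struct xfs_foo_intent *
xfs_foo_intent_alloc(void)
{
	return kmem_cache_zalloc(xfs_foo_intent_cache,
			GFP_NOFS | __GFP_NOFAIL);
}

/* Matching frees switch from kmem_free() to kmem_cache_free(). */
static void
xfs_foo_intent_free(struct xfs_foo_intent *fi)
{
	kmem_cache_free(xfs_foo_intent_cache, fi);
}

/* Destroy the cache at teardown; NULL the pointer so it can be recreated. */
void
xfs_foo_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_foo_intent_cache);
	xfs_foo_intent_cache = NULL;
}

Centralizing the per-type init/destroy calls in xfs_defer_init_item_caches() and xfs_defer_destroy_item_caches() (added to xfs_defer.c below) keeps xfs_init_caches() in xfs_super.c down to a single call and a single error label.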

fs/xfs/libxfs/xfs_bmap.c

@@ -37,7 +37,7 @@
#include "xfs_icache.h"
#include "xfs_iomap.h"
+struct kmem_cache *xfs_bmap_intent_cache;
struct kmem_cache *xfs_bmap_free_item_cache;
/*
@@ -6190,7 +6190,7 @@ __xfs_bmap_add(
bmap->br_blockcount,
bmap->br_state);
-bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
+bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
INIT_LIST_HEAD(&bi->bi_list);
bi->bi_type = type;
bi->bi_owner = ip;
@@ -6301,3 +6301,20 @@ xfs_bmap_validate_extent(
return __this_address;
return NULL;
}
+
+int __init
+xfs_bmap_intent_init_cache(void)
+{
+	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
+			sizeof(struct xfs_bmap_intent),
+			0, 0, NULL);
+
+	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
+}
+
+void
+xfs_bmap_intent_destroy_cache(void)
+{
+	kmem_cache_destroy(xfs_bmap_intent_cache);
+	xfs_bmap_intent_cache = NULL;
+}

fs/xfs/libxfs/xfs_bmap.h

@@ -290,4 +290,9 @@ int xfs_bmapi_remap(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, xfs_fsblock_t startblock,
int flags);
+extern struct kmem_cache *xfs_bmap_intent_cache;
+int __init xfs_bmap_intent_init_cache(void);
+void xfs_bmap_intent_destroy_cache(void);
#endif /* __XFS_BMAP_H__ */

fs/xfs/libxfs/xfs_defer.c

@@ -18,6 +18,11 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_bmap.h"
static struct kmem_cache *xfs_defer_pending_cache;
/*
* Deferred Operations in XFS
@@ -365,7 +370,7 @@ xfs_defer_cancel_list(
ops->cancel_item(pwi);
}
ASSERT(dfp->dfp_count == 0);
-kmem_free(dfp);
+kmem_cache_free(xfs_defer_pending_cache, dfp);
}
}
@@ -462,7 +467,7 @@ xfs_defer_finish_one(
/* Done with the dfp, free it. */
list_del(&dfp->dfp_list);
-kmem_free(dfp);
+kmem_cache_free(xfs_defer_pending_cache, dfp);
out:
if (ops->finish_cleanup)
ops->finish_cleanup(tp, state, error);
@@ -596,8 +601,8 @@ xfs_defer_add(
dfp = NULL;
}
if (!dfp) {
-dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
-KM_NOFS);
+dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
+GFP_NOFS | __GFP_NOFAIL);
dfp->dfp_type = type;
dfp->dfp_intent = NULL;
dfp->dfp_done = NULL;
@@ -809,3 +814,55 @@ xfs_defer_resources_rele(
dres->dr_bufs = 0;
dres->dr_ordered = 0;
}
+
+static inline int __init
+xfs_defer_init_cache(void)
+{
+	xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending",
+			sizeof(struct xfs_defer_pending),
+			0, 0, NULL);
+
+	return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM;
+}
+
+static inline void
+xfs_defer_destroy_cache(void)
+{
+	kmem_cache_destroy(xfs_defer_pending_cache);
+	xfs_defer_pending_cache = NULL;
+}
+
+/* Set up caches for deferred work items. */
+int __init
+xfs_defer_init_item_caches(void)
+{
+	int			error;
+
+	error = xfs_defer_init_cache();
+	if (error)
+		return error;
+	error = xfs_rmap_intent_init_cache();
+	if (error)
+		goto err;
+	error = xfs_refcount_intent_init_cache();
+	if (error)
+		goto err;
+	error = xfs_bmap_intent_init_cache();
+	if (error)
+		goto err;
+
+	return 0;
+err:
+	xfs_defer_destroy_item_caches();
+	return error;
+}
+
+/* Destroy all the deferred work item caches, if they've been allocated. */
+void
+xfs_defer_destroy_item_caches(void)
+{
+	xfs_bmap_intent_destroy_cache();
+	xfs_refcount_intent_destroy_cache();
+	xfs_rmap_intent_destroy_cache();
+	xfs_defer_destroy_cache();
+}

fs/xfs/libxfs/xfs_defer.h

@@ -122,4 +122,7 @@ void xfs_defer_ops_capture_free(struct xfs_mount *mp,
struct xfs_defer_capture *d);
void xfs_defer_resources_rele(struct xfs_defer_resources *dres);
+int __init xfs_defer_init_item_caches(void);
+void xfs_defer_destroy_item_caches(void);
#endif /* __XFS_DEFER_H__ */

fs/xfs/libxfs/xfs_refcount.c

@@ -24,6 +24,8 @@
#include "xfs_rmap.h"
#include "xfs_ag.h"
+struct kmem_cache *xfs_refcount_intent_cache;
/* Allowable refcount adjustment amounts. */
enum xfs_refc_adjust_op {
XFS_REFCOUNT_ADJUST_INCREASE = 1,
@@ -1235,8 +1237,8 @@ __xfs_refcount_add(
type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
blockcount);
-ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
-KM_NOFS);
+ri = kmem_cache_alloc(xfs_refcount_intent_cache,
+GFP_NOFS | __GFP_NOFAIL);
INIT_LIST_HEAD(&ri->ri_list);
ri->ri_type = type;
ri->ri_startblock = startblock;
@@ -1782,3 +1784,20 @@ xfs_refcount_has_record(
return xfs_btree_has_record(cur, &low, &high, exists);
}
+
+int __init
+xfs_refcount_intent_init_cache(void)
+{
+	xfs_refcount_intent_cache = kmem_cache_create("xfs_refc_intent",
+			sizeof(struct xfs_refcount_intent),
+			0, 0, NULL);
+
+	return xfs_refcount_intent_cache != NULL ? 0 : -ENOMEM;
+}
+
+void
+xfs_refcount_intent_destroy_cache(void)
+{
+	kmem_cache_destroy(xfs_refcount_intent_cache);
+	xfs_refcount_intent_cache = NULL;
+}

fs/xfs/libxfs/xfs_refcount.h

@@ -83,4 +83,9 @@ extern void xfs_refcount_btrec_to_irec(const union xfs_btree_rec *rec,
extern int xfs_refcount_insert(struct xfs_btree_cur *cur,
struct xfs_refcount_irec *irec, int *stat);
+extern struct kmem_cache *xfs_refcount_intent_cache;
+int __init xfs_refcount_intent_init_cache(void);
+void xfs_refcount_intent_destroy_cache(void);
#endif /* __XFS_REFCOUNT_H__ */

fs/xfs/libxfs/xfs_rmap.c

@@ -24,6 +24,8 @@
#include "xfs_inode.h"
#include "xfs_ag.h"
+struct kmem_cache *xfs_rmap_intent_cache;
/*
* Lookup the first record less than or equal to [bno, len, owner, offset]
* in the btree given by cur.
@@ -2485,7 +2487,7 @@ __xfs_rmap_add(
bmap->br_blockcount,
bmap->br_state);
-ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_NOFS);
+ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
INIT_LIST_HEAD(&ri->ri_list);
ri->ri_type = type;
ri->ri_owner = owner;
@@ -2779,3 +2781,20 @@ const struct xfs_owner_info XFS_RMAP_OINFO_REFC = {
const struct xfs_owner_info XFS_RMAP_OINFO_COW = {
.oi_owner = XFS_RMAP_OWN_COW,
};
+
+int __init
+xfs_rmap_intent_init_cache(void)
+{
+	xfs_rmap_intent_cache = kmem_cache_create("xfs_rmap_intent",
+			sizeof(struct xfs_rmap_intent),
+			0, 0, NULL);
+
+	return xfs_rmap_intent_cache != NULL ? 0 : -ENOMEM;
+}
+
+void
+xfs_rmap_intent_destroy_cache(void)
+{
+	kmem_cache_destroy(xfs_rmap_intent_cache);
+	xfs_rmap_intent_cache = NULL;
+}

fs/xfs/libxfs/xfs_rmap.h

@@ -215,4 +215,9 @@ extern const struct xfs_owner_info XFS_RMAP_OINFO_INODES;
extern const struct xfs_owner_info XFS_RMAP_OINFO_REFC;
extern const struct xfs_owner_info XFS_RMAP_OINFO_COW;
+extern struct kmem_cache *xfs_rmap_intent_cache;
+int __init xfs_rmap_intent_init_cache(void);
+void xfs_rmap_intent_destroy_cache(void);
#endif /* __XFS_RMAP_H__ */

fs/xfs/xfs_bmap_item.c

@@ -384,7 +384,7 @@ xfs_bmap_update_finish_item(
bmap->bi_bmap.br_blockcount = count;
return -EAGAIN;
}
-kmem_free(bmap);
+kmem_cache_free(xfs_bmap_intent_cache, bmap);
return error;
}
@@ -404,7 +404,7 @@ xfs_bmap_update_cancel_item(
struct xfs_bmap_intent *bmap;
bmap = container_of(item, struct xfs_bmap_intent, bi_list);
-kmem_free(bmap);
+kmem_cache_free(xfs_bmap_intent_cache, bmap);
}
const struct xfs_defer_op_type xfs_bmap_update_defer_type = {

fs/xfs/xfs_refcount_item.c

@@ -384,7 +384,7 @@ xfs_refcount_update_finish_item(
refc->ri_blockcount = new_aglen;
return -EAGAIN;
}
-kmem_free(refc);
+kmem_cache_free(xfs_refcount_intent_cache, refc);
return error;
}
@@ -404,7 +404,7 @@ xfs_refcount_update_cancel_item(
struct xfs_refcount_intent *refc;
refc = container_of(item, struct xfs_refcount_intent, ri_list);
-kmem_free(refc);
+kmem_cache_free(xfs_refcount_intent_cache, refc);
}
const struct xfs_defer_op_type xfs_refcount_update_defer_type = {

fs/xfs/xfs_rmap_item.c

@@ -427,7 +427,7 @@ xfs_rmap_update_finish_item(
rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
state);
-kmem_free(rmap);
+kmem_cache_free(xfs_rmap_intent_cache, rmap);
return error;
}
@@ -447,7 +447,7 @@ xfs_rmap_update_cancel_item(
struct xfs_rmap_intent *rmap;
rmap = container_of(item, struct xfs_rmap_intent, ri_list);
-kmem_free(rmap);
+kmem_cache_free(xfs_rmap_intent_cache, rmap);
}
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {

fs/xfs/xfs_super.c

@@ -38,6 +38,7 @@
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_defer.h"
#include <linux/magic.h>
#include <linux/fs_context.h>
@@ -1972,11 +1973,15 @@ xfs_init_caches(void)
if (error)
goto out_destroy_bmap_free_item_cache;
+error = xfs_defer_init_item_caches();
+if (error)
+goto out_destroy_btree_cur_cache;
xfs_da_state_cache = kmem_cache_create("xfs_da_state",
sizeof(struct xfs_da_state),
0, 0, NULL);
if (!xfs_da_state_cache)
-goto out_destroy_btree_cur_cache;
+goto out_destroy_defer_item_cache;
xfs_ifork_cache = kmem_cache_create("xfs_ifork",
sizeof(struct xfs_ifork),
@@ -2106,6 +2111,8 @@ xfs_init_caches(void)
kmem_cache_destroy(xfs_ifork_cache);
out_destroy_da_state_cache:
kmem_cache_destroy(xfs_da_state_cache);
+out_destroy_defer_item_cache:
+xfs_defer_destroy_item_caches();
out_destroy_btree_cur_cache:
xfs_btree_destroy_cur_caches();
out_destroy_bmap_free_item_cache:
@@ -2139,6 +2146,7 @@ xfs_destroy_caches(void)
kmem_cache_destroy(xfs_trans_cache);
kmem_cache_destroy(xfs_ifork_cache);
kmem_cache_destroy(xfs_da_state_cache);
+xfs_defer_destroy_item_caches();
xfs_btree_destroy_cur_caches();
kmem_cache_destroy(xfs_bmap_free_item_cache);
kmem_cache_destroy(xfs_log_ticket_cache);