xfs: create deferred log items for file mapping exchanges

Now that we've created the skeleton of a log intent item to track and
restart file mapping exchange operations, add the upper level logic to
commit intent items and turn them into concrete work recorded in the
log.  This builds on the existing bmap update intent items that have
been around for a while now.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
Darrick J. Wong 2024-04-15 14:54:17 -07:00
parent 6c08f434bd
commit 966ceafc7a
13 changed files with 1844 additions and 4 deletions

View file

@ -34,6 +34,7 @@ xfs-y += $(addprefix libxfs/, \
xfs_dir2_node.o \
xfs_dir2_sf.o \
xfs_dquot_buf.o \
xfs_exchmaps.o \
xfs_ialloc.o \
xfs_ialloc_btree.o \
xfs_iext_tree.o \

View file

@ -27,6 +27,7 @@
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_trans_priv.h"
#include "xfs_exchmaps.h"
static struct kmem_cache *xfs_defer_pending_cache;
@ -1176,6 +1177,10 @@ xfs_defer_init_item_caches(void)
error = xfs_attr_intent_init_cache();
if (error)
goto err;
error = xfs_exchmaps_intent_init_cache();
if (error)
goto err;
return 0;
err:
xfs_defer_destroy_item_caches();
@ -1186,6 +1191,7 @@ xfs_defer_init_item_caches(void)
void
xfs_defer_destroy_item_caches(void)
{
xfs_exchmaps_intent_destroy_cache();
xfs_attr_intent_destroy_cache();
xfs_extfree_intent_destroy_cache();
xfs_bmap_intent_destroy_cache();

View file

@ -72,7 +72,7 @@ extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
extern const struct xfs_defer_op_type xfs_extent_free_defer_type;
extern const struct xfs_defer_op_type xfs_agfl_free_defer_type;
extern const struct xfs_defer_op_type xfs_attr_defer_type;
extern const struct xfs_defer_op_type xfs_exchmaps_defer_type;
/*
* Deferred operation item relogging limits.

1045
fs/xfs/libxfs/xfs_exchmaps.c Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,118 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2020-2024 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <djwong@kernel.org>
*/
#ifndef __XFS_EXCHMAPS_H__
#define __XFS_EXCHMAPS_H__
/* In-core deferred operation info about a file mapping exchange request. */
struct xfs_exchmaps_intent {
	/* List of other incore deferred work. */
	struct list_head	xmi_list;

	/* Inodes participating in the operation. */
	struct xfs_inode	*xmi_ip1;
	struct xfs_inode	*xmi_ip2;

	/* File offset range information. */
	xfs_fileoff_t		xmi_startoff1;
	xfs_fileoff_t		xmi_startoff2;
	xfs_filblks_t		xmi_blockcount;

	/*
	 * Set these file sizes after the operation, unless negative.  (Only
	 * consulted when XFS_EXCHMAPS_SET_SIZES is set in xmi_flags.)
	 */
	xfs_fsize_t		xmi_isize1;
	xfs_fsize_t		xmi_isize2;

	/* XFS_EXCHMAPS_* flags */
	uint64_t		xmi_flags;
};
/* flags that can be passed to xfs_exchmaps_{estimate,mappings} */
#define XFS_EXCHMAPS_PARAMS (XFS_EXCHMAPS_ATTR_FORK | \
XFS_EXCHMAPS_SET_SIZES | \
XFS_EXCHMAPS_INO1_WRITTEN)
/* Return the inode fork (attr or data) that this exchange intent targets. */
static inline int
xfs_exchmaps_whichfork(const struct xfs_exchmaps_intent *xmi)
{
	return (xmi->xmi_flags & XFS_EXCHMAPS_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
}
/* Parameters for a mapping exchange request. */
struct xfs_exchmaps_req {
	/* Inodes participating in the operation. */
	struct xfs_inode	*ip1;
	struct xfs_inode	*ip2;

	/* File offset range information. */
	xfs_fileoff_t		startoff1;
	xfs_fileoff_t		startoff2;
	xfs_filblks_t		blockcount;

	/* XFS_EXCHMAPS_* operation flags */
	uint64_t		flags;

	/*
	 * Fields below this line are filled out by xfs_exchmaps_estimate;
	 * callers should initialize this part of the struct to zero.
	 */

	/*
	 * Data device blocks to be moved out of ip1, and free space needed to
	 * handle the bmbt changes.
	 */
	xfs_filblks_t		ip1_bcount;

	/*
	 * Data device blocks to be moved out of ip2, and free space needed to
	 * handle the bmbt changes.
	 */
	xfs_filblks_t		ip2_bcount;

	/* rt blocks to be moved out of ip1. */
	xfs_filblks_t		ip1_rtbcount;

	/* rt blocks to be moved out of ip2. */
	xfs_filblks_t		ip2_rtbcount;

	/* Free space needed to handle the bmbt changes */
	unsigned long long	resblks;

	/* Number of exchanges needed to complete the operation */
	unsigned long long	nr_exchanges;
};
/* Return the inode fork (attr or data) that this exchange request targets. */
static inline int
xfs_exchmaps_reqfork(const struct xfs_exchmaps_req *req)
{
	return (req->flags & XFS_EXCHMAPS_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
}
int xfs_exchmaps_estimate(struct xfs_exchmaps_req *req);
extern struct kmem_cache *xfs_exchmaps_intent_cache;
int __init xfs_exchmaps_intent_init_cache(void);
void xfs_exchmaps_intent_destroy_cache(void);
struct xfs_exchmaps_intent *xfs_exchmaps_init_intent(
const struct xfs_exchmaps_req *req);
void xfs_exchmaps_ensure_reflink(struct xfs_trans *tp,
const struct xfs_exchmaps_intent *xmi);
void xfs_exchmaps_upgrade_extent_counts(struct xfs_trans *tp,
const struct xfs_exchmaps_intent *xmi);
int xfs_exchmaps_finish_one(struct xfs_trans *tp,
struct xfs_exchmaps_intent *xmi);
int xfs_exchmaps_check_forks(struct xfs_mount *mp,
const struct xfs_exchmaps_req *req);
void xfs_exchange_mappings(struct xfs_trans *tp,
const struct xfs_exchmaps_req *req);
#endif /* __XFS_EXCHMAPS_H__ */

View file

@ -904,7 +904,29 @@ struct xfs_xmi_log_format {
uint64_t xmi_isize2; /* intended file2 size */
};
#define XFS_EXCHMAPS_LOGGED_FLAGS (0)
/* Exchange mappings between extended attribute forks instead of data forks. */
#define XFS_EXCHMAPS_ATTR_FORK (1ULL << 0)
/* Set the file sizes when finished. */
#define XFS_EXCHMAPS_SET_SIZES (1ULL << 1)
/*
* Exchange the mappings of the two files only if the file allocation units
* mapped to file1's range have been written.
*/
#define XFS_EXCHMAPS_INO1_WRITTEN (1ULL << 2)
/* Clear the reflink flag from inode1 after the operation. */
#define XFS_EXCHMAPS_CLEAR_INO1_REFLINK (1ULL << 3)
/* Clear the reflink flag from inode2 after the operation. */
#define XFS_EXCHMAPS_CLEAR_INO2_REFLINK (1ULL << 4)
#define XFS_EXCHMAPS_LOGGED_FLAGS (XFS_EXCHMAPS_ATTR_FORK | \
XFS_EXCHMAPS_SET_SIZES | \
XFS_EXCHMAPS_INO1_WRITTEN | \
XFS_EXCHMAPS_CLEAR_INO1_REFLINK | \
XFS_EXCHMAPS_CLEAR_INO2_REFLINK)
/* This is the structure used to lay out a mapping exchange done log item. */
struct xfs_xmd_log_format {

View file

@ -10,6 +10,10 @@
* Components of space reservations.
*/
/* Worst case number of bmaps that can be held in a block. */
#define XFS_MAX_CONTIG_BMAPS_PER_BLOCK(mp) \
(((mp)->m_bmap_dmxr[0]) - ((mp)->m_bmap_dmnr[0]))
/* Worst case number of rmaps that can be held in a block. */
#define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) \
(((mp)->m_rmap_mxr[0]) - ((mp)->m_rmap_mnr[0]))

View file

@ -16,13 +16,17 @@
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_exchmaps_item.h"
#include "xfs_exchmaps.h"
#include "xfs_log.h"
#include "xfs_bmap.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_exchrange.h"
#include "xfs_trace.h"
struct kmem_cache *xfs_xmi_cache;
struct kmem_cache *xfs_xmd_cache;
@ -144,6 +148,365 @@ static inline struct xfs_xmd_log_item *XMD_ITEM(struct xfs_log_item *lip)
return container_of(lip, struct xfs_xmd_log_item, xmd_item);
}
/*
 * Compute the log space needed for an XMD (exchange-mapping done) item:
 * a single iovec holding the xmd log format structure.
 */
STATIC void
xfs_xmd_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_xmd_log_format);
}
/*
 * This is called to fill in the vector of log iovecs for the given xmd log
 * item. We use only 1 iovec, and we point that at the xmd_log_format structure
 * embedded in the xmd item.
 */
STATIC void
xfs_xmd_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_xmd_log_item	*xmd_lip = XMD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Stamp the item type/size header before copying into the log. */
	xmd_lip->xmd_format.xmd_type = XFS_LI_XMD;
	xmd_lip->xmd_format.xmd_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_XMD_FORMAT, &xmd_lip->xmd_format,
			sizeof(struct xfs_xmd_log_format));
}
/*
 * The XMD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the XMI and free the
 * XMD.
 */
STATIC void
xfs_xmd_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_xmd_log_item	*xmd_lip = XMD_ITEM(lip);

	/* Drop the intent reference, then free the done item's storage. */
	xfs_xmi_release(xmd_lip->xmd_intent_log_item);
	kvfree(xmd_lip->xmd_item.li_lv_shadow);
	kmem_cache_free(xfs_xmd_cache, xmd_lip);
}
/* Return the XMI log item that this XMD done item completes. */
static struct xfs_log_item *
xfs_xmd_item_intent(
	struct xfs_log_item	*lip)
{
	return &XMD_ITEM(lip)->xmd_intent_log_item->xmi_item;
}
/* Log item ops vector for XMD (intent-done) items. */
static const struct xfs_item_ops xfs_xmd_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_xmd_item_size,
	.iop_format	= xfs_xmd_item_format,
	.iop_release	= xfs_xmd_item_release,
	.iop_intent	= xfs_xmd_item_intent,
};
/* Log file mapping exchange information in the intent item. */
STATIC struct xfs_log_item *
xfs_exchmaps_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_xmi_log_item		*xmi_lip;
	struct xfs_exchmaps_intent	*xmi;
	struct xfs_xmi_log_format	*xlf;

	/* The defer type caps this at one work item, so @items has one entry. */
	ASSERT(count == 1);

	xmi = list_first_entry_or_null(items, struct xfs_exchmaps_intent,
			xmi_list);

	xmi_lip = xfs_xmi_init(tp->t_mountp);
	xlf = &xmi_lip->xmi_format;

	/* Copy the incore intent state into the on-disk log format. */
	xlf->xmi_inode1 = xmi->xmi_ip1->i_ino;
	xlf->xmi_inode2 = xmi->xmi_ip2->i_ino;
	xlf->xmi_startoff1 = xmi->xmi_startoff1;
	xlf->xmi_startoff2 = xmi->xmi_startoff2;
	xlf->xmi_blockcount = xmi->xmi_blockcount;
	xlf->xmi_isize1 = xmi->xmi_isize1;
	xlf->xmi_isize2 = xmi->xmi_isize2;
	/* Only flags defined in the on-disk format are allowed in the log. */
	xlf->xmi_flags = xmi->xmi_flags & XFS_EXCHMAPS_LOGGED_FLAGS;

	return &xmi_lip->xmi_item;
}
/* Create an XMD done item to mark completion of a logged XMI intent. */
STATIC struct xfs_log_item *
xfs_exchmaps_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	struct xfs_xmi_log_item		*xmi_lip = XMI_ITEM(intent);
	struct xfs_xmd_log_item		*xmd_lip;

	xmd_lip = kmem_cache_zalloc(xfs_xmd_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &xmd_lip->xmd_item, XFS_LI_XMD,
			  &xfs_xmd_item_ops);
	xmd_lip->xmd_intent_log_item = xmi_lip;
	/* Tie the done item to its intent by id so recovery can match them. */
	xmd_lip->xmd_format.xmd_xmi_id = xmi_lip->xmi_format.xmi_id;

	return &xmd_lip->xmd_item;
}
/* Add this deferred XMI to the transaction. */
void
xfs_exchmaps_defer_add(
	struct xfs_trans		*tp,
	struct xfs_exchmaps_intent	*xmi)
{
	trace_xfs_exchmaps_defer(tp->t_mountp, xmi);

	xfs_defer_add(tp, &xmi->xmi_list, &xfs_exchmaps_defer_type);
}
/* Convert a deferred-work list entry back into its exchmaps intent. */
static inline struct xfs_exchmaps_intent *xmi_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_exchmaps_intent, xmi_list);
}
/* Cancel a deferred file mapping exchange. */
STATIC void
xfs_exchmaps_cancel_item(
	struct list_head		*item)
{
	struct xfs_exchmaps_intent	*xmi = xmi_entry(item);

	/* Free the incore intent; nothing else is torn down here. */
	kmem_cache_free(xfs_exchmaps_intent_cache, xmi);
}
/* Process a deferred file mapping exchange. */
STATIC int
xfs_exchmaps_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_exchmaps_intent	*xmi = xmi_entry(item);
	int				error;

	/*
	 * Exchange one more mapping between two files.  If there's still more
	 * work to do, we want to requeue ourselves after all other pending
	 * deferred operations have finished.  This includes all of the dfops
	 * that we queued directly as well as any new ones created in the
	 * process of finishing the others.  Doing so prevents us from queuing
	 * a large number of XMI log items in kernel memory, which in turn
	 * prevents us from pinning the tail of the log (while logging those
	 * new XMI items) until the first XMI items can be processed.
	 */
	error = xfs_exchmaps_finish_one(tp, xmi);
	/* -EAGAIN means "requeue me"; the intent must stay alive for that. */
	if (error != -EAGAIN)
		xfs_exchmaps_cancel_item(item);
	return error;
}
/* Abort all pending XMIs. */
STATIC void
xfs_exchmaps_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_xmi_release(XMI_ITEM(intent));
}
/* Is this recovered XMI ok? */
static inline bool
xfs_xmi_validate(
	struct xfs_mount		*mp,
	struct xfs_xmi_log_item		*xmi_lip)
{
	struct xfs_xmi_log_format	*xlf = &xmi_lip->xmi_format;

	/* Mapping exchange intents require the exchange-range feature. */
	if (!xfs_has_exchange_range(mp))
		return false;

	/* Padding must be zero on disk. */
	if (xmi_lip->xmi_format.__pad != 0)
		return false;

	/* Reject flag bits that are not part of the on-disk format. */
	if (xlf->xmi_flags & ~XFS_EXCHMAPS_LOGGED_FLAGS)
		return false;

	/* Both inode numbers and both file ranges must verify. */
	if (!xfs_verify_ino(mp, xlf->xmi_inode1) ||
	    !xfs_verify_ino(mp, xlf->xmi_inode2))
		return false;

	if (!xfs_verify_fileext(mp, xlf->xmi_startoff1, xlf->xmi_blockcount))
		return false;

	return xfs_verify_fileext(mp, xlf->xmi_startoff2, xlf->xmi_blockcount);
}
/*
 * Use the recovered log state to create a new request, estimate resource
 * requirements, and create a new incore intent state.
 *
 * On success, *ipp1/*ipp2 hold references to the two inodes and the new
 * intent has been attached to @dfp.  On failure both inode references are
 * released and an ERR_PTR is returned.
 */
STATIC struct xfs_exchmaps_intent *
xfs_xmi_item_recover_intent(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	const struct xfs_xmi_log_format	*xlf,
	struct xfs_exchmaps_req		*req,
	struct xfs_inode		**ipp1,
	struct xfs_inode		**ipp2)
{
	struct xfs_inode		*ip1, *ip2;
	struct xfs_exchmaps_intent	*xmi;
	int				error;

	/*
	 * Grab both inodes and set IRECOVERY to prevent trimming of post-eof
	 * mappings and freeing of unlinked inodes until we're totally done
	 * processing files.
	 */
	error = xlog_recover_iget(mp, xlf->xmi_inode1, &ip1);
	if (error)
		return ERR_PTR(error);
	error = xlog_recover_iget(mp, xlf->xmi_inode2, &ip2);
	if (error)
		goto err_rele1;

	/* Rebuild the request from the recovered log format. */
	req->ip1 = ip1;
	req->ip2 = ip2;
	req->startoff1 = xlf->xmi_startoff1;
	req->startoff2 = xlf->xmi_startoff2;
	req->blockcount = xlf->xmi_blockcount;
	/* Only the estimate/mappings parameter flags are relevant here. */
	req->flags = xlf->xmi_flags & XFS_EXCHMAPS_PARAMS;

	/* Estimation requires the ILOCKs but no transaction. */
	xfs_exchrange_ilock(NULL, ip1, ip2);
	error = xfs_exchmaps_estimate(req);
	xfs_exchrange_iunlock(ip1, ip2);
	if (error)
		goto err_rele2;

	*ipp1 = ip1;
	*ipp2 = ip2;
	xmi = xfs_exchmaps_init_intent(req);
	xfs_defer_add_item(dfp, &xmi->xmi_list);
	return xmi;

err_rele2:
	xfs_irele(ip2);
err_rele1:
	xfs_irele(ip1);
	req->ip2 = req->ip1 = NULL;
	return ERR_PTR(error);
}
/* Process a file mapping exchange item that was recovered from the log. */
STATIC int
xfs_exchmaps_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_exchmaps_req		req = { .flags = 0 };
	struct xfs_trans_res		resv;
	struct xfs_exchmaps_intent	*xmi;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_xmi_log_item		*xmi_lip = XMI_ITEM(lip);
	struct xfs_mount		*mp = lip->li_log->l_mp;
	struct xfs_trans		*tp;
	struct xfs_inode		*ip1, *ip2;
	int				error = 0;

	/* Refuse to act on a corrupt recovered intent. */
	if (!xfs_xmi_validate(mp, xmi_lip)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&xmi_lip->xmi_format,
				sizeof(xmi_lip->xmi_format));
		return -EFSCORRUPTED;
	}

	/* Grab the inodes, rebuild the request, and create the incore intent. */
	xmi = xfs_xmi_item_recover_intent(mp, dfp, &xmi_lip->xmi_format, &req,
			&ip1, &ip2);
	if (IS_ERR(xmi))
		return PTR_ERR(xmi);

	trace_xfs_exchmaps_recover(mp, xmi);

	resv = xlog_recover_resv(&M_RES(mp)->tr_write);
	error = xfs_trans_alloc(mp, &resv, req.resblks, 0, 0, &tp);
	if (error)
		goto err_rele;

	xfs_exchrange_ilock(tp, ip1, ip2);

	xfs_exchmaps_ensure_reflink(tp, xmi);
	xfs_exchmaps_upgrade_extent_counts(tp, xmi);
	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&xmi_lip->xmi_format,
				sizeof(xmi_lip->xmi_format));
	if (error)
		goto err_cancel;

	/*
	 * Commit transaction, which frees the transaction and saves the inodes
	 * for later replay activities.
	 */
	error = xfs_defer_ops_capture_and_commit(tp, capture_list);
	goto err_unlock;

err_cancel:
	xfs_trans_cancel(tp);
err_unlock:
	xfs_exchrange_iunlock(ip1, ip2);
err_rele:
	/* Both inode references were taken by the recover_intent helper. */
	xfs_irele(ip2);
	xfs_irele(ip1);
	return error;
}
/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_exchmaps_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	struct xfs_log_item		*done_item)
{
	struct xfs_xmi_log_item		*xmi_lip;
	struct xfs_xmi_log_format	*old_xlf, *new_xlf;

	old_xlf = &XMI_ITEM(intent)->xmi_format;

	/* Allocate a fresh XMI and copy the old format state into it. */
	xmi_lip = xfs_xmi_init(tp->t_mountp);
	new_xlf = &xmi_lip->xmi_format;

	new_xlf->xmi_inode1	= old_xlf->xmi_inode1;
	new_xlf->xmi_inode2	= old_xlf->xmi_inode2;
	new_xlf->xmi_startoff1	= old_xlf->xmi_startoff1;
	new_xlf->xmi_startoff2	= old_xlf->xmi_startoff2;
	new_xlf->xmi_blockcount	= old_xlf->xmi_blockcount;
	new_xlf->xmi_flags	= old_xlf->xmi_flags;
	new_xlf->xmi_isize1	= old_xlf->xmi_isize1;
	new_xlf->xmi_isize2	= old_xlf->xmi_isize2;

	return &xmi_lip->xmi_item;
}
/* Deferred operation type for file mapping exchanges. */
const struct xfs_defer_op_type xfs_exchmaps_defer_type = {
	.name		= "exchmaps",
	/* Only one exchange intent is handled per intent item. */
	.max_items	= 1,
	.create_intent	= xfs_exchmaps_create_intent,
	.abort_intent	= xfs_exchmaps_abort_intent,
	.create_done	= xfs_exchmaps_create_done,
	.finish_item	= xfs_exchmaps_finish_item,
	.cancel_item	= xfs_exchmaps_cancel_item,
	.recover_work	= xfs_exchmaps_recover_work,
	.relog_intent	= xfs_exchmaps_relog_intent,
};
STATIC bool
xfs_xmi_item_match(
struct xfs_log_item *lip,
@ -194,8 +557,9 @@ xlog_recover_xmi_commit_pass2(
xmi_lip = xfs_xmi_init(mp);
memcpy(&xmi_lip->xmi_format, xmi_formatp, len);
/* not implemented yet */
return -EIO;
xlog_recover_intent_item(log, &xmi_lip->xmi_item, lsn,
&xfs_exchmaps_defer_type);
return 0;
}
const struct xlog_recover_item_ops xlog_xmi_item_ops = {

View file

@ -56,4 +56,9 @@ struct xfs_xmd_log_item {
extern struct kmem_cache *xfs_xmi_cache;
extern struct kmem_cache *xfs_xmd_cache;
struct xfs_exchmaps_intent;
void xfs_exchmaps_defer_add(struct xfs_trans *tp,
struct xfs_exchmaps_intent *xmi);
#endif /* __XFS_EXCHMAPS_ITEM_H__ */

View file

@ -13,8 +13,57 @@
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_exchrange.h"
#include "xfs_exchmaps.h"
#include <linux/fsnotify.h>
/* Lock (and optionally join) two inodes for a file range exchange. */
void
xfs_exchrange_ilock(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	/*
	 * Two distinct inodes go through xfs_lock_two_inodes (which
	 * presumably enforces a consistent lock ordering — see that helper);
	 * a single inode is locked directly.
	 */
	if (ip1 != ip2)
		xfs_lock_two_inodes(ip1, XFS_ILOCK_EXCL,
				    ip2, XFS_ILOCK_EXCL);
	else
		xfs_ilock(ip1, XFS_ILOCK_EXCL);

	/* If the caller supplied a transaction, join the locked inodes. */
	if (tp) {
		xfs_trans_ijoin(tp, ip1, 0);
		if (ip2 != ip1)
			xfs_trans_ijoin(tp, ip2, 0);
	}
}
/*
 * Unlock two inodes after a file range exchange operation.  ip2 is dropped
 * first (skipped entirely when both pointers name the same inode).
 */
void
xfs_exchrange_iunlock(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	if (ip2 != ip1)
		xfs_iunlock(ip2, XFS_ILOCK_EXCL);
	xfs_iunlock(ip1, XFS_ILOCK_EXCL);
}
/*
 * Estimate the resource requirements to exchange file contents between the two
 * files.  The caller is required to hold the IOLOCK and the MMAPLOCK and to
 * have flushed both inodes' pagecache and active direct-ios.
 *
 * Takes and drops the ILOCKs internally; fills the estimate fields of @req.
 * Returns 0 or a negative errno from xfs_exchmaps_estimate.
 */
int
xfs_exchrange_estimate(
	struct xfs_exchmaps_req	*req)
{
	int			error;

	xfs_exchrange_ilock(NULL, req->ip1, req->ip2);
	error = xfs_exchmaps_estimate(req);
	xfs_exchrange_iunlock(req->ip1, req->ip2);
	return error;
}
/*
* Generic code for exchanging ranges of two files via XFS_IOC_EXCHANGE_RANGE.
* This part deals with struct file objects and byte ranges and does not deal

View file

@ -27,4 +27,12 @@ struct xfs_exchrange {
long xfs_ioc_exchange_range(struct file *file,
struct xfs_exchange_range __user *argp);
struct xfs_exchmaps_req;
void xfs_exchrange_ilock(struct xfs_trans *tp, struct xfs_inode *ip1,
struct xfs_inode *ip2);
void xfs_exchrange_iunlock(struct xfs_inode *ip1, struct xfs_inode *ip2);
int xfs_exchrange_estimate(struct xfs_exchmaps_req *req);
#endif /* __XFS_EXCHRANGE_H__ */

View file

@ -39,6 +39,7 @@
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"
#include "xfs_bmap.h"
#include "xfs_exchmaps.h"
/*
* We include this last to have the helpers above available for the trace

View file

@ -82,6 +82,8 @@ struct xfs_perag;
struct xfbtree;
struct xfs_btree_ops;
struct xfs_bmap_intent;
struct xfs_exchmaps_intent;
struct xfs_exchmaps_req;
#define XFS_ATTR_FILTER_FLAGS \
{ XFS_ATTR_ROOT, "ROOT" }, \
@ -4770,6 +4772,221 @@ DEFINE_XFBTREE_FREESP_EVENT(xfbtree_alloc_block);
DEFINE_XFBTREE_FREESP_EVENT(xfbtree_free_block);
#endif /* CONFIG_XFS_BTREE_IN_MEM */
/* exchmaps tracepoints */
#define XFS_EXCHMAPS_STRINGS \
{ XFS_EXCHMAPS_ATTR_FORK, "ATTRFORK" }, \
{ XFS_EXCHMAPS_SET_SIZES, "SETSIZES" }, \
{ XFS_EXCHMAPS_INO1_WRITTEN, "INO1_WRITTEN" }, \
{ XFS_EXCHMAPS_CLEAR_INO1_REFLINK, "CLEAR_INO1_REFLINK" }, \
{ XFS_EXCHMAPS_CLEAR_INO2_REFLINK, "CLEAR_INO2_REFLINK" }
DEFINE_INODE_IREC_EVENT(xfs_exchmaps_mapping1_skip);
DEFINE_INODE_IREC_EVENT(xfs_exchmaps_mapping1);
DEFINE_INODE_IREC_EVENT(xfs_exchmaps_mapping2);
DEFINE_ITRUNC_EVENT(xfs_exchmaps_update_inode_size);
/* Report the estimated bmbt/rmapbt block overhead for an exchange. */
TRACE_EVENT(xfs_exchmaps_overhead,
	TP_PROTO(struct xfs_mount *mp, unsigned long long bmbt_blocks,
		 unsigned long long rmapbt_blocks),
	TP_ARGS(mp, bmbt_blocks, rmapbt_blocks),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long long, bmbt_blocks)
		__field(unsigned long long, rmapbt_blocks)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->bmbt_blocks = bmbt_blocks;
		__entry->rmapbt_blocks = rmapbt_blocks;
	),
	TP_printk("dev %d:%d bmbt_blocks 0x%llx rmapbt_blocks 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->bmbt_blocks,
		  __entry->rmapbt_blocks)
);
/* Event class dumping an exchmaps request, including the estimate fields. */
DECLARE_EVENT_CLASS(xfs_exchmaps_estimate_class,
	TP_PROTO(const struct xfs_exchmaps_req *req),
	TP_ARGS(req),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino1)
		__field(xfs_ino_t, ino2)
		__field(xfs_fileoff_t, startoff1)
		__field(xfs_fileoff_t, startoff2)
		__field(xfs_filblks_t, blockcount)
		__field(uint64_t, flags)
		__field(xfs_filblks_t, ip1_bcount)
		__field(xfs_filblks_t, ip2_bcount)
		__field(xfs_filblks_t, ip1_rtbcount)
		__field(xfs_filblks_t, ip2_rtbcount)
		__field(unsigned long long, resblks)
		__field(unsigned long long, nr_exchanges)
	),
	TP_fast_assign(
		__entry->dev = req->ip1->i_mount->m_super->s_dev;
		__entry->ino1 = req->ip1->i_ino;
		__entry->ino2 = req->ip2->i_ino;
		__entry->startoff1 = req->startoff1;
		__entry->startoff2 = req->startoff2;
		__entry->blockcount = req->blockcount;
		__entry->flags = req->flags;
		__entry->ip1_bcount = req->ip1_bcount;
		__entry->ip2_bcount = req->ip2_bcount;
		__entry->ip1_rtbcount = req->ip1_rtbcount;
		__entry->ip2_rtbcount = req->ip2_rtbcount;
		__entry->resblks = req->resblks;
		__entry->nr_exchanges = req->nr_exchanges;
	),
	TP_printk("dev %d:%d ino1 0x%llx fileoff1 0x%llx ino2 0x%llx fileoff2 0x%llx fsbcount 0x%llx flags (%s) bcount1 0x%llx rtbcount1 0x%llx bcount2 0x%llx rtbcount2 0x%llx resblks 0x%llx nr_exchanges %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino1, __entry->startoff1,
		  __entry->ino2, __entry->startoff2,
		  __entry->blockcount,
		  __print_flags_u64(__entry->flags, "|", XFS_EXCHMAPS_STRINGS),
		  __entry->ip1_bcount,
		  __entry->ip1_rtbcount,
		  __entry->ip2_bcount,
		  __entry->ip2_rtbcount,
		  __entry->resblks,
		  __entry->nr_exchanges)
);
#define DEFINE_EXCHMAPS_ESTIMATE_EVENT(name) \
DEFINE_EVENT(xfs_exchmaps_estimate_class, name, \
	TP_PROTO(const struct xfs_exchmaps_req *req), \
	TP_ARGS(req))
DEFINE_EXCHMAPS_ESTIMATE_EVENT(xfs_exchmaps_initial_estimate);
DEFINE_EXCHMAPS_ESTIMATE_EVENT(xfs_exchmaps_final_estimate);
/* Event class dumping an incore exchmaps intent plus current disk sizes. */
DECLARE_EVENT_CLASS(xfs_exchmaps_intent_class,
	TP_PROTO(struct xfs_mount *mp, const struct xfs_exchmaps_intent *xmi),
	TP_ARGS(mp, xmi),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino1)
		__field(xfs_ino_t, ino2)
		__field(uint64_t, flags)
		__field(xfs_fileoff_t, startoff1)
		__field(xfs_fileoff_t, startoff2)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_fsize_t, isize1)
		__field(xfs_fsize_t, isize2)
		__field(xfs_fsize_t, new_isize1)
		__field(xfs_fsize_t, new_isize2)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->ino1 = xmi->xmi_ip1->i_ino;
		__entry->ino2 = xmi->xmi_ip2->i_ino;
		__entry->flags = xmi->xmi_flags;
		__entry->startoff1 = xmi->xmi_startoff1;
		__entry->startoff2 = xmi->xmi_startoff2;
		__entry->blockcount = xmi->xmi_blockcount;
		__entry->isize1 = xmi->xmi_ip1->i_disk_size;
		__entry->isize2 = xmi->xmi_ip2->i_disk_size;
		__entry->new_isize1 = xmi->xmi_isize1;
		__entry->new_isize2 = xmi->xmi_isize2;
	),
	TP_printk("dev %d:%d ino1 0x%llx fileoff1 0x%llx ino2 0x%llx fileoff2 0x%llx fsbcount 0x%llx flags (%s) isize1 0x%llx newisize1 0x%llx isize2 0x%llx newisize2 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino1, __entry->startoff1,
		  __entry->ino2, __entry->startoff2,
		  __entry->blockcount,
		  __print_flags_u64(__entry->flags, "|", XFS_EXCHMAPS_STRINGS),
		  __entry->isize1, __entry->new_isize1,
		  __entry->isize2, __entry->new_isize2)
);
#define DEFINE_EXCHMAPS_INTENT_EVENT(name) \
DEFINE_EVENT(xfs_exchmaps_intent_class, name, \
	TP_PROTO(struct xfs_mount *mp, const struct xfs_exchmaps_intent *xmi), \
	TP_ARGS(mp, xmi))
DEFINE_EXCHMAPS_INTENT_EVENT(xfs_exchmaps_defer);
DEFINE_EXCHMAPS_INTENT_EVENT(xfs_exchmaps_recover);
/* Trace one step of the extent-count delta computation (left/curr/new/right). */
TRACE_EVENT(xfs_exchmaps_delta_nextents_step,
	TP_PROTO(struct xfs_mount *mp,
		 const struct xfs_bmbt_irec *left,
		 const struct xfs_bmbt_irec *curr,
		 const struct xfs_bmbt_irec *new,
		 const struct xfs_bmbt_irec *right,
		 int delta, unsigned int state),
	TP_ARGS(mp, left, curr, new, right, delta, state),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_fileoff_t, loff)
		__field(xfs_fsblock_t, lstart)
		__field(xfs_filblks_t, lcount)
		__field(xfs_fileoff_t, coff)
		__field(xfs_fsblock_t, cstart)
		__field(xfs_filblks_t, ccount)
		__field(xfs_fileoff_t, noff)
		__field(xfs_fsblock_t, nstart)
		__field(xfs_filblks_t, ncount)
		__field(xfs_fileoff_t, roff)
		__field(xfs_fsblock_t, rstart)
		__field(xfs_filblks_t, rcount)
		__field(int, delta)
		__field(unsigned int, state)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->loff = left->br_startoff;
		__entry->lstart = left->br_startblock;
		__entry->lcount = left->br_blockcount;
		__entry->coff = curr->br_startoff;
		__entry->cstart = curr->br_startblock;
		__entry->ccount = curr->br_blockcount;
		__entry->noff = new->br_startoff;
		__entry->nstart = new->br_startblock;
		__entry->ncount = new->br_blockcount;
		__entry->roff = right->br_startoff;
		__entry->rstart = right->br_startblock;
		__entry->rcount = right->br_blockcount;
		__entry->delta = delta;
		__entry->state = state;
	),
	TP_printk("dev %d:%d left 0x%llx:0x%llx:0x%llx; curr 0x%llx:0x%llx:0x%llx <- new 0x%llx:0x%llx:0x%llx; right 0x%llx:0x%llx:0x%llx delta %d state 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->loff, __entry->lstart, __entry->lcount,
		  __entry->coff, __entry->cstart, __entry->ccount,
		  __entry->noff, __entry->nstart, __entry->ncount,
		  __entry->roff, __entry->rstart, __entry->rcount,
		  __entry->delta, __entry->state)
);
/* Trace the final per-inode extent-count deltas for an exchange request. */
TRACE_EVENT(xfs_exchmaps_delta_nextents,
	TP_PROTO(const struct xfs_exchmaps_req *req, int64_t d_nexts1,
		 int64_t d_nexts2),
	TP_ARGS(req, d_nexts1, d_nexts2),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino1)
		__field(xfs_ino_t, ino2)
		__field(xfs_extnum_t, nexts1)
		__field(xfs_extnum_t, nexts2)
		__field(int64_t, d_nexts1)
		__field(int64_t, d_nexts2)
	),
	TP_fast_assign(
		/* Current extent counts come from the fork the request targets. */
		int whichfork = xfs_exchmaps_reqfork(req);

		__entry->dev = req->ip1->i_mount->m_super->s_dev;
		__entry->ino1 = req->ip1->i_ino;
		__entry->ino2 = req->ip2->i_ino;
		__entry->nexts1 = xfs_ifork_ptr(req->ip1, whichfork)->if_nextents;
		__entry->nexts2 = xfs_ifork_ptr(req->ip2, whichfork)->if_nextents;
		__entry->d_nexts1 = d_nexts1;
		__entry->d_nexts2 = d_nexts2;
	),
	TP_printk("dev %d:%d ino1 0x%llx nexts %llu ino2 0x%llx nexts %llu delta1 %lld delta2 %lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino1, __entry->nexts1,
		  __entry->ino2, __entry->nexts2,
		  __entry->d_nexts1, __entry->d_nexts2)
);
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH