linux/fs/9p/vfs_addr.c
Kirill A. Shutemov 09cbfeaf1a mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
The PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time
ago with the promise that one day it would be possible to implement the page
cache with bigger chunks than PAGE_SIZE.

That promise never materialized, and it is unlikely that it ever will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to
PAGE_SIZE, and it's a constant source of confusion whether the
PAGE_CACHE_* or the PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.

Globally switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much
breakage to be doable.

Let's stop pretending that pages in page cache are special.  They are
not.

The changes are pretty straightforward (a small before/after sketch follows
the list):

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};

 - page_cache_get() -> get_page();

 - page_cache_release() -> put_page();
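
To make the mapping concrete, here is a purely illustrative before/after
sketch.  The helper below is made up for this illustration and is not part
of the patch; it assumes ordinary kernel context (<linux/pagemap.h>):

/* Before: page-cache units, plus the no-op shift between the two sizes. */
static inline pgoff_t example_idx_old(loff_t pos)
{
	return (pos >> PAGE_CACHE_SHIFT) << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

/* After the rules above have been applied: the shift pair collapses away. */
static inline pgoff_t example_idx_new(loff_t pos)
{
	return pos >> PAGE_SHIFT;
}

Likewise, page_cache_get(page)/page_cache_release(page) pairs simply become
get_page(page)/put_page(page).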

This patch contains automated changes generated with coccinelle using the
script below.  For some reason, coccinelle doesn't patch header files;
I ran spatch on them manually.
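
(For reference, the headers can be handled by invoking spatch directly, e.g.
something along the lines of "spatch --sp-file <script>.cocci --in-place
<header files>".  The exact flags differ between coccinelle versions, so
take this as an illustration rather than the literal command that was used.)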

The only adjustment after coccinelle is reverting the change to the
PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach.  I'll
fix them manually in a separate patch.  Comments and documentation will
also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-04 10:41:08 -07:00


/*
 *  linux/fs/9p/vfs_addr.c
 *
 *  This file contains vfs address (mmap) ops for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_fid_readpage - read an entire page in from 9P
 *
 * @fid: fid being read
 * @page: structure to page
 *
 */
static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
	struct iov_iter to;
	int retval, err;

	p9_debug(P9_DEBUG_VFS, "\n");

	BUG_ON(!PageLocked(page));

	retval = v9fs_readpage_from_fscache(inode, page);
	if (retval == 0)
		return retval;

	iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE);

	retval = p9_client_read(fid, page_offset(page), &to, &err);
	if (err) {
		v9fs_uncache_page(inode, page);
		retval = err;
		goto done;
	}

	zero_user(page, retval, PAGE_SIZE - retval);
	flush_dcache_page(page);
	SetPageUptodate(page);

	v9fs_readpage_to_fscache(inode, page);
	retval = 0;

done:
	unlock_page(page);
	return retval;
}

/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 *
 * @filp: file being read
 * @page: structure to page
 *
 */
static int v9fs_vfs_readpage(struct file *filp, struct page *page)
{
	return v9fs_fid_readpage(filp->private_data, page);
}

/**
 * v9fs_vfs_readpages - read a set of pages from 9P
 *
 * @filp: file being read
 * @mapping: the address space
 * @pages: list of pages to read
 * @nr_pages: count of pages to read
 *
 */
static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
			      struct list_head *pages, unsigned nr_pages)
{
	int ret = 0;
	struct inode *inode;

	inode = mapping->host;
	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);

	ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);
	if (ret == 0)
		return ret;

	ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
	p9_debug(P9_DEBUG_VFS, " = %d\n", ret);

	return ret;
}

/**
 * v9fs_release_page - release the private state associated with a page
 *
 * Returns 1 if the page can be released, false otherwise.
 */
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;
	return v9fs_fscache_release_page(page, gfp);
}

/**
 * v9fs_invalidate_page - Invalidate a page completely or partially
 *
 * @page: structure to page
 * @offset: offset in the page
 * @length: length of the range to invalidate
 */
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	/*
	 * If called with zero offset and the full page length, we should
	 * release the private state associated with the page
	 */
	if (offset == 0 && length == PAGE_SIZE)
		v9fs_fscache_invalidate_page(page);
}

static int v9fs_vfs_writepage_locked(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);
	loff_t size = i_size_read(inode);
	struct iov_iter from;
	struct bio_vec bvec;
	int err, len;

	if (page->index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;

	bvec.bv_page = page;
	bvec.bv_offset = 0;
	bvec.bv_len = len;
	iov_iter_bvec(&from, ITER_BVEC | WRITE, &bvec, 1, len);

	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	set_page_writeback(page);

	p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err);

	end_page_writeback(page);
	return err;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	p9_debug(P9_DEBUG_VFS, "page %p\n", page);

	retval = v9fs_vfs_writepage_locked(page);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			redirty_page_for_writepage(wbc, page);
			retval = 0;
		} else {
			SetPageError(page);
			mapping_set_error(page->mapping, retval);
		}
	} else
		retval = 0;

	unlock_page(page);
	return retval;
}

/**
 * v9fs_launder_page - Writeback a dirty page
 * Returns 0 on success.
 */
static int v9fs_launder_page(struct page *page)
{
	int retval;
	struct inode *inode = page->mapping->host;

	v9fs_fscache_wait_on_page_write(inode, page);
	if (clear_page_dirty_for_io(page)) {
		retval = v9fs_vfs_writepage_locked(page);
		if (retval)
			return retval;
	}
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: data to read or write
 * @pos: offset in file to begin the operation
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write
 * fails with an error.
 *
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int retval = 0;
	struct page *page;
	struct v9fs_inode *v9inode;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct inode *inode = mapping->host;

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	v9inode = V9FS_I(inode);
start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		retval = -ENOMEM;
		goto out;
	}

	BUG_ON(!v9inode->writeback_fid);
	if (PageUptodate(page))
		goto out;

	if (len == PAGE_SIZE)
		goto out;

	retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
	put_page(page);
	if (!retval)
		goto start;
out:
	*pagep = page;
	return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct inode *inode = page->mapping->host;

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (unlikely(copied < len)) {
		/*
		 * zero out the rest of the area
		 */
		unsigned from = pos & (PAGE_SIZE - 1);

		zero_user(page, from + copied, len - copied);
		flush_dcache_page(page);
	}

	if (!PageUptodate(page))
		SetPageUptodate(page);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
	}
	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	return copied;
}

const struct address_space_operations v9fs_addr_operations = {
	.readpage = v9fs_vfs_readpage,
	.readpages = v9fs_vfs_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidatepage = v9fs_invalidate_page,
	.launder_page = v9fs_launder_page,
	.direct_IO = v9fs_direct_IO,
};