Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_addr.c | 18
-rw-r--r--  fs/9p/vfs_file.c | 4
-rw-r--r--  fs/9p/vfs_super.c | 2
-rw-r--r--  fs/affs/file.c | 26
-rw-r--r--  fs/afs/dir.c | 2
-rw-r--r--  fs/afs/file.c | 4
-rw-r--r--  fs/afs/mntpt.c | 6
-rw-r--r--  fs/afs/super.c | 4
-rw-r--r--  fs/afs/write.c | 26
-rw-r--r--  fs/binfmt_elf.c | 2
-rw-r--r--  fs/binfmt_elf_fdpic.c | 2
-rw-r--r--  fs/block_dev.c | 4
-rw-r--r--  fs/btrfs/check-integrity.c | 64
-rw-r--r--  fs/btrfs/compression.c | 84
-rw-r--r--  fs/btrfs/ctree.c | 12
-rw-r--r--  fs/btrfs/dev-replace.c | 2
-rw-r--r--  fs/btrfs/disk-io.c | 59
-rw-r--r--  fs/btrfs/extent-tree.c | 25
-rw-r--r--  fs/btrfs/extent_io.c | 266
-rw-r--r--  fs/btrfs/extent_io.h | 6
-rw-r--r--  fs/btrfs/file-item.c | 4
-rw-r--r--  fs/btrfs/file.c | 51
-rw-r--r--  fs/btrfs/free-space-cache.c | 30
-rw-r--r--  fs/btrfs/inode-map.c | 10
-rw-r--r--  fs/btrfs/inode.c | 104
-rw-r--r--  fs/btrfs/ioctl.c | 86
-rw-r--r--  fs/btrfs/lzo.c | 32
-rw-r--r--  fs/btrfs/qgroup.c | 63
-rw-r--r--  fs/btrfs/raid56.c | 28
-rw-r--r--  fs/btrfs/reada.c | 30
-rw-r--r--  fs/btrfs/relocation.c | 17
-rw-r--r--  fs/btrfs/scrub.c | 24
-rw-r--r--  fs/btrfs/send.c | 16
-rw-r--r--  fs/btrfs/struct-funcs.c | 4
-rw-r--r--  fs/btrfs/tests/extent-io-tests.c | 44
-rw-r--r--  fs/btrfs/tests/free-space-tests.c | 2
-rw-r--r--  fs/btrfs/tree-log.c | 137
-rw-r--r--  fs/btrfs/volumes.c | 14
-rw-r--r--  fs/btrfs/zlib.c | 38
-rw-r--r--  fs/buffer.c | 100
-rw-r--r--  fs/cachefiles/rdwr.c | 38
-rw-r--r--  fs/ceph/addr.c | 114
-rw-r--r--  fs/ceph/caps.c | 2
-rw-r--r--  fs/ceph/dir.c | 4
-rw-r--r--  fs/ceph/file.c | 32
-rw-r--r--  fs/ceph/inode.c | 6
-rw-r--r--  fs/ceph/mds_client.c | 2
-rw-r--r--  fs/ceph/mds_client.h | 2
-rw-r--r--  fs/ceph/super.c | 8
-rw-r--r--  fs/cifs/cifsfs.c | 2
-rw-r--r--  fs/cifs/cifsglob.h | 4
-rw-r--r--  fs/cifs/cifssmb.c | 16
-rw-r--r--  fs/cifs/connect.c | 2
-rw-r--r--  fs/cifs/file.c | 96
-rw-r--r--  fs/cifs/inode.c | 10
-rw-r--r--  fs/configfs/mount.c | 4
-rw-r--r--  fs/cramfs/README | 26
-rw-r--r--  fs/cramfs/inode.c | 32
-rw-r--r--  fs/crypto/crypto.c | 8
-rw-r--r--  fs/dax.c | 34
-rw-r--r--  fs/dcache.c | 5
-rw-r--r--  fs/direct-io.c | 26
-rw-r--r--  fs/dlm/config.c | 3
-rw-r--r--  fs/dlm/lowcomms.c | 8
-rw-r--r--  fs/ecryptfs/crypto.c | 22
-rw-r--r--  fs/ecryptfs/inode.c | 8
-rw-r--r--  fs/ecryptfs/keystore.c | 2
-rw-r--r--  fs/ecryptfs/main.c | 8
-rw-r--r--  fs/ecryptfs/mmap.c | 44
-rw-r--r--  fs/ecryptfs/read_write.c | 14
-rw-r--r--  fs/efivarfs/super.c | 4
-rw-r--r--  fs/exofs/dir.c | 30
-rw-r--r--  fs/exofs/inode.c | 34
-rw-r--r--  fs/exofs/namei.c | 4
-rw-r--r--  fs/ext2/dir.c | 36
-rw-r--r--  fs/ext2/namei.c | 6
-rw-r--r--  fs/ext4/crypto.c | 57
-rw-r--r--  fs/ext4/dir.c | 4
-rw-r--r--  fs/ext4/ext4.h | 33
-rw-r--r--  fs/ext4/file.c | 16
-rw-r--r--  fs/ext4/inline.c | 18
-rw-r--r--  fs/ext4/inode.c | 176
-rw-r--r--  fs/ext4/mballoc.c | 40
-rw-r--r--  fs/ext4/move_extent.c | 27
-rw-r--r--  fs/ext4/page-io.c | 18
-rw-r--r--  fs/ext4/readpage.c | 14
-rw-r--r--  fs/ext4/super.c | 65
-rw-r--r--  fs/ext4/symlink.c | 4
-rw-r--r--  fs/ext4/xattr.c | 32
-rw-r--r--  fs/f2fs/data.c | 52
-rw-r--r--  fs/f2fs/debug.c | 6
-rw-r--r--  fs/f2fs/dir.c | 4
-rw-r--r--  fs/f2fs/f2fs.h | 2
-rw-r--r--  fs/f2fs/file.c | 74
-rw-r--r--  fs/f2fs/inline.c | 10
-rw-r--r--  fs/f2fs/namei.c | 16
-rw-r--r--  fs/f2fs/node.c | 10
-rw-r--r--  fs/f2fs/recovery.c | 2
-rw-r--r--  fs/f2fs/segment.c | 16
-rw-r--r--  fs/f2fs/super.c | 108
-rw-r--r--  fs/freevxfs/vxfs_immed.c | 4
-rw-r--r--  fs/freevxfs/vxfs_lookup.c | 12
-rw-r--r--  fs/freevxfs/vxfs_subr.c | 2
-rw-r--r--  fs/fs-writeback.c | 2
-rw-r--r--  fs/fscache/page.c | 10
-rw-r--r--  fs/fuse/dev.c | 26
-rw-r--r--  fs/fuse/file.c | 72
-rw-r--r--  fs/fuse/inode.c | 16
-rw-r--r--  fs/gfs2/aops.c | 44
-rw-r--r--  fs/gfs2/bmap.c | 12
-rw-r--r--  fs/gfs2/file.c | 16
-rw-r--r--  fs/gfs2/meta_io.c | 4
-rw-r--r--  fs/gfs2/quota.c | 14
-rw-r--r--  fs/gfs2/rgrp.c | 5
-rw-r--r--  fs/hfs/bnode.c | 12
-rw-r--r--  fs/hfs/btree.c | 20
-rw-r--r--  fs/hfs/inode.c | 8
-rw-r--r--  fs/hfsplus/bitmap.c | 2
-rw-r--r--  fs/hfsplus/bnode.c | 90
-rw-r--r--  fs/hfsplus/btree.c | 22
-rw-r--r--  fs/hfsplus/inode.c | 8
-rw-r--r--  fs/hfsplus/super.c | 2
-rw-r--r--  fs/hfsplus/xattr.c | 6
-rw-r--r--  fs/hostfs/hostfs_kern.c | 18
-rw-r--r--  fs/hugetlbfs/inode.c | 10
-rw-r--r--  fs/isofs/compress.c | 36
-rw-r--r--  fs/isofs/inode.c | 2
-rw-r--r--  fs/jbd2/commit.c | 4
-rw-r--r--  fs/jbd2/journal.c | 2
-rw-r--r--  fs/jbd2/transaction.c | 4
-rw-r--r--  fs/jffs2/debug.c | 8
-rw-r--r--  fs/jffs2/file.c | 23
-rw-r--r--  fs/jffs2/fs.c | 8
-rw-r--r--  fs/jffs2/gc.c | 8
-rw-r--r--  fs/jffs2/nodelist.c | 8
-rw-r--r--  fs/jffs2/write.c | 7
-rw-r--r--  fs/jfs/jfs_metapage.c | 42
-rw-r--r--  fs/jfs/jfs_metapage.h | 4
-rw-r--r--  fs/jfs/super.c | 2
-rw-r--r--  fs/kernfs/mount.c | 4
-rw-r--r--  fs/libfs.c | 24
-rw-r--r--  fs/logfs/dev_bdev.c | 2
-rw-r--r--  fs/logfs/dev_mtd.c | 10
-rw-r--r--  fs/logfs/dir.c | 12
-rw-r--r--  fs/logfs/file.c | 26
-rw-r--r--  fs/logfs/readwrite.c | 20
-rw-r--r--  fs/logfs/segment.c | 28
-rw-r--r--  fs/logfs/super.c | 16
-rw-r--r--  fs/minix/dir.c | 18
-rw-r--r--  fs/minix/namei.c | 4
-rw-r--r--  fs/mpage.c | 22
-rw-r--r--  fs/namei.c | 10
-rw-r--r--  fs/ncpfs/dir.c | 10
-rw-r--r--  fs/ncpfs/ncplib_kernel.h | 2
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c | 24
-rw-r--r--  fs/nfs/blocklayout/blocklayout.h | 4
-rw-r--r--  fs/nfs/client.c | 8
-rw-r--r--  fs/nfs/dir.c | 10
-rw-r--r--  fs/nfs/direct.c | 8
-rw-r--r--  fs/nfs/file.c | 20
-rw-r--r--  fs/nfs/inode.c | 2
-rw-r--r--  fs/nfs/internal.h | 6
-rw-r--r--  fs/nfs/nfs4file.c | 4
-rw-r--r--  fs/nfs/nfs4xdr.c | 2
-rw-r--r--  fs/nfs/objlayout/objio_osd.c | 2
-rw-r--r--  fs/nfs/pagelist.c | 6
-rw-r--r--  fs/nfs/pnfs.c | 6
-rw-r--r--  fs/nfs/read.c | 16
-rw-r--r--  fs/nfs/write.c | 4
-rw-r--r--  fs/nilfs2/bmap.c | 2
-rw-r--r--  fs/nilfs2/btnode.c | 10
-rw-r--r--  fs/nilfs2/dir.c | 32
-rw-r--r--  fs/nilfs2/gcinode.c | 2
-rw-r--r--  fs/nilfs2/inode.c | 4
-rw-r--r--  fs/nilfs2/mdt.c | 14
-rw-r--r--  fs/nilfs2/namei.c | 4
-rw-r--r--  fs/nilfs2/page.c | 18
-rw-r--r--  fs/nilfs2/recovery.c | 4
-rw-r--r--  fs/nilfs2/segment.c | 2
-rw-r--r--  fs/ntfs/aops.c | 50
-rw-r--r--  fs/ntfs/aops.h | 4
-rw-r--r--  fs/ntfs/attrib.c | 28
-rw-r--r--  fs/ntfs/bitmap.c | 10
-rw-r--r--  fs/ntfs/compress.c | 77
-rw-r--r--  fs/ntfs/dir.c | 56
-rw-r--r--  fs/ntfs/file.c | 56
-rw-r--r--  fs/ntfs/index.c | 14
-rw-r--r--  fs/ntfs/inode.c | 12
-rw-r--r--  fs/ntfs/lcnalloc.c | 6
-rw-r--r--  fs/ntfs/logfile.c | 16
-rw-r--r--  fs/ntfs/mft.c | 34
-rw-r--r--  fs/ntfs/ntfs.h | 2
-rw-r--r--  fs/ntfs/super.c | 72
-rw-r--r--  fs/ocfs2/alloc.c | 28
-rw-r--r--  fs/ocfs2/aops.c | 50
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 10
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c | 4
-rw-r--r--  fs/ocfs2/file.c | 14
-rw-r--r--  fs/ocfs2/mmap.c | 6
-rw-r--r--  fs/ocfs2/ocfs2.h | 20
-rw-r--r--  fs/ocfs2/quota_global.c | 11
-rw-r--r--  fs/ocfs2/refcounttree.c | 24
-rw-r--r--  fs/ocfs2/super.c | 4
-rw-r--r--  fs/orangefs/dir.c | 12
-rw-r--r--  fs/orangefs/inode.c | 26
-rw-r--r--  fs/orangefs/orangefs-bufmap.c | 4
-rw-r--r--  fs/orangefs/orangefs-debugfs.c | 3
-rw-r--r--  fs/orangefs/orangefs-utils.c | 8
-rw-r--r--  fs/orangefs/protocol.h | 35
-rw-r--r--  fs/orangefs/xattr.c | 19
-rw-r--r--  fs/overlayfs/super.c | 33
-rw-r--r--  fs/pipe.c | 6
-rw-r--r--  fs/proc/task_mmu.c | 2
-rw-r--r--  fs/proc/vmcore.c | 4
-rw-r--r--  fs/pstore/inode.c | 4
-rw-r--r--  fs/qnx6/dir.c | 16
-rw-r--r--  fs/qnx6/inode.c | 4
-rw-r--r--  fs/qnx6/qnx6.h | 2
-rw-r--r--  fs/quota/dquot.c | 13
-rw-r--r--  fs/ramfs/inode.c | 4
-rw-r--r--  fs/reiserfs/file.c | 4
-rw-r--r--  fs/reiserfs/inode.c | 44
-rw-r--r--  fs/reiserfs/ioctl.c | 4
-rw-r--r--  fs/reiserfs/journal.c | 6
-rw-r--r--  fs/reiserfs/stree.c | 4
-rw-r--r--  fs/reiserfs/tail_conversion.c | 4
-rw-r--r--  fs/reiserfs/xattr.c | 18
-rw-r--r--  fs/splice.c | 32
-rw-r--r--  fs/squashfs/block.c | 4
-rw-r--r--  fs/squashfs/cache.c | 18
-rw-r--r--  fs/squashfs/decompressor.c | 2
-rw-r--r--  fs/squashfs/file.c | 24
-rw-r--r--  fs/squashfs/file_direct.c | 22
-rw-r--r--  fs/squashfs/lz4_wrapper.c | 8
-rw-r--r--  fs/squashfs/lzo_wrapper.c | 8
-rw-r--r--  fs/squashfs/page_actor.c | 4
-rw-r--r--  fs/squashfs/page_actor.h | 2
-rw-r--r--  fs/squashfs/super.c | 2
-rw-r--r--  fs/squashfs/symlink.c | 6
-rw-r--r--  fs/squashfs/xz_wrapper.c | 4
-rw-r--r--  fs/squashfs/zlib_wrapper.c | 4
-rw-r--r--  fs/sync.c | 4
-rw-r--r--  fs/sysv/dir.c | 18
-rw-r--r--  fs/sysv/namei.c | 4
-rw-r--r--  fs/ubifs/file.c | 54
-rw-r--r--  fs/ubifs/super.c | 6
-rw-r--r--  fs/ubifs/ubifs.h | 4
-rw-r--r--  fs/udf/file.c | 6
-rw-r--r--  fs/udf/inode.c | 4
-rw-r--r--  fs/ufs/balloc.c | 6
-rw-r--r--  fs/ufs/dir.c | 32
-rw-r--r--  fs/ufs/inode.c | 4
-rw-r--r--  fs/ufs/namei.c | 6
-rw-r--r--  fs/ufs/util.c | 4
-rw-r--r--  fs/ufs/util.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 4
-rw-r--r--  fs/xfs/xfs_aops.c | 22
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 4
-rw-r--r--  fs/xfs/xfs_file.c | 12
-rw-r--r--  fs/xfs/xfs_linux.h | 2
-rw-r--r--  fs/xfs/xfs_mount.c | 2
-rw-r--r--  fs/xfs/xfs_mount.h | 4
-rw-r--r--  fs/xfs/xfs_pnfs.c | 4
-rw-r--r--  fs/xfs/xfs_super.c | 8
264 files changed, 2778 insertions, 2434 deletions
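
The bulk of this commit is mechanical: PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT and
PAGE_CACHE_MASK were defined as aliases of PAGE_SIZE, PAGE_SHIFT and
PAGE_MASK, and page_cache_get()/page_cache_release() were thin wrappers
around get_page()/put_page(), so dropping the aliases changes no behaviour.
A minimal sketch of the recurring before/after idiom (illustrative only,
not part of the commit):

	/* Before: page-cache arithmetic spelled with its own macros. */
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;     /* byte offset -> page index */
	unsigned from = pos & (PAGE_CACHE_SIZE - 1); /* offset within that page   */
	page_cache_release(page);                    /* drop the page reference   */

	/* After: same arithmetic, the aliases are gone. */
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	put_page(page);
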
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index e9e04376c52c..ac9225e86bf3 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -153,7 +153,7 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset,
* If called with zero offset, we should release
* the private state associated with the page
*/
- if (offset == 0 && length == PAGE_CACHE_SIZE)
+ if (offset == 0 && length == PAGE_SIZE)
v9fs_fscache_invalidate_page(page);
}
@@ -166,10 +166,10 @@ static int v9fs_vfs_writepage_locked(struct page *page)
struct bio_vec bvec;
int err, len;
- if (page->index == size >> PAGE_CACHE_SHIFT)
- len = size & ~PAGE_CACHE_MASK;
+ if (page->index == size >> PAGE_SHIFT)
+ len = size & ~PAGE_MASK;
else
- len = PAGE_CACHE_SIZE;
+ len = PAGE_SIZE;
bvec.bv_page = page;
bvec.bv_offset = 0;
@@ -271,7 +271,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
int retval = 0;
struct page *page;
struct v9fs_inode *v9inode;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
struct inode *inode = mapping->host;
@@ -288,11 +288,11 @@ start:
if (PageUptodate(page))
goto out;
- if (len == PAGE_CACHE_SIZE)
+ if (len == PAGE_SIZE)
goto out;
retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
- page_cache_release(page);
+ put_page(page);
if (!retval)
goto start;
out:
@@ -313,7 +313,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
/*
* zero out the rest of the area
*/
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
zero_user(page, from + copied, len - copied);
flush_dcache_page(page);
@@ -331,7 +331,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
}
set_page_dirty(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return copied;
}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index eadc894faea2..b84c291ba1eb 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -421,8 +421,8 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file_inode(file);
loff_t i_size;
unsigned long pg_start, pg_end;
- pg_start = origin >> PAGE_CACHE_SHIFT;
- pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT;
+ pg_start = origin >> PAGE_SHIFT;
+ pg_end = (origin + retval - 1) >> PAGE_SHIFT;
if (inode->i_mapping && inode->i_mapping->nrpages)
invalidate_inode_pages2_range(inode->i_mapping,
pg_start, pg_end);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index bf495cedec26..de3ed8629196 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -87,7 +87,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_op = &v9fs_super_ops;
sb->s_bdi = &v9ses->bdi;
if (v9ses->cache)
- sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE;
+ sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
if (!v9ses->cache)
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 22fc7c802d69..0cde550050e8 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -510,9 +510,9 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
page->index, to);
- BUG_ON(to > PAGE_CACHE_SIZE);
+ BUG_ON(to > PAGE_SIZE);
bsize = AFFS_SB(sb)->s_data_blksize;
- tmp = page->index << PAGE_CACHE_SHIFT;
+ tmp = page->index << PAGE_SHIFT;
bidx = tmp / bsize;
boff = tmp % bsize;
@@ -613,10 +613,10 @@ affs_readpage_ofs(struct file *file, struct page *page)
int err;
pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
- to = PAGE_CACHE_SIZE;
- if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
- to = inode->i_size & ~PAGE_CACHE_MASK;
- memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
+ to = PAGE_SIZE;
+ if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
+ to = inode->i_size & ~PAGE_MASK;
+ memset(page_address(page) + to, 0, PAGE_SIZE - to);
}
err = affs_do_readpage_ofs(page, to);
@@ -646,7 +646,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
return err;
}
- index = pos >> PAGE_CACHE_SHIFT;
+ index = pos >> PAGE_SHIFT;
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
@@ -656,10 +656,10 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
return 0;
/* XXX: inefficient but safe in the face of short writes */
- err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE);
+ err = affs_do_readpage_ofs(page, PAGE_SIZE);
if (err) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
return err;
}
@@ -677,7 +677,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
u32 tmp;
int written;
- from = pos & (PAGE_CACHE_SIZE - 1);
+ from = pos & (PAGE_SIZE - 1);
to = pos + len;
/*
* XXX: not sure if this can handle short copies (len < copied), but
@@ -692,7 +692,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
bh = NULL;
written = 0;
- tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+ tmp = (page->index << PAGE_SHIFT) + from;
bidx = tmp / bsize;
boff = tmp % bsize;
if (boff) {
@@ -788,13 +788,13 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
done:
affs_brelse(bh);
- tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+ tmp = (page->index << PAGE_SHIFT) + from;
if (tmp > inode->i_size)
inode->i_size = AFFS_I(inode)->mmu_private = tmp;
err_first_bh:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return written;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e10e17788f06..5fda2bc53cd7 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -181,7 +181,7 @@ error:
static inline void afs_dir_put_page(struct page *page)
{
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
/*
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 999bc3caec92..6344aee4ac4b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -164,7 +164,7 @@ int afs_page_filler(void *data, struct page *page)
_debug("cache said ENOBUFS");
default:
go_on:
- offset = page->index << PAGE_CACHE_SHIFT;
+ offset = page->index << PAGE_SHIFT;
len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
/* read the contents of the file from the server into the
@@ -319,7 +319,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
BUG_ON(!PageLocked(page));
/* we clean up only if the entire page is being invalidated */
- if (offset == 0 && length == PAGE_CACHE_SIZE) {
+ if (offset == 0 && length == PAGE_SIZE) {
#ifdef CONFIG_AFS_FSCACHE
if (PageFsCache(page)) {
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index ccd0b212e82a..81dd075356b9 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -93,7 +93,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
kunmap(page);
out_free:
- page_cache_release(page);
+ put_page(page);
out:
_leave(" = %d", ret);
return ret;
@@ -189,7 +189,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
buf = kmap_atomic(page);
memcpy(devname, buf, size);
kunmap_atomic(buf);
- page_cache_release(page);
+ put_page(page);
page = NULL;
}
@@ -211,7 +211,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
return mnt;
error:
- page_cache_release(page);
+ put_page(page);
error_no_page:
free_page((unsigned long) options);
error_no_options:
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 81afefe7d8a6..fbdb022b75a2 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -315,8 +315,8 @@ static int afs_fill_super(struct super_block *sb,
_enter("");
/* fill in the superblock */
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = AFS_FS_MAGIC;
sb->s_op = &afs_super_ops;
sb->s_bdi = &as->volume->bdi;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index dfef94f70667..65de439bdc4f 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -93,10 +93,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
_enter(",,%llu", (unsigned long long)pos);
i_size = i_size_read(&vnode->vfs_inode);
- if (pos + PAGE_CACHE_SIZE > i_size)
+ if (pos + PAGE_SIZE > i_size)
len = i_size - pos;
else
- len = PAGE_CACHE_SIZE;
+ len = PAGE_SIZE;
ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
if (ret < 0) {
@@ -123,9 +123,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
struct page *page;
struct key *key = file->private_data;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
int ret;
_enter("{%x:%u},{%lx},%u,%u",
@@ -151,8 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
*pagep = page;
/* page won't leak in error case: it eventually gets cleaned off LRU */
- if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
- ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
+ if (!PageUptodate(page) && len != PAGE_SIZE) {
+ ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
if (ret < 0) {
kfree(candidate);
_leave(" = %d [prep]", ret);
@@ -266,7 +266,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
if (PageDirty(page))
_debug("dirtied");
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return copied;
}
@@ -480,7 +480,7 @@ static int afs_writepages_region(struct address_space *mapping,
if (page->index > end) {
*_next = index;
- page_cache_release(page);
+ put_page(page);
_leave(" = 0 [%lx]", *_next);
return 0;
}
@@ -494,7 +494,7 @@ static int afs_writepages_region(struct address_space *mapping,
if (page->mapping != mapping) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
continue;
}
@@ -515,7 +515,7 @@ static int afs_writepages_region(struct address_space *mapping,
ret = afs_write_back_from_locked_page(wb, page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (ret < 0) {
_leave(" = %d", ret);
return ret;
@@ -551,13 +551,13 @@ int afs_writepages(struct address_space *mapping,
&next);
mapping->writeback_index = next;
} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
- end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT);
+ end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
ret = afs_writepages_region(mapping, wbc, 0, end, &next);
if (wbc->nr_to_write > 0)
mapping->writeback_index = next;
} else {
- start = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ start = wbc->range_start >> PAGE_SHIFT;
+ end = wbc->range_end >> PAGE_SHIFT;
ret = afs_writepages_region(mapping, wbc, start, end, &next);
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7d914c67a9d0..81381cc0dd17 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2292,7 +2292,7 @@ static int elf_core_dump(struct coredump_params *cprm)
void *kaddr = kmap(page);
stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
kunmap(page);
- page_cache_release(page);
+ put_page(page);
} else
stop = !dump_skip(cprm, PAGE_SIZE);
if (stop)
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index b1adb92e69de..083ea2bc60ab 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1533,7 +1533,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm)
void *kaddr = kmap(page);
res = dump_emit(cprm, kaddr, PAGE_SIZE);
kunmap(page);
- page_cache_release(page);
+ put_page(page);
} else {
res = dump_skip(cprm, PAGE_SIZE);
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3172c4e2f502..20a2c02b77c4 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -331,7 +331,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return ret;
}
@@ -1149,7 +1149,7 @@ void bd_set_size(struct block_device *bdev, loff_t size)
inode_lock(bdev->bd_inode);
i_size_write(bdev->bd_inode, size);
inode_unlock(bdev->bd_inode);
- while (bsize < PAGE_CACHE_SIZE) {
+ while (bsize < PAGE_SIZE) {
if (size & bsize)
break;
bsize <<= 1;
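
(The bd_set_size() hunk above keeps a compact idiom: starting from the
current block size, double it while it still evenly divides the device
size, capping at PAGE_SIZE. In isolation, as a hedged sketch -- the
function name is made up for illustration:)

	/* Find the largest power-of-two block size, at most PAGE_SIZE,
	 * that evenly divides @size. */
	static unsigned int largest_blocksize(loff_t size, unsigned int bsize)
	{
		while (bsize < PAGE_SIZE) {
			if (size & bsize)   /* size is an odd multiple of bsize */
				break;
			bsize <<= 1;
		}
		return bsize;
	}
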
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index e34a71b3e225..516e19d1d202 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -757,7 +757,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
BUG_ON(NULL == l);
ret = btrfsic_read_block(state, &tmp_next_block_ctx);
- if (ret < (int)PAGE_CACHE_SIZE) {
+ if (ret < (int)PAGE_SIZE) {
printk(KERN_INFO
"btrfsic: read @logical %llu failed!\n",
tmp_next_block_ctx.start);
@@ -1231,15 +1231,15 @@ static void btrfsic_read_from_block_data(
size_t offset_in_page;
char *kaddr;
char *dst = (char *)dstv;
- size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT;
+ size_t start_offset = block_ctx->start & ((u64)PAGE_SIZE - 1);
+ unsigned long i = (start_offset + offset) >> PAGE_SHIFT;
WARN_ON(offset + len > block_ctx->len);
- offset_in_page = (start_offset + offset) & (PAGE_CACHE_SIZE - 1);
+ offset_in_page = (start_offset + offset) & (PAGE_SIZE - 1);
while (len > 0) {
- cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
- BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE));
+ cur = min(len, ((size_t)PAGE_SIZE - offset_in_page));
+ BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
kaddr = block_ctx->datav[i];
memcpy(dst, kaddr + offset_in_page, cur);
@@ -1605,8 +1605,8 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
BUG_ON(!block_ctx->datav);
BUG_ON(!block_ctx->pagev);
- num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
while (num_pages > 0) {
num_pages--;
if (block_ctx->datav[num_pages]) {
@@ -1637,15 +1637,15 @@ static int btrfsic_read_block(struct btrfsic_state *state,
BUG_ON(block_ctx->datav);
BUG_ON(block_ctx->pagev);
BUG_ON(block_ctx->mem_to_free);
- if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
+ if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
printk(KERN_INFO
"btrfsic: read_block() with unaligned bytenr %llu\n",
block_ctx->dev_bytenr);
return -1;
}
- num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
sizeof(*block_ctx->pagev)) *
num_pages, GFP_NOFS);
@@ -1676,8 +1676,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
for (j = i; j < num_pages; j++) {
ret = bio_add_page(bio, block_ctx->pagev[j],
- PAGE_CACHE_SIZE, 0);
- if (PAGE_CACHE_SIZE != ret)
+ PAGE_SIZE, 0);
+ if (PAGE_SIZE != ret)
break;
}
if (j == i) {
@@ -1693,7 +1693,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
return -1;
}
bio_put(bio);
- dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
+ dev_bytenr += (j - i) * PAGE_SIZE;
i = j;
}
for (i = 0; i < num_pages; i++) {
@@ -1769,9 +1769,9 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
u32 crc = ~(u32)0;
unsigned int i;
- if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
+ if (num_pages * PAGE_SIZE < state->metablock_size)
return 1; /* not metadata */
- num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
+ num_pages = state->metablock_size >> PAGE_SHIFT;
h = (struct btrfs_header *)datav[0];
if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
@@ -1779,8 +1779,8 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
for (i = 0; i < num_pages; i++) {
u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
- size_t sublen = i ? PAGE_CACHE_SIZE :
- (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
+ size_t sublen = i ? PAGE_SIZE :
+ (PAGE_SIZE - BTRFS_CSUM_SIZE);
crc = btrfs_crc32c(crc, data, sublen);
}
@@ -1826,14 +1826,14 @@ again:
if (block->is_superblock) {
bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
mapped_datav[0]);
- if (num_pages * PAGE_CACHE_SIZE <
+ if (num_pages * PAGE_SIZE <
BTRFS_SUPER_INFO_SIZE) {
printk(KERN_INFO
"btrfsic: cannot work with too short bios!\n");
return;
}
is_metadata = 1;
- BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
+ BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_SIZE - 1));
processed_len = BTRFS_SUPER_INFO_SIZE;
if (state->print_mask &
BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
@@ -1844,7 +1844,7 @@ again:
}
if (is_metadata) {
if (!block->is_superblock) {
- if (num_pages * PAGE_CACHE_SIZE <
+ if (num_pages * PAGE_SIZE <
state->metablock_size) {
printk(KERN_INFO
"btrfsic: cannot work with too short bios!\n");
@@ -1880,7 +1880,7 @@ again:
}
block->logical_bytenr = bytenr;
} else {
- if (num_pages * PAGE_CACHE_SIZE <
+ if (num_pages * PAGE_SIZE <
state->datablock_size) {
printk(KERN_INFO
"btrfsic: cannot work with too short bios!\n");
@@ -2013,7 +2013,7 @@ again:
block->logical_bytenr = bytenr;
block->is_metadata = 1;
if (block->is_superblock) {
- BUG_ON(PAGE_CACHE_SIZE !=
+ BUG_ON(PAGE_SIZE !=
BTRFS_SUPER_INFO_SIZE);
ret = btrfsic_process_written_superblock(
state,
@@ -2172,8 +2172,8 @@ again:
continue_loop:
BUG_ON(!processed_len);
dev_bytenr += processed_len;
- mapped_datav += processed_len >> PAGE_CACHE_SHIFT;
- num_pages -= processed_len >> PAGE_CACHE_SHIFT;
+ mapped_datav += processed_len >> PAGE_SHIFT;
+ num_pages -= processed_len >> PAGE_SHIFT;
goto again;
}
@@ -2954,7 +2954,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
goto leave;
cur_bytenr = dev_bytenr;
for (i = 0; i < bio->bi_vcnt; i++) {
- BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
+ BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE);
mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
if (!mapped_datav[i]) {
while (i > 0) {
@@ -3037,16 +3037,16 @@ int btrfsic_mount(struct btrfs_root *root,
struct list_head *dev_head = &fs_devices->devices;
struct btrfs_device *device;
- if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
+ if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
printk(KERN_INFO
- "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
- root->nodesize, PAGE_CACHE_SIZE);
+ "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
+ root->nodesize, PAGE_SIZE);
return -1;
}
- if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
+ if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
printk(KERN_INFO
- "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
- root->sectorsize, PAGE_CACHE_SIZE);
+ "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
+ root->sectorsize, PAGE_SIZE);
return -1;
}
state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 3346cd8f9910..ff61a41ac90b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -119,7 +119,7 @@ static int check_compressed_csum(struct inode *inode,
csum = ~(u32)0;
kaddr = kmap_atomic(page);
- csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
+ csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
btrfs_csum_final(csum, (char *)&csum);
kunmap_atomic(kaddr);
@@ -190,7 +190,7 @@ csum_failed:
for (index = 0; index < cb->nr_pages; index++) {
page = cb->compressed_pages[index];
page->mapping = NULL;
- page_cache_release(page);
+ put_page(page);
}
/* do io completion on the original bio */
@@ -224,8 +224,8 @@ out:
static noinline void end_compressed_writeback(struct inode *inode,
const struct compressed_bio *cb)
{
- unsigned long index = cb->start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT;
+ unsigned long index = cb->start >> PAGE_SHIFT;
+ unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
struct page *pages[16];
unsigned long nr_pages = end_index - index + 1;
int i;
@@ -247,7 +247,7 @@ static noinline void end_compressed_writeback(struct inode *inode,
if (cb->errors)
SetPageError(pages[i]);
end_page_writeback(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
nr_pages -= ret;
index += ret;
@@ -304,7 +304,7 @@ static void end_compressed_bio_write(struct bio *bio)
for (index = 0; index < cb->nr_pages; index++) {
page = cb->compressed_pages[index];
page->mapping = NULL;
- page_cache_release(page);
+ put_page(page);
}
/* finally free the cb struct */
@@ -341,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
int ret;
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
- WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
+ WARN_ON(start & ((u64)PAGE_SIZE - 1));
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
if (!cb)
return -ENOMEM;
@@ -374,14 +374,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
page->mapping = inode->i_mapping;
if (bio->bi_iter.bi_size)
ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
- PAGE_CACHE_SIZE,
+ PAGE_SIZE,
bio, 0);
else
ret = 0;
page->mapping = NULL;
- if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
- PAGE_CACHE_SIZE) {
+ if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
+ PAGE_SIZE) {
bio_get(bio);
/*
@@ -410,15 +410,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
BUG_ON(!bio);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
- bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+ bio_add_page(bio, page, PAGE_SIZE, 0);
}
- if (bytes_left < PAGE_CACHE_SIZE) {
+ if (bytes_left < PAGE_SIZE) {
btrfs_info(BTRFS_I(inode)->root->fs_info,
"bytes left %lu compress len %lu nr %lu",
bytes_left, cb->compressed_len, cb->nr_pages);
}
- bytes_left -= PAGE_CACHE_SIZE;
- first_byte += PAGE_CACHE_SIZE;
+ bytes_left -= PAGE_SIZE;
+ first_byte += PAGE_SIZE;
cond_resched();
}
bio_get(bio);
@@ -457,17 +457,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
int misses = 0;
page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
- last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
+ last_offset = (page_offset(page) + PAGE_SIZE);
em_tree = &BTRFS_I(inode)->extent_tree;
tree = &BTRFS_I(inode)->io_tree;
if (isize == 0)
return 0;
- end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+ end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
while (last_offset < compressed_end) {
- pg_index = last_offset >> PAGE_CACHE_SHIFT;
+ pg_index = last_offset >> PAGE_SHIFT;
if (pg_index > end_index)
break;
@@ -488,11 +488,11 @@ static noinline int add_ra_bio_pages(struct inode *inode,
break;
if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
- page_cache_release(page);
+ put_page(page);
goto next;
}
- end = last_offset + PAGE_CACHE_SIZE - 1;
+ end = last_offset + PAGE_SIZE - 1;
/*
* at this point, we have a locked page in the page cache
* for these bytes in the file. But, we have to make
@@ -502,27 +502,27 @@ static noinline int add_ra_bio_pages(struct inode *inode,
lock_extent(tree, last_offset, end);
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, last_offset,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
read_unlock(&em_tree->lock);
if (!em || last_offset < em->start ||
- (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
+ (last_offset + PAGE_SIZE > extent_map_end(em)) ||
(em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
free_extent_map(em);
unlock_extent(tree, last_offset, end);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
break;
}
free_extent_map(em);
if (page->index == end_index) {
char *userpage;
- size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);
+ size_t zero_offset = isize & (PAGE_SIZE - 1);
if (zero_offset) {
int zeros;
- zeros = PAGE_CACHE_SIZE - zero_offset;
+ zeros = PAGE_SIZE - zero_offset;
userpage = kmap_atomic(page);
memset(userpage + zero_offset, 0, zeros);
flush_dcache_page(page);
@@ -531,19 +531,19 @@ static noinline int add_ra_bio_pages(struct inode *inode,
}
ret = bio_add_page(cb->orig_bio, page,
- PAGE_CACHE_SIZE, 0);
+ PAGE_SIZE, 0);
- if (ret == PAGE_CACHE_SIZE) {
+ if (ret == PAGE_SIZE) {
nr_pages++;
- page_cache_release(page);
+ put_page(page);
} else {
unlock_extent(tree, last_offset, end);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
break;
}
next:
- last_offset += PAGE_CACHE_SIZE;
+ last_offset += PAGE_SIZE;
}
return 0;
}
@@ -567,7 +567,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
struct extent_map_tree *em_tree;
struct compressed_bio *cb;
struct btrfs_root *root = BTRFS_I(inode)->root;
- unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+ unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
unsigned long compressed_len;
unsigned long nr_pages;
unsigned long pg_index;
@@ -589,7 +589,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree,
page_offset(bio->bi_io_vec->bv_page),
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
read_unlock(&em_tree->lock);
if (!em)
return -EIO;
@@ -617,7 +617,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
cb->compress_type = extent_compress_type(bio_flags);
cb->orig_bio = bio;
- nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
+ nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
GFP_NOFS);
if (!cb->compressed_pages)
@@ -640,7 +640,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
add_ra_bio_pages(inode, em_start + em_len, cb);
/* include any pages we added in add_ra_bio_pages */
- uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+ uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
cb->len = uncompressed_len;
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
@@ -653,18 +653,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
page = cb->compressed_pages[pg_index];
page->mapping = inode->i_mapping;
- page->index = em_start >> PAGE_CACHE_SHIFT;
+ page->index = em_start >> PAGE_SHIFT;
if (comp_bio->bi_iter.bi_size)
ret = tree->ops->merge_bio_hook(READ, page, 0,
- PAGE_CACHE_SIZE,
+ PAGE_SIZE,
comp_bio, 0);
else
ret = 0;
page->mapping = NULL;
- if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
- PAGE_CACHE_SIZE) {
+ if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
+ PAGE_SIZE) {
bio_get(comp_bio);
ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
@@ -702,9 +702,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
- bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
+ bio_add_page(comp_bio, page, PAGE_SIZE, 0);
}
- cur_disk_byte += PAGE_CACHE_SIZE;
+ cur_disk_byte += PAGE_SIZE;
}
bio_get(comp_bio);
@@ -1013,8 +1013,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
/* copy bytes from the working buffer into the pages */
while (working_bytes > 0) {
- bytes = min(PAGE_CACHE_SIZE - *pg_offset,
- PAGE_CACHE_SIZE - buf_offset);
+ bytes = min(PAGE_SIZE - *pg_offset,
+ PAGE_SIZE - buf_offset);
bytes = min(bytes, working_bytes);
kaddr = kmap_atomic(page_out);
memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
@@ -1027,7 +1027,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
current_buf_start += bytes;
/* check if we need to pick another page */
- if (*pg_offset == PAGE_CACHE_SIZE) {
+ if (*pg_offset == PAGE_SIZE) {
(*pg_index)++;
if (*pg_index >= vcnt)
return 0;
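
(add_ra_bio_pages() above uses a pattern that recurs throughout this
commit: when a page straddles i_size, only the first
isize & (PAGE_SIZE - 1) bytes of it are valid and the tail must be
zeroed. The idiom in isolation, as an illustrative sketch:)

	size_t zero_offset = isize & (PAGE_SIZE - 1); /* valid bytes in last page */
	if (zero_offset) {
		char *kaddr = kmap_atomic(page);
		memset(kaddr + zero_offset, 0, PAGE_SIZE - zero_offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr);
	}
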
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 77592931ab4f..ec7928a27aaa 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -19,6 +19,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
+#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -5361,10 +5362,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
}
- tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL);
+ tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
if (!tmp_buf) {
- ret = -ENOMEM;
- goto out;
+ tmp_buf = vmalloc(left_root->nodesize);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
}
left_path->search_commit_root = 1;
@@ -5565,7 +5569,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
out:
btrfs_free_path(left_path);
btrfs_free_path(right_path);
- kfree(tmp_buf);
+ kvfree(tmp_buf);
return ret;
}
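
(The btrfs_compare_trees() hunk above moves a node-sized buffer to a
kmalloc-with-vmalloc-fallback, freed with kvfree() so that either
allocator's memory is released correctly. The allocation pattern in
isolation, as a sketch -- alloc_tmp_buf is a hypothetical helper:)

	#include <linux/slab.h>
	#include <linux/vmalloc.h>
	#include <linux/mm.h>	/* kvfree() */

	static void *alloc_tmp_buf(size_t len)
	{
		/* Try the slab first; __GFP_NOWARN silences the failure,
		 * since falling back to vmalloc() below is expected. */
		void *buf = kmalloc(len, GFP_KERNEL | __GFP_NOWARN);

		if (!buf)
			buf = vmalloc(len); /* may succeed where a large kmalloc cannot */
		return buf;	/* free with kvfree(), whichever path allocated it */
	}
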
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index a1d6652e0c47..26bcb487f958 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -394,6 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
dev_replace->cursor_right = 0;
dev_replace->is_valid = 1;
dev_replace->item_needs_writeback = 1;
+ atomic64_set(&dev_replace->num_write_errors, 0);
+ atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
btrfs_dev_replace_unlock(dev_replace, 1);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4b02591b0301..4e47849d7427 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -25,7 +25,6 @@
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
-#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
@@ -303,7 +302,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
err = map_private_extent_buffer(buf, offset, 32,
&kaddr, &map_start, &map_len);
if (err)
- return 1;
+ return err;
cur_len = min(len, map_len - (offset - map_start));
crc = btrfs_csum_data(kaddr + offset - map_start,
crc, cur_len);
@@ -313,7 +312,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
if (csum_size > sizeof(inline_result)) {
result = kzalloc(csum_size, GFP_NOFS);
if (!result)
- return 1;
+ return -ENOMEM;
} else {
result = (char *)&inline_result;
}
@@ -334,7 +333,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
val, found, btrfs_header_level(buf));
if (result != (char *)&inline_result)
kfree(result);
- return 1;
+ return -EUCLEAN;
}
} else {
write_extent_buffer(buf, result, 0, csum_size);
@@ -513,11 +512,21 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
eb = (struct extent_buffer *)page->private;
if (page != eb->pages[0])
return 0;
+
found_start = btrfs_header_bytenr(eb);
- if (WARN_ON(found_start != start || !PageUptodate(page)))
- return 0;
- csum_tree_block(fs_info, eb, 0);
- return 0;
+ /*
+ * Please do not consolidate these warnings into a single if.
+ * It is useful to know what went wrong.
+ */
+ if (WARN_ON(found_start != start))
+ return -EUCLEAN;
+ if (WARN_ON(!PageUptodate(page)))
+ return -EUCLEAN;
+
+ ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
+ btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
+
+ return csum_tree_block(fs_info, eb, 0);
}
static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
@@ -661,10 +670,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
eb, found_level);
ret = csum_tree_block(fs_info, eb, 1);
- if (ret) {
- ret = -EIO;
+ if (ret)
goto err;
- }
/*
* If this is a leaf block and it is corrupt, set the corrupt bit so
@@ -1055,7 +1062,7 @@ static void btree_invalidatepage(struct page *page, unsigned int offset,
(unsigned long long)page_offset(page));
ClearPagePrivate(page);
set_page_private(page, 0);
- page_cache_release(page);
+ put_page(page);
}
}
@@ -1757,7 +1764,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
if (err)
return err;
- bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
+ bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
bdi->congested_fn = btrfs_congested_fn;
bdi->congested_data = info;
bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
@@ -1831,7 +1838,7 @@ static int cleaner_kthread(void *arg)
*/
btrfs_delete_unused_bgs(root->fs_info);
sleep:
- if (!try_to_freeze() && !again) {
+ if (!again) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop())
schedule();
@@ -1921,14 +1928,12 @@ sleep:
if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
&root->fs_info->fs_state)))
btrfs_cleanup_transaction(root);
- if (!try_to_freeze()) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (!kthread_should_stop() &&
- (!btrfs_transaction_blocked(root->fs_info) ||
- cannot_commit))
- schedule_timeout(delay);
- __set_current_state(TASK_RUNNING);
- }
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!kthread_should_stop() &&
+ (!btrfs_transaction_blocked(root->fs_info) ||
+ cannot_commit))
+ schedule_timeout(delay);
+ __set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
return 0;
}
@@ -2537,7 +2542,7 @@ int open_ctree(struct super_block *sb,
err = ret;
goto fail_bdi;
}
- fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
+ fs_info->dirty_metadata_batch = PAGE_SIZE *
(1 + ilog2(nr_cpu_ids));
ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
@@ -2782,7 +2787,7 @@ int open_ctree(struct super_block *sb,
* flag our filesystem as having big metadata blocks if
* they are bigger than the page size
*/
- if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) {
+ if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
@@ -2832,7 +2837,7 @@ int open_ctree(struct super_block *sb,
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
- SZ_4M / PAGE_CACHE_SIZE);
+ SZ_4M / PAGE_SIZE);
tree_root->nodesize = nodesize;
tree_root->sectorsize = sectorsize;
@@ -4071,9 +4076,9 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
/* Only PAGE SIZE is supported yet */
- if (sectorsize != PAGE_CACHE_SIZE) {
+ if (sectorsize != PAGE_SIZE) {
printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
- sectorsize, PAGE_CACHE_SIZE);
+ sectorsize, PAGE_SIZE);
ret = -EINVAL;
}
if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 53e12977bfd0..84e060eb0de8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3452,7 +3452,7 @@ again:
num_pages = 1;
num_pages *= 16;
- num_pages *= PAGE_CACHE_SIZE;
+ num_pages *= PAGE_SIZE;
ret = btrfs_check_data_free_space(inode, 0, num_pages);
if (ret)
@@ -4639,7 +4639,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
loops = 0;
while (delalloc_bytes && loops < 3) {
max_reclaim = min(delalloc_bytes, to_reclaim);
- nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
+ nr_pages = max_reclaim >> PAGE_SHIFT;
btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
/*
* We need to wait for the async pages to actually start before
@@ -9386,15 +9386,23 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
u64 dev_min = 1;
u64 dev_nr = 0;
u64 target;
+ int debug;
int index;
int full = 0;
int ret = 0;
+ debug = btrfs_test_opt(root, ENOSPC_DEBUG);
+
block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
/* odd, couldn't find the block group, leave it alone */
- if (!block_group)
+ if (!block_group) {
+ if (debug)
+ btrfs_warn(root->fs_info,
+ "can't find block group for bytenr %llu",
+ bytenr);
return -1;
+ }
min_free = btrfs_block_group_used(&block_group->item);
@@ -9448,8 +9456,13 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
* this is just a balance, so if we were marked as full
* we know there is no space for a new chunk
*/
- if (full)
+ if (full) {
+ if (debug)
+ btrfs_warn(root->fs_info,
+ "no space to alloc new chunk for block group %llu",
+ block_group->key.objectid);
goto out;
+ }
index = get_block_group_index(block_group);
}
@@ -9496,6 +9509,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
ret = -1;
}
}
+ if (debug && ret == -1)
+ btrfs_warn(root->fs_info,
+ "no space to allocate a new chunk for block group %llu",
+ block_group->key.objectid);
mutex_unlock(&root->fs_info->chunk_mutex);
btrfs_end_transaction(trans, root);
out:
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 76a0c8597d98..d247fc0eea19 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1363,23 +1363,23 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+ unsigned long index = start >> PAGE_SHIFT;
+ unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;
while (index <= end_index) {
page = find_get_page(inode->i_mapping, index);
BUG_ON(!page); /* Pages should be in the extent_io_tree */
clear_page_dirty_for_io(page);
- page_cache_release(page);
+ put_page(page);
index++;
}
}
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+ unsigned long index = start >> PAGE_SHIFT;
+ unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;
while (index <= end_index) {
@@ -1387,7 +1387,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
BUG_ON(!page); /* Pages should be in the extent_io_tree */
__set_page_dirty_nobuffers(page);
account_page_redirty(page);
- page_cache_release(page);
+ put_page(page);
index++;
}
}
@@ -1397,15 +1397,15 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
*/
static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+ unsigned long index = start >> PAGE_SHIFT;
+ unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;
while (index <= end_index) {
page = find_get_page(tree->mapping, index);
BUG_ON(!page); /* Pages should be in the extent_io_tree */
set_page_writeback(page);
- page_cache_release(page);
+ put_page(page);
index++;
}
}
@@ -1556,8 +1556,8 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
{
int ret;
struct page *pages[16];
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+ unsigned long index = start >> PAGE_SHIFT;
+ unsigned long end_index = end >> PAGE_SHIFT;
unsigned long nr_pages = end_index - index + 1;
int i;
@@ -1571,7 +1571,7 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
for (i = 0; i < ret; i++) {
if (pages[i] != locked_page)
unlock_page(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
nr_pages -= ret;
index += ret;
@@ -1584,9 +1584,9 @@ static noinline int lock_delalloc_pages(struct inode *inode,
u64 delalloc_start,
u64 delalloc_end)
{
- unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
+ unsigned long index = delalloc_start >> PAGE_SHIFT;
unsigned long start_index = index;
- unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
+ unsigned long end_index = delalloc_end >> PAGE_SHIFT;
unsigned long pages_locked = 0;
struct page *pages[16];
unsigned long nrpages;
@@ -1619,11 +1619,11 @@ static noinline int lock_delalloc_pages(struct inode *inode,
pages[i]->mapping != inode->i_mapping) {
ret = -EAGAIN;
unlock_page(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
goto done;
}
}
- page_cache_release(pages[i]);
+ put_page(pages[i]);
pages_locked++;
}
nrpages -= ret;
@@ -1636,7 +1636,7 @@ done:
__unlock_for_delalloc(inode, locked_page,
delalloc_start,
((u64)(start_index + pages_locked - 1)) <<
- PAGE_CACHE_SHIFT);
+ PAGE_SHIFT);
}
return ret;
}
@@ -1696,7 +1696,7 @@ again:
free_extent_state(cached_state);
cached_state = NULL;
if (!loops) {
- max_bytes = PAGE_CACHE_SIZE;
+ max_bytes = PAGE_SIZE;
loops = 1;
goto again;
} else {
@@ -1735,8 +1735,8 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
int ret;
struct page *pages[16];
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+ unsigned long index = start >> PAGE_SHIFT;
+ unsigned long end_index = end >> PAGE_SHIFT;
unsigned long nr_pages = end_index - index + 1;
int i;
@@ -1757,7 +1757,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
SetPagePrivate2(pages[i]);
if (pages[i] == locked_page) {
- page_cache_release(pages[i]);
+ put_page(pages[i]);
continue;
}
if (page_ops & PAGE_CLEAR_DIRTY)
@@ -1770,7 +1770,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
end_page_writeback(pages[i]);
if (page_ops & PAGE_UNLOCK)
unlock_page(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
nr_pages -= ret;
index += ret;
@@ -1961,7 +1961,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
{
u64 start = page_offset(page);
- u64 end = start + PAGE_CACHE_SIZE - 1;
+ u64 end = start + PAGE_SIZE - 1;
if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
SetPageUptodate(page);
}
@@ -2071,11 +2071,11 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
struct page *p = eb->pages[i];
ret = repair_io_failure(root->fs_info->btree_inode, start,
- PAGE_CACHE_SIZE, start, p,
+ PAGE_SIZE, start, p,
start - page_offset(p), mirror_num);
if (ret)
break;
- start += PAGE_CACHE_SIZE;
+ start += PAGE_SIZE;
}
return ret;
@@ -2466,8 +2466,8 @@ static void end_bio_extent_writepage(struct bio *bio)
* advance bv_offset and adjust bv_len to compensate.
* Print a warning for nonzero offsets, and an error
* if they don't add up to a full page. */
- if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
- if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+ if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
+ if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
"partial page write in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
@@ -2541,8 +2541,8 @@ static void end_bio_extent_readpage(struct bio *bio)
* advance bv_offset and adjust bv_len to compensate.
* Print a warning for nonzero offsets, and an error
* if they don't add up to a full page. */
- if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
- if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+ if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
+ if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
"partial page read in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
@@ -2598,13 +2598,13 @@ static void end_bio_extent_readpage(struct bio *bio)
readpage_ok:
if (likely(uptodate)) {
loff_t i_size = i_size_read(inode);
- pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ pgoff_t end_index = i_size >> PAGE_SHIFT;
unsigned off;
/* Zero out the end if this page straddles i_size */
- off = i_size & (PAGE_CACHE_SIZE-1);
+ off = i_size & (PAGE_SIZE-1);
if (page->index == end_index && off)
- zero_user_segment(page, off, PAGE_CACHE_SIZE);
+ zero_user_segment(page, off, PAGE_SIZE);
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
@@ -2768,7 +2768,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
struct bio *bio;
int contig = 0;
int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
- size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
+ size_t page_size = min_t(size_t, size, PAGE_SIZE);
if (bio_ret && *bio_ret) {
bio = *bio_ret;
@@ -2821,7 +2821,7 @@ static void attach_extent_buffer_page(struct extent_buffer *eb,
{
if (!PagePrivate(page)) {
SetPagePrivate(page);
- page_cache_get(page);
+ get_page(page);
set_page_private(page, (unsigned long)eb);
} else {
WARN_ON(page->private != (unsigned long)eb);
@@ -2832,7 +2832,7 @@ void set_page_extent_mapped(struct page *page)
{
if (!PagePrivate(page)) {
SetPagePrivate(page);
- page_cache_get(page);
+ get_page(page);
set_page_private(page, EXTENT_PAGE_PRIVATE);
}
}
@@ -2880,7 +2880,7 @@ static int __do_readpage(struct extent_io_tree *tree,
{
struct inode *inode = page->mapping->host;
u64 start = page_offset(page);
- u64 page_end = start + PAGE_CACHE_SIZE - 1;
+ u64 page_end = start + PAGE_SIZE - 1;
u64 end;
u64 cur = start;
u64 extent_offset;
@@ -2909,12 +2909,12 @@ static int __do_readpage(struct extent_io_tree *tree,
}
}
- if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
+ if (page->index == last_byte >> PAGE_SHIFT) {
char *userpage;
- size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
+ size_t zero_offset = last_byte & (PAGE_SIZE - 1);
if (zero_offset) {
- iosize = PAGE_CACHE_SIZE - zero_offset;
+ iosize = PAGE_SIZE - zero_offset;
userpage = kmap_atomic(page);
memset(userpage + zero_offset, 0, iosize);
flush_dcache_page(page);
@@ -2922,14 +2922,14 @@ static int __do_readpage(struct extent_io_tree *tree,
}
}
while (cur <= end) {
- unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+ unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
bool force_bio_submit = false;
if (cur >= last_byte) {
char *userpage;
struct extent_state *cached = NULL;
- iosize = PAGE_CACHE_SIZE - pg_offset;
+ iosize = PAGE_SIZE - pg_offset;
userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
@@ -3112,7 +3112,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
for (index = 0; index < nr_pages; index++) {
__do_readpage(tree, pages[index], get_extent, em_cached, bio,
mirror_num, bio_flags, rw, prev_em_start);
- page_cache_release(pages[index]);
+ put_page(pages[index]);
}
}
@@ -3134,10 +3134,10 @@ static void __extent_readpages(struct extent_io_tree *tree,
page_start = page_offset(pages[index]);
if (!end) {
start = page_start;
- end = start + PAGE_CACHE_SIZE - 1;
+ end = start + PAGE_SIZE - 1;
first_index = index;
} else if (end + 1 == page_start) {
- end += PAGE_CACHE_SIZE;
+ end += PAGE_SIZE;
} else {
__do_contiguous_readpages(tree, &pages[first_index],
index - first_index, start,
@@ -3145,7 +3145,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
bio, mirror_num, bio_flags,
rw, prev_em_start);
start = page_start;
- end = start + PAGE_CACHE_SIZE - 1;
+ end = start + PAGE_SIZE - 1;
first_index = index;
}
}
@@ -3167,13 +3167,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
struct inode *inode = page->mapping->host;
struct btrfs_ordered_extent *ordered;
u64 start = page_offset(page);
- u64 end = start + PAGE_CACHE_SIZE - 1;
+ u64 end = start + PAGE_SIZE - 1;
int ret;
while (1) {
lock_extent(tree, start, end);
ordered = btrfs_lookup_ordered_range(inode, start,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (!ordered)
break;
unlock_extent(tree, start, end);
@@ -3227,7 +3227,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
unsigned long *nr_written)
{
struct extent_io_tree *tree = epd->tree;
- u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1;
+ u64 page_end = delalloc_start + PAGE_SIZE - 1;
u64 nr_delalloc;
u64 delalloc_to_write = 0;
u64 delalloc_end = 0;
@@ -3264,13 +3264,11 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
goto done;
}
/*
- * delalloc_end is already one less than the total
- * length, so we don't subtract one from
- * PAGE_CACHE_SIZE
+ * delalloc_end is already one less than the total length, so
+ * we don't subtract one from PAGE_SIZE
*/
delalloc_to_write += (delalloc_end - delalloc_start +
- PAGE_CACHE_SIZE) >>
- PAGE_CACHE_SHIFT;
+ PAGE_SIZE) >> PAGE_SHIFT;
delalloc_start = delalloc_end + 1;
}
if (wbc->nr_to_write < delalloc_to_write) {
@@ -3319,7 +3317,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
{
struct extent_io_tree *tree = epd->tree;
u64 start = page_offset(page);
- u64 page_end = start + PAGE_CACHE_SIZE - 1;
+ u64 page_end = start + PAGE_SIZE - 1;
u64 end;
u64 cur = start;
u64 extent_offset;
@@ -3434,7 +3432,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
if (ret) {
SetPageError(page);
} else {
- unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1;
+ unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1;
set_range_writeback(tree, cur, cur + iosize - 1);
if (!PageWriteback(page)) {
@@ -3477,12 +3475,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
struct inode *inode = page->mapping->host;
struct extent_page_data *epd = data;
u64 start = page_offset(page);
- u64 page_end = start + PAGE_CACHE_SIZE - 1;
+ u64 page_end = start + PAGE_SIZE - 1;
int ret;
int nr = 0;
size_t pg_offset = 0;
loff_t i_size = i_size_read(inode);
- unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
+ unsigned long end_index = i_size >> PAGE_SHIFT;
int write_flags;
unsigned long nr_written = 0;
@@ -3497,10 +3495,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
ClearPageError(page);
- pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
+ pg_offset = i_size & (PAGE_SIZE - 1);
if (page->index > end_index ||
(page->index == end_index && !pg_offset)) {
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
unlock_page(page);
return 0;
}
@@ -3510,7 +3508,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0,
- PAGE_CACHE_SIZE - pg_offset);
+ PAGE_SIZE - pg_offset);
kunmap_atomic(userpage);
flush_dcache_page(page);
}
@@ -3748,7 +3746,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
clear_page_dirty_for_io(p);
set_page_writeback(p);
ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
- PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+ PAGE_SIZE, 0, bdev, &epd->bio,
-1, end_bio_extent_buffer_writepage,
0, epd->bio_flags, bio_flags, false);
epd->bio_flags = bio_flags;
@@ -3760,7 +3758,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
ret = -EIO;
break;
}
- offset += PAGE_CACHE_SIZE;
+ offset += PAGE_SIZE;
update_nr_written(p, wbc, 1);
unlock_page(p);
}
@@ -3804,8 +3802,8 @@ int btree_write_cache_pages(struct address_space *mapping,
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
} else {
- index = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ index = wbc->range_start >> PAGE_SHIFT;
+ end = wbc->range_end >> PAGE_SHIFT;
scanned = 1;
}
if (wbc->sync_mode == WB_SYNC_ALL)
@@ -3948,8 +3946,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
} else {
- index = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ index = wbc->range_start >> PAGE_SHIFT;
+ end = wbc->range_end >> PAGE_SHIFT;
scanned = 1;
}
if (wbc->sync_mode == WB_SYNC_ALL)
@@ -4083,8 +4081,8 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
int ret = 0;
struct address_space *mapping = inode->i_mapping;
struct page *page;
- unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
- PAGE_CACHE_SHIFT;
+ unsigned long nr_pages = (end - start + PAGE_SIZE) >>
+ PAGE_SHIFT;
struct extent_page_data epd = {
.bio = NULL,
@@ -4102,18 +4100,18 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
};
while (start <= end) {
- page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+ page = find_get_page(mapping, start >> PAGE_SHIFT);
if (clear_page_dirty_for_io(page))
ret = __extent_writepage(page, &wbc_writepages, &epd);
else {
if (tree->ops && tree->ops->writepage_end_io_hook)
tree->ops->writepage_end_io_hook(page, start,
- start + PAGE_CACHE_SIZE - 1,
+ start + PAGE_SIZE - 1,
NULL, 1);
unlock_page(page);
}
- page_cache_release(page);
- start += PAGE_CACHE_SIZE;
+ put_page(page);
+ start += PAGE_SIZE;
}
flush_epd_write_bio(&epd);
@@ -4163,7 +4161,7 @@ int extent_readpages(struct extent_io_tree *tree,
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
page->index, GFP_NOFS)) {
- page_cache_release(page);
+ put_page(page);
continue;
}
@@ -4197,7 +4195,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
{
struct extent_state *cached_state = NULL;
u64 start = page_offset(page);
- u64 end = start + PAGE_CACHE_SIZE - 1;
+ u64 end = start + PAGE_SIZE - 1;
size_t blocksize = page->mapping->host->i_sb->s_blocksize;
start += ALIGN(offset, blocksize);
@@ -4223,7 +4221,7 @@ static int try_release_extent_state(struct extent_map_tree *map,
struct page *page, gfp_t mask)
{
u64 start = page_offset(page);
- u64 end = start + PAGE_CACHE_SIZE - 1;
+ u64 end = start + PAGE_SIZE - 1;
int ret = 1;
if (test_range_bit(tree, start, end,
@@ -4262,7 +4260,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
{
struct extent_map *em;
u64 start = page_offset(page);
- u64 end = start + PAGE_CACHE_SIZE - 1;
+ u64 end = start + PAGE_SIZE - 1;
if (gfpflags_allow_blocking(mask) &&
page->mapping->host->i_size > SZ_16M) {
@@ -4587,14 +4585,14 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
ClearPagePrivate(page);
set_page_private(page, 0);
/* One for the page private */
- page_cache_release(page);
+ put_page(page);
}
if (mapped)
spin_unlock(&page->mapping->private_lock);
/* One for when we alloced the page */
- page_cache_release(page);
+ put_page(page);
} while (index != 0);
}
@@ -4779,7 +4777,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
rcu_read_lock();
eb = radix_tree_lookup(&fs_info->buffer_radix,
- start >> PAGE_CACHE_SHIFT);
+ start >> PAGE_SHIFT);
if (eb && atomic_inc_not_zero(&eb->refs)) {
rcu_read_unlock();
/*
@@ -4829,7 +4827,7 @@ again:
goto free_eb;
spin_lock(&fs_info->buffer_lock);
ret = radix_tree_insert(&fs_info->buffer_radix,
- start >> PAGE_CACHE_SHIFT, eb);
+ start >> PAGE_SHIFT, eb);
spin_unlock(&fs_info->buffer_lock);
radix_tree_preload_end();
if (ret == -EEXIST) {
@@ -4862,7 +4860,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
unsigned long len = fs_info->tree_root->nodesize;
unsigned long num_pages = num_extent_pages(start, len);
unsigned long i;
- unsigned long index = start >> PAGE_CACHE_SHIFT;
+ unsigned long index = start >> PAGE_SHIFT;
struct extent_buffer *eb;
struct extent_buffer *exists = NULL;
struct page *p;
@@ -4896,7 +4894,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
if (atomic_inc_not_zero(&exists->refs)) {
spin_unlock(&mapping->private_lock);
unlock_page(p);
- page_cache_release(p);
+ put_page(p);
mark_extent_buffer_accessed(exists, p);
goto free_eb;
}
@@ -4908,7 +4906,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
*/
ClearPagePrivate(p);
WARN_ON(PageDirty(p));
- page_cache_release(p);
+ put_page(p);
}
attach_extent_buffer_page(eb, p);
spin_unlock(&mapping->private_lock);
@@ -4931,7 +4929,7 @@ again:
spin_lock(&fs_info->buffer_lock);
ret = radix_tree_insert(&fs_info->buffer_radix,
- start >> PAGE_CACHE_SHIFT, eb);
+ start >> PAGE_SHIFT, eb);
spin_unlock(&fs_info->buffer_lock);
radix_tree_preload_end();
if (ret == -EEXIST) {
@@ -4994,7 +4992,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
spin_lock(&fs_info->buffer_lock);
radix_tree_delete(&fs_info->buffer_radix,
- eb->start >> PAGE_CACHE_SHIFT);
+ eb->start >> PAGE_SHIFT);
spin_unlock(&fs_info->buffer_lock);
} else {
spin_unlock(&eb->refs_lock);
@@ -5168,8 +5166,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
if (start) {
WARN_ON(start < eb->start);
- start_i = (start >> PAGE_CACHE_SHIFT) -
- (eb->start >> PAGE_CACHE_SHIFT);
+ start_i = (start >> PAGE_SHIFT) -
+ (eb->start >> PAGE_SHIFT);
} else {
start_i = 0;
}
@@ -5252,18 +5250,18 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
struct page *page;
char *kaddr;
char *dst = (char *)dstv;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+ size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ unsigned long i = (start_offset + start) >> PAGE_SHIFT;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+ offset = (start_offset + start) & (PAGE_SIZE - 1);
while (len > 0) {
page = eb->pages[i];
- cur = min(len, (PAGE_CACHE_SIZE - offset));
+ cur = min(len, (PAGE_SIZE - offset));
kaddr = page_address(page);
memcpy(dst, kaddr + offset, cur);
@@ -5283,19 +5281,19 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
struct page *page;
char *kaddr;
char __user *dst = (char __user *)dstv;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+ size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ unsigned long i = (start_offset + start) >> PAGE_SHIFT;
int ret = 0;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+ offset = (start_offset + start) & (PAGE_SIZE - 1);
while (len > 0) {
page = eb->pages[i];
- cur = min(len, (PAGE_CACHE_SIZE - offset));
+ cur = min(len, (PAGE_SIZE - offset));
kaddr = page_address(page);
if (copy_to_user(dst, kaddr + offset, cur)) {
ret = -EFAULT;
@@ -5316,13 +5314,13 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
unsigned long *map_start,
unsigned long *map_len)
{
- size_t offset = start & (PAGE_CACHE_SIZE - 1);
+ size_t offset = start & (PAGE_SIZE - 1);
char *kaddr;
struct page *p;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+ size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ unsigned long i = (start_offset + start) >> PAGE_SHIFT;
unsigned long end_i = (start_offset + start + min_len - 1) >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
if (i != end_i)
return -EINVAL;
@@ -5332,7 +5330,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
*map_start = 0;
} else {
offset = 0;
- *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
+ *map_start = ((u64)i << PAGE_SHIFT) - start_offset;
}
if (start + min_len > eb->len) {
@@ -5345,7 +5343,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
p = eb->pages[i];
kaddr = page_address(p);
*map = kaddr + offset;
- *map_len = PAGE_CACHE_SIZE - offset;
+ *map_len = PAGE_SIZE - offset;
return 0;
}
@@ -5358,19 +5356,19 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
struct page *page;
char *kaddr;
char *ptr = (char *)ptrv;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+ size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ unsigned long i = (start_offset + start) >> PAGE_SHIFT;
int ret = 0;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+ offset = (start_offset + start) & (PAGE_SIZE - 1);
while (len > 0) {
page = eb->pages[i];
- cur = min(len, (PAGE_CACHE_SIZE - offset));
+ cur = min(len, (PAGE_SIZE - offset));
kaddr = page_address(page);
ret = memcmp(ptr, kaddr + offset, cur);
@@ -5393,19 +5391,19 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
struct page *page;
char *kaddr;
char *src = (char *)srcv;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+ size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ unsigned long i = (start_offset + start) >> PAGE_SHIFT;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+ offset = (start_offset + start) & (PAGE_SIZE - 1);
while (len > 0) {
page = eb->pages[i];
WARN_ON(!PageUptodate(page));
- cur = min(len, PAGE_CACHE_SIZE - offset);
+ cur = min(len, PAGE_SIZE - offset);
kaddr = page_address(page);
memcpy(kaddr + offset, src, cur);
@@ -5423,19 +5421,19 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
size_t offset;
struct page *page;
char *kaddr;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+ size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ unsigned long i = (start_offset + start) >> PAGE_SHIFT;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+ offset = (start_offset + start) & (PAGE_SIZE - 1);
while (len > 0) {
page = eb->pages[i];
WARN_ON(!PageUptodate(page));
- cur = min(len, PAGE_CACHE_SIZE - offset);
+ cur = min(len, PAGE_SIZE - offset);
kaddr = page_address(page);
memset(kaddr + offset, c, cur);
@@ -5454,19 +5452,19 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
size_t offset;
struct page *page;
char *kaddr;
- size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
+ size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
+ unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
WARN_ON(src->len != dst_len);
offset = (start_offset + dst_offset) &
- (PAGE_CACHE_SIZE - 1);
+ (PAGE_SIZE - 1);
while (len > 0) {
page = dst->pages[i];
WARN_ON(!PageUptodate(page));
- cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
+ cur = min(len, (unsigned long)(PAGE_SIZE - offset));
kaddr = page_address(page);
read_extent_buffer(src, kaddr + offset, src_offset, cur);
@@ -5508,7 +5506,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
unsigned long *page_index,
size_t *page_offset)
{
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+ size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
size_t byte_offset = BIT_BYTE(nr);
size_t offset;
@@ -5519,8 +5517,8 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
*/
offset = start_offset + start + byte_offset;
- *page_index = offset >> PAGE_CACHE_SHIFT;
- *page_offset = offset & (PAGE_CACHE_SIZE - 1);
+ *page_index = offset >> PAGE_SHIFT;
+ *page_offset = offset & (PAGE_SIZE - 1);
}
/**
@@ -5572,7 +5570,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
len -= bits_to_set;
bits_to_set = BITS_PER_BYTE;
mask_to_set = ~0U;
- if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+ if (++offset >= PAGE_SIZE && len > 0) {
offset = 0;
page = eb->pages[++i];
WARN_ON(!PageUptodate(page));
@@ -5614,7 +5612,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
len -= bits_to_clear;
bits_to_clear = BITS_PER_BYTE;
mask_to_clear = ~0U;
- if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+ if (++offset >= PAGE_SIZE && len > 0) {
offset = 0;
page = eb->pages[++i];
WARN_ON(!PageUptodate(page));
@@ -5661,7 +5659,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
size_t cur;
size_t dst_off_in_page;
size_t src_off_in_page;
- size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+ size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
unsigned long dst_i;
unsigned long src_i;
@@ -5680,17 +5678,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
while (len > 0) {
dst_off_in_page = (start_offset + dst_offset) &
- (PAGE_CACHE_SIZE - 1);
+ (PAGE_SIZE - 1);
src_off_in_page = (start_offset + src_offset) &
- (PAGE_CACHE_SIZE - 1);
+ (PAGE_SIZE - 1);
- dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
- src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
+ dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
+ src_i = (start_offset + src_offset) >> PAGE_SHIFT;
- cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
+ cur = min(len, (unsigned long)(PAGE_SIZE -
src_off_in_page));
cur = min_t(unsigned long, cur,
- (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
+ (unsigned long)(PAGE_SIZE - dst_off_in_page));
copy_pages(dst->pages[dst_i], dst->pages[src_i],
dst_off_in_page, src_off_in_page, cur);
@@ -5709,7 +5707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
size_t src_off_in_page;
unsigned long dst_end = dst_offset + len - 1;
unsigned long src_end = src_offset + len - 1;
- size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+ size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
unsigned long dst_i;
unsigned long src_i;
@@ -5728,13 +5726,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
return;
}
while (len > 0) {
- dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
- src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
+ dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
+ src_i = (start_offset + src_end) >> PAGE_SHIFT;
dst_off_in_page = (start_offset + dst_end) &
- (PAGE_CACHE_SIZE - 1);
+ (PAGE_SIZE - 1);
src_off_in_page = (start_offset + src_end) &
- (PAGE_CACHE_SIZE - 1);
+ (PAGE_SIZE - 1);
cur = min_t(unsigned long, len, src_off_in_page + 1);
cur = min(cur, dst_off_in_page + 1);
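Nearly every hunk in extent_io.c is the mechanical rename from PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT to PAGE_SIZE/PAGE_SHIFT (and page_cache_release() to plain put_page()); the two macro families always expanded to the same values, so the generated code is unchanged. The byte-to-page arithmetic the hunks rely on, including the inclusive-range case called out in the writepage_delalloc() comment, can be sanity-checked with a minimal userspace sketch (4 KiB pages assumed):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t start  = 10000;                      /* arbitrary offset */
	uint64_t index  = start >> PAGE_SHIFT;        /* page index: 2    */
	uint64_t offset = start & (PAGE_SIZE - 1);    /* in-page: 1808    */

	/*
	 * Inclusive range [0, 8191]: delalloc_end is already one less
	 * than the total length, so no extra "- 1" before the shift.
	 */
	uint64_t delalloc_start = 0, delalloc_end = 8191;
	uint64_t pages = (delalloc_end - delalloc_start + PAGE_SIZE)
			 >> PAGE_SHIFT;               /* 2 pages */

	printf("index=%llu offset=%llu pages=%llu\n",
	       (unsigned long long)index,
	       (unsigned long long)offset,
	       (unsigned long long)pages);
	return 0;
}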
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 5dbf92e68fbd..b5e0ade90e88 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -120,7 +120,7 @@ struct extent_state {
};
#define INLINE_EXTENT_BUFFER_PAGES 16
-#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)
+#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
u64 start;
unsigned long len;
@@ -365,8 +365,8 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
static inline unsigned long num_extent_pages(u64 start, u64 len)
{
- return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
- (start >> PAGE_CACHE_SHIFT);
+ return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
+ (start >> PAGE_SHIFT);
}
static inline void extent_buffer_get(struct extent_buffer *eb)
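num_extent_pages() rounds the end of the buffer up and its start down, so a buffer that straddles page boundaries is counted correctly. A minimal sketch of the same formula, with 4 KiB pages and a 16 KiB nodesize assumed:

#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* mirrors num_extent_pages() from the hunk above */
static unsigned long num_extent_pages(unsigned long long start,
				      unsigned long long len)
{
	return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
	       (start >> PAGE_SHIFT);
}

int main(void)
{
	/* a 16 KiB node starting at byte 12288 covers pages 3..6 */
	printf("%lu\n", num_extent_pages(12288, 16384));  /* prints 4 */
	return 0;
}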
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b5baf5bdc8e1..7a7d6e253cfc 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -32,7 +32,7 @@
size) - 1))
#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
- PAGE_CACHE_SIZE))
+ PAGE_SIZE))
#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
sizeof(struct btrfs_ordered_sum)) / \
@@ -203,7 +203,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
csum = (u8 *)dst;
}
- if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
+ if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
path->reada = READA_FORWARD;
WARN_ON(bio->bi_vcnt <= 0);
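The READA_FORWARD hunk keeps the existing heuristic: a bio spanning more than eight pages is large enough to justify reading the csum tree ahead. A small sketch of the threshold, page size assumed:

#include <stdio.h>

#define PAGE_SIZE 4096UL                 /* assumption: 4 KiB pages */

int main(void)
{
	/* per the __btrfs_lookup_bio_sums() hunk: bios over 8 pages
	 * get forward readahead of the csum tree */
	unsigned long bi_size = 40960;   /* a 10-page bio, for illustration */

	if (bi_size > PAGE_SIZE * 8)
		printf("READA_FORWARD (threshold %lu bytes)\n",
		       PAGE_SIZE * 8);   /* 32768 */
	return 0;
}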
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 15a09cb156ce..8d7b5a45c005 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -414,11 +414,11 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
size_t copied = 0;
size_t total_copied = 0;
int pg = 0;
- int offset = pos & (PAGE_CACHE_SIZE - 1);
+ int offset = pos & (PAGE_SIZE - 1);
while (write_bytes > 0) {
size_t count = min_t(size_t,
- PAGE_CACHE_SIZE - offset, write_bytes);
+ PAGE_SIZE - offset, write_bytes);
struct page *page = prepared_pages[pg];
/*
* Copy data from userspace to the current page
@@ -448,7 +448,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
if (unlikely(copied == 0))
break;
- if (copied < PAGE_CACHE_SIZE - offset) {
+ if (copied < PAGE_SIZE - offset) {
offset += copied;
} else {
pg++;
@@ -473,7 +473,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
*/
ClearPageChecked(pages[i]);
unlock_page(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
}
@@ -1297,7 +1297,7 @@ static int prepare_uptodate_page(struct inode *inode,
{
int ret = 0;
- if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
+ if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
!PageUptodate(page)) {
ret = btrfs_readpage(NULL, page);
if (ret)
@@ -1323,7 +1323,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
size_t write_bytes, bool force_uptodate)
{
int i;
- unsigned long index = pos >> PAGE_CACHE_SHIFT;
+ unsigned long index = pos >> PAGE_SHIFT;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
int err = 0;
int faili;
@@ -1345,7 +1345,7 @@ again:
err = prepare_uptodate_page(inode, pages[i],
pos + write_bytes, false);
if (err) {
- page_cache_release(pages[i]);
+ put_page(pages[i]);
if (err == -EAGAIN) {
err = 0;
goto again;
@@ -1360,7 +1360,7 @@ again:
fail:
while (faili >= 0) {
unlock_page(pages[faili]);
- page_cache_release(pages[faili]);
+ put_page(pages[faili]);
faili--;
}
return err;
@@ -1408,7 +1408,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
cached_state, GFP_NOFS);
for (i = 0; i < num_pages; i++) {
unlock_page(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
@@ -1497,8 +1497,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
bool force_page_uptodate = false;
bool need_unlock;
- nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
- PAGE_CACHE_SIZE / (sizeof(struct page *)));
+ nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
+ PAGE_SIZE / (sizeof(struct page *)));
nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
nrptrs = max(nrptrs, 8);
pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
@@ -1506,13 +1506,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
return -ENOMEM;
while (iov_iter_count(i) > 0) {
- size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+ size_t offset = pos & (PAGE_SIZE - 1);
size_t sector_offset;
size_t write_bytes = min(iov_iter_count(i),
- nrptrs * (size_t)PAGE_CACHE_SIZE -
+ nrptrs * (size_t)PAGE_SIZE -
offset);
size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
size_t reserve_bytes;
size_t dirty_pages;
size_t copied;
@@ -1547,7 +1547,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
* write_bytes, so scale down.
*/
num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
reserve_bytes = round_up(write_bytes + sector_offset,
root->sectorsize);
goto reserve_metadata;
@@ -1609,7 +1609,7 @@ again:
} else {
force_page_uptodate = false;
dirty_pages = DIV_ROUND_UP(copied + offset,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
}
/*
@@ -1641,7 +1641,7 @@ again:
u64 __pos;
__pos = round_down(pos, root->sectorsize) +
- (dirty_pages << PAGE_CACHE_SHIFT);
+ (dirty_pages << PAGE_SHIFT);
btrfs_delalloc_release_space(inode, __pos,
release_bytes);
}
@@ -1682,7 +1682,7 @@ again:
cond_resched();
balance_dirty_pages_ratelimited(inode->i_mapping);
- if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1)
+ if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
btrfs_btree_balance_dirty(root);
pos += copied;
@@ -1738,8 +1738,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
goto out;
written += written_buffered;
iocb->ki_pos = pos + written_buffered;
- invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
- endbyte >> PAGE_CACHE_SHIFT);
+ invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
out:
return written ? written : err;
}
@@ -1905,7 +1905,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
*/
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
@@ -2682,9 +2682,12 @@ static long btrfs_fallocate(struct file *file, int mode,
return ret;
inode_lock(inode);
- ret = inode_newsize_ok(inode, alloc_end);
- if (ret)
- goto out;
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
+ ret = inode_newsize_ok(inode, offset + len);
+ if (ret)
+ goto out;
+ }
/*
* TODO: Move these two operations after we have checked
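The btrfs_fallocate() hunk is a behavioural fix, not a rename: with FALLOC_FL_KEEP_SIZE an allocation past EOF no longer trips inode_newsize_ok(), since i_size does not grow. A minimal userspace sketch of the new check (the inode struct and 1 MiB limit below are stand-ins for illustration, not the kernel's):

#include <stdio.h>

#define FALLOC_FL_KEEP_SIZE 0x01

struct inode { long long i_size; };

/* stand-in for inode_newsize_ok(): pretend the limit is 1 MiB */
static int inode_newsize_ok(const struct inode *inode, long long newsize)
{
	(void)inode;
	return newsize > (1 << 20) ? -1 : 0;   /* -EFBIG in the kernel */
}

static int check_fallocate(struct inode *inode, int mode,
			   long long offset, long long len)
{
	/*
	 * Mirrors the hunk above: the size check only applies when the
	 * allocation would extend i_size, i.e. KEEP_SIZE is not set and
	 * the range ends beyond the current size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		return inode_newsize_ok(inode, offset + len);
	return 0;
}

int main(void)
{
	struct inode inode = { .i_size = 4096 };

	/* preallocating past EOF with KEEP_SIZE is now allowed */
	printf("%d\n", check_fallocate(&inode, FALLOC_FL_KEEP_SIZE,
				       1 << 21, 4096));   /* 0 */
	/* growing the file past the limit still fails */
	printf("%d\n", check_fallocate(&inode, 0, 1 << 21, 4096)); /* -1 */
	return 0;
}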
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 8f835bfa1bdd..5e6062c26129 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -29,7 +29,7 @@
#include "inode-map.h"
#include "volumes.h"
-#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+#define BITS_PER_BITMAP (PAGE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG SZ_32K
struct btrfs_trim_range {
@@ -295,7 +295,7 @@ static int readahead_cache(struct inode *inode)
return -ENOMEM;
file_ra_state_init(ra, inode->i_mapping);
- last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+ last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
@@ -310,14 +310,14 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
int num_pages;
int check_crcs = 0;
- num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+ num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
check_crcs = 1;
/* Make sure we can fit our crcs into the first page */
if (write && check_crcs &&
- (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
+ (num_pages * sizeof(u32)) >= PAGE_SIZE)
return -ENOSPC;
memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
@@ -354,9 +354,9 @@ static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
io_ctl->page = io_ctl->pages[io_ctl->index++];
io_ctl->cur = page_address(io_ctl->page);
io_ctl->orig = io_ctl->cur;
- io_ctl->size = PAGE_CACHE_SIZE;
+ io_ctl->size = PAGE_SIZE;
if (clear)
- memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
+ memset(io_ctl->cur, 0, PAGE_SIZE);
}
static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
@@ -369,7 +369,7 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
if (io_ctl->pages[i]) {
ClearPageChecked(io_ctl->pages[i]);
unlock_page(io_ctl->pages[i]);
- page_cache_release(io_ctl->pages[i]);
+ put_page(io_ctl->pages[i]);
}
}
}
@@ -475,7 +475,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
offset = sizeof(u32) * io_ctl->num_pages;
crc = btrfs_csum_data(io_ctl->orig + offset, crc,
- PAGE_CACHE_SIZE - offset);
+ PAGE_SIZE - offset);
btrfs_csum_final(crc, (char *)&crc);
io_ctl_unmap_page(io_ctl);
tmp = page_address(io_ctl->pages[0]);
@@ -503,7 +503,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
io_ctl_map_page(io_ctl, 0);
crc = btrfs_csum_data(io_ctl->orig + offset, crc,
- PAGE_CACHE_SIZE - offset);
+ PAGE_SIZE - offset);
btrfs_csum_final(crc, (char *)&crc);
if (val != crc) {
btrfs_err_rl(io_ctl->root->fs_info,
@@ -561,7 +561,7 @@ static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
io_ctl_map_page(io_ctl, 0);
}
- memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
+ memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
io_ctl_set_crc(io_ctl, io_ctl->index - 1);
if (io_ctl->index < io_ctl->num_pages)
io_ctl_map_page(io_ctl, 0);
@@ -621,7 +621,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
if (ret)
return ret;
- memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
+ memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
io_ctl_unmap_page(io_ctl);
return 0;
@@ -775,7 +775,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
} else {
ASSERT(num_bitmaps);
num_bitmaps--;
- e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+ e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
if (!e->bitmap) {
kmem_cache_free(
btrfs_free_space_cachep, e);
@@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
* sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
* we add more bitmaps.
*/
- bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
+ bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
if (bitmap_bytes >= max_bytes) {
ctl->extents_thresh = 0;
@@ -2111,7 +2111,7 @@ new_bitmap:
}
/* allocate the bitmap */
- info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+ info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
spin_lock(&ctl->tree_lock);
if (!info->bitmap) {
ret = -ENOMEM;
@@ -3580,7 +3580,7 @@ again:
}
if (!map) {
- map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+ map = kzalloc(PAGE_SIZE, GFP_NOFS);
if (!map) {
kmem_cache_free(btrfs_free_space_cachep, info);
return -ENOMEM;
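io_ctl_init() stores one u32 crc per cache page in the first page, so the ENOSPC check above caps the size of a checksummed cache file. With 4 KiB pages assumed, that is 1024 pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL                 /* assumption: 4 KiB pages */

int main(void)
{
	/* one u32 crc per page must fit in the first page (hunk above) */
	unsigned long max_pages = PAGE_SIZE / sizeof(unsigned int);

	printf("max cache pages: %lu (%lu bytes)\n",
	       max_pages, max_pages * PAGE_SIZE);  /* 1024 pages, 4 MiB */
	return 0;
}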
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 1f0ec19b23f6..70107f7c9307 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -283,7 +283,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
}
#define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space))
-#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+#define INODES_PER_BITMAP (PAGE_SIZE * 8)
/*
* The goal is to keep the memory used by the free_ino tree won't
@@ -317,7 +317,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
}
ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
- PAGE_CACHE_SIZE / sizeof(*info);
+ PAGE_SIZE / sizeof(*info);
}
/*
@@ -481,12 +481,12 @@ again:
spin_lock(&ctl->tree_lock);
prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
- prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
- prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+ prealloc = ALIGN(prealloc, PAGE_SIZE);
+ prealloc += ctl->total_bitmaps * PAGE_SIZE;
spin_unlock(&ctl->tree_lock);
/* Just to make sure we have enough space */
- prealloc += 8 * PAGE_CACHE_SIZE;
+ prealloc += 8 * PAGE_SIZE;
ret = btrfs_delalloc_reserve_space(inode, 0, prealloc);
if (ret)
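The prealloc arithmetic above reserves space for the free-ino entries rounded up to a page, one page per bitmap, and eight pages of slack. A worked sketch with assumed sizes (the entry size below is illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096UL                 /* assumption: 4 KiB pages */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long free_extents = 100, total_bitmaps = 2;
	unsigned long entry = 48;  /* assumed sizeof(struct btrfs_free_space) */

	unsigned long prealloc = ALIGN(entry * free_extents, PAGE_SIZE);
	prealloc += total_bitmaps * PAGE_SIZE;
	prealloc += 8 * PAGE_SIZE;       /* slack, as in the hunk above */

	printf("prealloc=%lu\n", prealloc);  /* 8192 + 8192 + 32768 = 49152 */
	return 0;
}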
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 41a5688ffdfe..2aaba58b4856 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -194,7 +194,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
while (compressed_size > 0) {
cpage = compressed_pages[i];
cur_size = min_t(unsigned long, compressed_size,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
kaddr = kmap_atomic(cpage);
write_extent_buffer(leaf, kaddr, ptr, cur_size);
@@ -208,13 +208,13 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
compress_type);
} else {
page = find_get_page(inode->i_mapping,
- start >> PAGE_CACHE_SHIFT);
+ start >> PAGE_SHIFT);
btrfs_set_file_extent_compression(leaf, ei, 0);
kaddr = kmap_atomic(page);
- offset = start & (PAGE_CACHE_SIZE - 1);
+ offset = start & (PAGE_SIZE - 1);
write_extent_buffer(leaf, kaddr + offset, ptr, size);
kunmap_atomic(kaddr);
- page_cache_release(page);
+ put_page(page);
}
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
@@ -322,7 +322,7 @@ out:
* And at reserve time, it's always aligned to page size, so
* just free one page here.
*/
- btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE);
+ btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
btrfs_free_path(path);
btrfs_end_transaction(trans, root);
return ret;
@@ -435,8 +435,8 @@ static noinline void compress_file_range(struct inode *inode,
actual_end = min_t(u64, isize, end + 1);
again:
will_compress = 0;
- nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
- nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE);
+ nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
+ nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE);
/*
* we don't want to send crud past the end of i_size through
@@ -514,7 +514,7 @@ again:
if (!ret) {
unsigned long offset = total_compressed &
- (PAGE_CACHE_SIZE - 1);
+ (PAGE_SIZE - 1);
struct page *page = pages[nr_pages_ret - 1];
char *kaddr;
@@ -524,7 +524,7 @@ again:
if (offset) {
kaddr = kmap_atomic(page);
memset(kaddr + offset, 0,
- PAGE_CACHE_SIZE - offset);
+ PAGE_SIZE - offset);
kunmap_atomic(kaddr);
}
will_compress = 1;
@@ -580,7 +580,7 @@ cont:
* one last check to make sure the compression is really a
* win, compare the page count read with the blocks on disk
*/
- total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
+ total_in = ALIGN(total_in, PAGE_SIZE);
if (total_compressed >= total_in) {
will_compress = 0;
} else {
@@ -594,7 +594,7 @@ cont:
*/
for (i = 0; i < nr_pages_ret; i++) {
WARN_ON(pages[i]->mapping);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
kfree(pages);
pages = NULL;
@@ -650,7 +650,7 @@ cleanup_and_bail_uncompressed:
free_pages_out:
for (i = 0; i < nr_pages_ret; i++) {
WARN_ON(pages[i]->mapping);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
kfree(pages);
}
@@ -664,7 +664,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
for (i = 0; i < async_extent->nr_pages; i++) {
WARN_ON(async_extent->pages[i]->mapping);
- page_cache_release(async_extent->pages[i]);
+ put_page(async_extent->pages[i]);
}
kfree(async_extent->pages);
async_extent->nr_pages = 0;
@@ -966,7 +966,7 @@ static noinline int cow_file_range(struct inode *inode,
PAGE_END_WRITEBACK);
*nr_written = *nr_written +
- (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
+ (end - start + PAGE_SIZE) / PAGE_SIZE;
*page_started = 1;
goto out;
} else if (ret < 0) {
@@ -1106,8 +1106,8 @@ static noinline void async_cow_submit(struct btrfs_work *work)
async_cow = container_of(work, struct async_cow, work);
root = async_cow->root;
- nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
- PAGE_CACHE_SHIFT;
+ nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
+ PAGE_SHIFT;
/*
* atomic_sub_return implies a barrier for waitqueue_active
@@ -1164,8 +1164,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_cow_start, async_cow_submit,
async_cow_free);
- nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
- PAGE_CACHE_SHIFT;
+ nr_pages = (cur_end - start + PAGE_SIZE) >>
+ PAGE_SHIFT;
atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
btrfs_queue_work(root->fs_info->delalloc_workers,
@@ -1960,7 +1960,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state)
{
- WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
+ WARN_ON((end & (PAGE_SIZE - 1)) == 0);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
cached_state, GFP_NOFS);
}
@@ -1993,7 +1993,7 @@ again:
inode = page->mapping->host;
page_start = page_offset(page);
- page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
+ page_end = page_offset(page) + PAGE_SIZE - 1;
lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
&cached_state);
@@ -2003,7 +2003,7 @@ again:
goto out;
ordered = btrfs_lookup_ordered_range(inode, page_start,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (ordered) {
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
page_end, &cached_state, GFP_NOFS);
@@ -2014,7 +2014,7 @@ again:
}
ret = btrfs_delalloc_reserve_space(inode, page_start,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (ret) {
mapping_set_error(page->mapping, ret);
end_extent_writepage(page, ret, page_start, page_end);
@@ -2030,7 +2030,7 @@ out:
&cached_state, GFP_NOFS);
out_page:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
kfree(fixup);
}
@@ -2063,7 +2063,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
return -EAGAIN;
SetPageChecked(page);
- page_cache_get(page);
+ get_page(page);
btrfs_init_work(&fixup->work, btrfs_fixup_helper,
btrfs_writepage_fixup_worker, NULL, NULL);
fixup->page = page;
@@ -4247,7 +4247,7 @@ static int truncate_inline_extent(struct inode *inode,
if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
loff_t offset = new_size;
- loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
+ loff_t page_end = ALIGN(offset, PAGE_SIZE);
/*
* Zero out the remaining of the last page of our inline extent,
@@ -4633,7 +4633,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
struct extent_state *cached_state = NULL;
char *kaddr;
u32 blocksize = root->sectorsize;
- pgoff_t index = from >> PAGE_CACHE_SHIFT;
+ pgoff_t index = from >> PAGE_SHIFT;
unsigned offset = from & (blocksize - 1);
struct page *page;
gfp_t mask = btrfs_alloc_write_mask(mapping);
@@ -4668,7 +4668,7 @@ again:
lock_page(page);
if (page->mapping != mapping) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
goto again;
}
if (!PageUptodate(page)) {
@@ -4686,7 +4686,7 @@ again:
unlock_extent_cached(io_tree, block_start, block_end,
&cached_state, GFP_NOFS);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
goto again;
@@ -4728,7 +4728,7 @@ out_unlock:
btrfs_delalloc_release_space(inode, block_start,
blocksize);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
out:
return ret;
}
@@ -6717,7 +6717,7 @@ static noinline int uncompress_inline(struct btrfs_path *path,
read_extent_buffer(leaf, tmp, ptr, inline_size);
- max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
+ max_size = min_t(unsigned long, PAGE_SIZE, max_size);
ret = btrfs_decompress(compress_type, tmp, page,
extent_offset, inline_size, max_size);
kfree(tmp);
@@ -6879,8 +6879,8 @@ next:
size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
extent_offset = page_offset(page) + pg_offset - extent_start;
- copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
- size - extent_offset);
+ copy_size = min_t(u64, PAGE_SIZE - pg_offset,
+ size - extent_offset);
em->start = extent_start + extent_offset;
em->len = ALIGN(copy_size, root->sectorsize);
em->orig_block_len = em->len;
@@ -6899,9 +6899,9 @@ next:
map = kmap(page);
read_extent_buffer(leaf, map + pg_offset, ptr,
copy_size);
- if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
+ if (pg_offset + copy_size < PAGE_SIZE) {
memset(map + pg_offset + copy_size, 0,
- PAGE_CACHE_SIZE - pg_offset -
+ PAGE_SIZE - pg_offset -
copy_size);
}
kunmap(page);
@@ -7336,12 +7336,12 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
int start_idx;
int end_idx;
- start_idx = start >> PAGE_CACHE_SHIFT;
+ start_idx = start >> PAGE_SHIFT;
/*
* end is the last byte in the last page. end == start is legal
*/
- end_idx = end >> PAGE_CACHE_SHIFT;
+ end_idx = end >> PAGE_SHIFT;
rcu_read_lock();
@@ -7382,7 +7382,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
* include/linux/pagemap.h for details.
*/
if (unlikely(page != *pagep)) {
- page_cache_release(page);
+ put_page(page);
page = NULL;
}
}
@@ -7390,7 +7390,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
if (page) {
if (page->index <= end_idx)
found = true;
- page_cache_release(page);
+ put_page(page);
}
rcu_read_unlock();
@@ -8719,7 +8719,7 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
if (ret == 1) {
ClearPagePrivate(page);
set_page_private(page, 0);
- page_cache_release(page);
+ put_page(page);
}
return ret;
}
@@ -8739,7 +8739,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
u64 page_start = page_offset(page);
- u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+ u64 page_end = page_start + PAGE_SIZE - 1;
u64 start;
u64 end;
int inode_evicting = inode->i_state & I_FREEING;
@@ -8822,7 +8822,7 @@ again:
* 2) Not written to disk
* This means the reserved space should be freed here.
*/
- btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE);
+ btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
if (!inode_evicting) {
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY |
@@ -8837,7 +8837,7 @@ again:
if (PagePrivate(page)) {
ClearPagePrivate(page);
set_page_private(page, 0);
- page_cache_release(page);
+ put_page(page);
}
}
@@ -8874,11 +8874,11 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
u64 page_end;
u64 end;
- reserved_space = PAGE_CACHE_SIZE;
+ reserved_space = PAGE_SIZE;
sb_start_pagefault(inode->i_sb);
page_start = page_offset(page);
- page_end = page_start + PAGE_CACHE_SIZE - 1;
+ page_end = page_start + PAGE_SIZE - 1;
end = page_end;
/*
@@ -8934,15 +8934,15 @@ again:
goto again;
}
- if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) {
+ if (page->index == ((size - 1) >> PAGE_SHIFT)) {
reserved_space = round_up(size - page_start, root->sectorsize);
- if (reserved_space < PAGE_CACHE_SIZE) {
+ if (reserved_space < PAGE_SIZE) {
end = page_start + reserved_space - 1;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
btrfs_delalloc_release_space(inode, page_start,
- PAGE_CACHE_SIZE - reserved_space);
+ PAGE_SIZE - reserved_space);
}
}
@@ -8969,14 +8969,14 @@ again:
ret = 0;
/* page is wholly or partially inside EOF */
- if (page_start + PAGE_CACHE_SIZE > size)
- zero_start = size & ~PAGE_CACHE_MASK;
+ if (page_start + PAGE_SIZE > size)
+ zero_start = size & ~PAGE_MASK;
else
- zero_start = PAGE_CACHE_SIZE;
+ zero_start = PAGE_SIZE;
- if (zero_start != PAGE_CACHE_SIZE) {
+ if (zero_start != PAGE_SIZE) {
kaddr = kmap(page);
- memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
+ memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
flush_dcache_page(page);
kunmap(page);
}
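In btrfs_page_mkwrite(), a page that straddles EOF is zeroed only past i_size; `size & ~PAGE_MASK` is the in-page offset of EOF. A sketch of the same logic with assumed values:

#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long long size = 10000;      /* i_size, for illustration */
	unsigned long long page_start = 8192; /* page containing EOF */
	unsigned long zero_start;

	/* mirrors the btrfs_page_mkwrite() hunk above */
	if (page_start + PAGE_SIZE > size)
		zero_start = size & ~PAGE_MASK;  /* 10000 % 4096 = 1808 */
	else
		zero_start = PAGE_SIZE;

	printf("zero bytes [%lu, %lu)\n", zero_start, PAGE_SIZE);
	return 0;
}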
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 053e677839fe..5a23806ae418 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -898,7 +898,7 @@ static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
u64 end;
read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
+ em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
read_unlock(&em_tree->lock);
if (em) {
@@ -988,7 +988,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em;
- u64 len = PAGE_CACHE_SIZE;
+ u64 len = PAGE_SIZE;
/*
* hopefully we have this extent in the tree already, try without
@@ -1124,15 +1124,15 @@ static int cluster_pages_for_defrag(struct inode *inode,
struct extent_io_tree *tree;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
- file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+ file_end = (isize - 1) >> PAGE_SHIFT;
if (!isize || start_index > file_end)
return 0;
page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
ret = btrfs_delalloc_reserve_space(inode,
- start_index << PAGE_CACHE_SHIFT,
- page_cnt << PAGE_CACHE_SHIFT);
+ start_index << PAGE_SHIFT,
+ page_cnt << PAGE_SHIFT);
if (ret)
return ret;
i_done = 0;
@@ -1148,7 +1148,7 @@ again:
break;
page_start = page_offset(page);
- page_end = page_start + PAGE_CACHE_SIZE - 1;
+ page_end = page_start + PAGE_SIZE - 1;
while (1) {
lock_extent_bits(tree, page_start, page_end,
&cached_state);
@@ -1169,7 +1169,7 @@ again:
*/
if (page->mapping != inode->i_mapping) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
goto again;
}
}
@@ -1179,7 +1179,7 @@ again:
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
ret = -EIO;
break;
}
@@ -1187,7 +1187,7 @@ again:
if (page->mapping != inode->i_mapping) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
goto again;
}
@@ -1208,7 +1208,7 @@ again:
wait_on_page_writeback(pages[i]);
page_start = page_offset(pages[0]);
- page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
+ page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
lock_extent_bits(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, &cached_state);
@@ -1222,8 +1222,8 @@ again:
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
btrfs_delalloc_release_space(inode,
- start_index << PAGE_CACHE_SHIFT,
- (page_cnt - i_done) << PAGE_CACHE_SHIFT);
+ start_index << PAGE_SHIFT,
+ (page_cnt - i_done) << PAGE_SHIFT);
}
@@ -1240,17 +1240,17 @@ again:
set_page_extent_mapped(pages[i]);
set_page_dirty(pages[i]);
unlock_page(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
return i_done;
out:
for (i = 0; i < i_done; i++) {
unlock_page(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
btrfs_delalloc_release_space(inode,
- start_index << PAGE_CACHE_SHIFT,
- page_cnt << PAGE_CACHE_SHIFT);
+ start_index << PAGE_SHIFT,
+ page_cnt << PAGE_SHIFT);
return ret;
}
@@ -1273,7 +1273,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
int defrag_count = 0;
int compress_type = BTRFS_COMPRESS_ZLIB;
u32 extent_thresh = range->extent_thresh;
- unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT;
+ unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
unsigned long cluster = max_cluster;
u64 new_align = ~((u64)SZ_128K - 1);
struct page **pages = NULL;
@@ -1317,9 +1317,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
/* find the last page to defrag */
if (range->start + range->len > range->start) {
last_index = min_t(u64, isize - 1,
- range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
+ range->start + range->len - 1) >> PAGE_SHIFT;
} else {
- last_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+ last_index = (isize - 1) >> PAGE_SHIFT;
}
if (newer_than) {
@@ -1331,11 +1331,11 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
* we always align our defrag to help keep
* the extents in the file evenly spaced
*/
- i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+ i = (newer_off & new_align) >> PAGE_SHIFT;
} else
goto out_ra;
} else {
- i = range->start >> PAGE_CACHE_SHIFT;
+ i = range->start >> PAGE_SHIFT;
}
if (!max_to_defrag)
max_to_defrag = last_index - i + 1;
@@ -1348,7 +1348,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
inode->i_mapping->writeback_index = i;
while (i <= last_index && defrag_count < max_to_defrag &&
- (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) {
+ (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
/*
* make sure we stop running if someone unmounts
* the FS
@@ -1362,7 +1362,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
break;
}
- if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
+ if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
extent_thresh, &last_len, &skip,
&defrag_end, range->flags &
BTRFS_DEFRAG_RANGE_COMPRESS)) {
@@ -1371,14 +1371,14 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
* the should_defrag function tells us how much to skip
* bump our counter by the suggested amount
*/
- next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE);
+ next = DIV_ROUND_UP(skip, PAGE_SIZE);
i = max(i + 1, next);
continue;
}
if (!newer_than) {
- cluster = (PAGE_CACHE_ALIGN(defrag_end) >>
- PAGE_CACHE_SHIFT) - i;
+ cluster = (PAGE_ALIGN(defrag_end) >>
+ PAGE_SHIFT) - i;
cluster = min(cluster, max_cluster);
} else {
cluster = max_cluster;
@@ -1412,20 +1412,20 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
i += ret;
newer_off = max(newer_off + 1,
- (u64)i << PAGE_CACHE_SHIFT);
+ (u64)i << PAGE_SHIFT);
ret = find_new_extents(root, inode, newer_than,
&newer_off, SZ_64K);
if (!ret) {
range->start = newer_off;
- i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+ i = (newer_off & new_align) >> PAGE_SHIFT;
} else {
break;
}
} else {
if (ret > 0) {
i += ret;
- last_len += ret << PAGE_CACHE_SHIFT;
+ last_len += ret << PAGE_SHIFT;
} else {
i++;
last_len = 0;
@@ -1654,7 +1654,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
src_inode = file_inode(src.file);
if (src_inode->i_sb != file_inode(file)->i_sb) {
- btrfs_info(BTRFS_I(src_inode)->root->fs_info,
+ btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
"Snapshot src from another FS");
ret = -EXDEV;
} else if (!inode_owner_or_capable(src_inode)) {
@@ -1722,7 +1722,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
readonly = true;
if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
- if (vol_args->size > PAGE_CACHE_SIZE) {
+ if (vol_args->size > PAGE_SIZE) {
ret = -EINVAL;
goto free_args;
}
@@ -2806,12 +2806,12 @@ static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return ERR_PTR(-EIO);
}
if (page->mapping != inode->i_mapping) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return ERR_PTR(-EAGAIN);
}
}
@@ -2823,7 +2823,7 @@ static int gather_extent_pages(struct inode *inode, struct page **pages,
int num_pages, u64 off)
{
int i;
- pgoff_t index = off >> PAGE_CACHE_SHIFT;
+ pgoff_t index = off >> PAGE_SHIFT;
for (i = 0; i < num_pages; i++) {
again:
@@ -2932,12 +2932,12 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
pg = cmp->src_pages[i];
if (pg) {
unlock_page(pg);
- page_cache_release(pg);
+ put_page(pg);
}
pg = cmp->dst_pages[i];
if (pg) {
unlock_page(pg);
- page_cache_release(pg);
+ put_page(pg);
}
}
kfree(cmp->src_pages);
@@ -2949,7 +2949,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
u64 len, struct cmp_pages *cmp)
{
int ret;
- int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
+ int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
struct page **src_pgarr, **dst_pgarr;
/*
@@ -2987,12 +2987,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
int ret = 0;
int i;
struct page *src_page, *dst_page;
- unsigned int cmp_len = PAGE_CACHE_SIZE;
+ unsigned int cmp_len = PAGE_SIZE;
void *addr, *dst_addr;
i = 0;
while (len) {
- if (len < PAGE_CACHE_SIZE)
+ if (len < PAGE_SIZE)
cmp_len = len;
BUG_ON(i >= cmp->num_pages);
@@ -3191,7 +3191,7 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
if (olen > BTRFS_MAX_DEDUPE_LEN)
olen = BTRFS_MAX_DEDUPE_LEN;
- if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) {
+ if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
/*
* Btrfs does not support blocksize < page_size. As a
* result, btrfs_cmp_data() won't correctly handle
@@ -3891,8 +3891,8 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
* data immediately and not the previous data.
*/
truncate_inode_pages_range(&inode->i_data,
- round_down(destoff, PAGE_CACHE_SIZE),
- round_up(destoff + len, PAGE_CACHE_SIZE) - 1);
+ round_down(destoff, PAGE_SIZE),
+ round_up(destoff + len, PAGE_SIZE) - 1);
out_unlock:
if (!same_inode)
btrfs_double_inode_unlock(src, inode);
@@ -4124,7 +4124,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
/* we generally have at most 6 or so space infos, one for each raid
* level. So, a whole page should be more than enough for everyone
*/
- if (alloc_size > PAGE_CACHE_SIZE)
+ if (alloc_size > PAGE_SIZE)
return -ENOMEM;
space_args.total_spaces = 0;
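cluster_pages_for_defrag() reserves delalloc space for the whole cluster up front and hands back the slice for pages it failed to lock; with the rename, the byte counts are plain page shifts. A sketch of the bookkeeping:

#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */

int main(void)
{
	unsigned long page_cnt = 16, i_done = 12;  /* illustrative counts */

	unsigned long reserved = page_cnt << PAGE_SHIFT;            /* 65536 */
	unsigned long released = (page_cnt - i_done) << PAGE_SHIFT; /* 16384 */

	printf("kept %lu of %lu bytes reserved\n",
	       reserved - released, reserved);   /* 49152 of 65536 */
	return 0;
}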
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index a2f051347731..1adfbe7be6b8 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -55,8 +55,8 @@ static struct list_head *lzo_alloc_workspace(void)
return ERR_PTR(-ENOMEM);
workspace->mem = vmalloc(LZO1X_MEM_COMPRESS);
- workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
- workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
+ workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
+ workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
if (!workspace->mem || !workspace->buf || !workspace->cbuf)
goto fail;
@@ -116,7 +116,7 @@ static int lzo_compress_pages(struct list_head *ws,
*total_out = 0;
*total_in = 0;
- in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+ in_page = find_get_page(mapping, start >> PAGE_SHIFT);
data_in = kmap(in_page);
/*
@@ -133,10 +133,10 @@ static int lzo_compress_pages(struct list_head *ws,
tot_out = LZO_LEN;
pages[0] = out_page;
nr_pages = 1;
- pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;
+ pg_bytes_left = PAGE_SIZE - LZO_LEN;
/* compress at most one page of data each time */
- in_len = min(len, PAGE_CACHE_SIZE);
+ in_len = min(len, PAGE_SIZE);
while (tot_in < len) {
ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
&out_len, workspace->mem);
@@ -201,7 +201,7 @@ static int lzo_compress_pages(struct list_head *ws,
cpage_out = kmap(out_page);
pages[nr_pages++] = out_page;
- pg_bytes_left = PAGE_CACHE_SIZE;
+ pg_bytes_left = PAGE_SIZE;
out_offset = 0;
}
}
@@ -221,12 +221,12 @@ static int lzo_compress_pages(struct list_head *ws,
bytes_left = len - tot_in;
kunmap(in_page);
- page_cache_release(in_page);
+ put_page(in_page);
- start += PAGE_CACHE_SIZE;
- in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+ start += PAGE_SIZE;
+ in_page = find_get_page(mapping, start >> PAGE_SHIFT);
data_in = kmap(in_page);
- in_len = min(bytes_left, PAGE_CACHE_SIZE);
+ in_len = min(bytes_left, PAGE_SIZE);
}
if (tot_out > tot_in)
@@ -248,7 +248,7 @@ out:
if (in_page) {
kunmap(in_page);
- page_cache_release(in_page);
+ put_page(in_page);
}
return ret;
@@ -266,7 +266,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
char *data_in;
unsigned long page_in_index = 0;
unsigned long page_out_index = 0;
- unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
+ unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long buf_offset = 0;
unsigned long bytes;
@@ -289,7 +289,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
tot_in = LZO_LEN;
in_offset = LZO_LEN;
tot_len = min_t(size_t, srclen, tot_len);
- in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;
+ in_page_bytes_left = PAGE_SIZE - LZO_LEN;
tot_out = 0;
pg_offset = 0;
@@ -345,12 +345,12 @@ cont:
data_in = kmap(pages_in[++page_in_index]);
- in_page_bytes_left = PAGE_CACHE_SIZE;
+ in_page_bytes_left = PAGE_SIZE;
in_offset = 0;
}
}
- out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
+ out_len = lzo1x_worst_compress(PAGE_SIZE);
ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
&out_len);
if (need_unmap)
@@ -399,7 +399,7 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
in_len = read_compress_length(data_in);
data_in += LZO_LEN;
- out_len = PAGE_CACHE_SIZE;
+ out_len = PAGE_SIZE;
ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
if (ret != LZO_E_OK) {
printk(KERN_WARNING "BTRFS: decompress failed!\n");
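lzo_compress_pages() feeds the compressor at most one page per iteration (`in_len = min(len, PAGE_SIZE)` in the hunks above). A sketch of the chunking, pages assumed 4 KiB:

#include <stdio.h>

#define PAGE_SIZE 4096UL                 /* assumption: 4 KiB pages */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long len = 10000, tot_in = 0, nr_chunks = 0;

	/* one page of input per compressor call, as in the hunk */
	unsigned long in_len = MIN(len, PAGE_SIZE);
	while (tot_in < len) {
		tot_in += in_len;
		nr_chunks++;
		in_len = MIN(len - tot_in, PAGE_SIZE);
	}
	printf("%lu bytes in %lu chunks\n", tot_in, nr_chunks); /* 3 chunks */
	return 0;
}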
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5279fdae7142..9e119552ed32 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1463,6 +1463,7 @@ struct btrfs_qgroup_extent_record
u64 bytenr = record->bytenr;
assert_spin_locked(&delayed_refs->lock);
+ trace_btrfs_qgroup_insert_dirty_extent(record);
while (*p) {
parent_node = *p;
@@ -1594,6 +1595,9 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
+ trace_qgroup_update_counters(qg->qgroupid, cur_old_count,
+ cur_new_count);
+
/* Rfer update part */
if (cur_old_count == 0 && cur_new_count > 0) {
qg->rfer += num_bytes;
@@ -1683,6 +1687,9 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
goto out_free;
BUG_ON(!fs_info->quota_root);
+ trace_btrfs_qgroup_account_extent(bytenr, num_bytes, nr_old_roots,
+ nr_new_roots);
+
qgroups = ulist_alloc(GFP_NOFS);
if (!qgroups) {
ret = -ENOMEM;
@@ -1752,6 +1759,8 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
record = rb_entry(node, struct btrfs_qgroup_extent_record,
node);
+ trace_btrfs_qgroup_account_extents(record);
+
if (!ret) {
/*
* Use (u64)-1 as time_seq to do special search, which
@@ -1842,8 +1851,10 @@ out:
}
/*
- * copy the acounting information between qgroups. This is necessary when a
- * snapshot or a subvolume is created
+ * Copy the accounting information between qgroups. This is necessary
+ * when a snapshot or a subvolume is created. Throwing an error will
+ * cause a transaction abort so we take extra care here to only error
+ * when a readonly fs is a reasonable outcome.
*/
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
@@ -1873,15 +1884,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2 * inherit->num_excl_copies;
for (i = 0; i < nums; ++i) {
srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
- if (!srcgroup) {
- ret = -EINVAL;
- goto out;
- }
- if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
- ret = -EINVAL;
- goto out;
- }
+ /*
+ * Zero out invalid groups so we can ignore
+ * them later.
+ */
+ if (!srcgroup ||
+ ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
+ *i_qgroups = 0ULL;
+
++i_qgroups;
}
}
@@ -1916,17 +1927,19 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
*/
if (inherit) {
i_qgroups = (u64 *)(inherit + 1);
- for (i = 0; i < inherit->num_qgroups; ++i) {
+ for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
+ if (*i_qgroups == 0)
+ continue;
ret = add_qgroup_relation_item(trans, quota_root,
objectid, *i_qgroups);
- if (ret)
+ if (ret && ret != -EEXIST)
goto out;
ret = add_qgroup_relation_item(trans, quota_root,
*i_qgroups, objectid);
- if (ret)
+ if (ret && ret != -EEXIST)
goto out;
- ++i_qgroups;
}
+ ret = 0;
}
@@ -1987,17 +2000,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
i_qgroups = (u64 *)(inherit + 1);
for (i = 0; i < inherit->num_qgroups; ++i) {
- ret = add_relation_rb(quota_root->fs_info, objectid,
- *i_qgroups);
- if (ret)
- goto unlock;
+ if (*i_qgroups) {
+ ret = add_relation_rb(quota_root->fs_info, objectid,
+ *i_qgroups);
+ if (ret)
+ goto unlock;
+ }
++i_qgroups;
}
- for (i = 0; i < inherit->num_ref_copies; ++i) {
+ for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
struct btrfs_qgroup *src;
struct btrfs_qgroup *dst;
+ if (!i_qgroups[0] || !i_qgroups[1])
+ continue;
+
src = find_qgroup_rb(fs_info, i_qgroups[0]);
dst = find_qgroup_rb(fs_info, i_qgroups[1]);
@@ -2008,12 +2026,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
dst->rfer = src->rfer - level_size;
dst->rfer_cmpr = src->rfer_cmpr - level_size;
- i_qgroups += 2;
}
- for (i = 0; i < inherit->num_excl_copies; ++i) {
+ for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
struct btrfs_qgroup *src;
struct btrfs_qgroup *dst;
+ if (!i_qgroups[0] || !i_qgroups[1])
+ continue;
+
src = find_qgroup_rb(fs_info, i_qgroups[0]);
dst = find_qgroup_rb(fs_info, i_qgroups[1]);
@@ -2024,7 +2044,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
dst->excl = src->excl + level_size;
dst->excl_cmpr = src->excl_cmpr + level_size;
- i_qgroups += 2;
}
unlock:
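The qgroup_inherit rework above trades hard failures for a skip list: source groups that fail validation are zeroed out up front, every later loop treats a zero id as "ignore this entry", and -EEXIST from the relation items is tolerated for the same reason. A minimal userspace sketch of the skip-sentinel pattern, with hypothetical names (apply_pairs, ids) standing in for the btrfs helpers:

#include <stdio.h>

/* Pairs zeroed out during validation are skipped rather than
 * aborting the whole operation, mirroring the loops above. */
static void apply_pairs(const unsigned long long *ids, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i, ids += 2) {
		if (!ids[0] || !ids[1])
			continue;	/* invalidated pair: ignore */
		printf("copy counters: %llu -> %llu\n", ids[0], ids[1]);
	}
}

int main(void)
{
	unsigned long long ids[] = { 5, 256, 0, 0, 7, 258 };

	apply_pairs(ids, 3);	/* the middle pair is skipped */
	return 0;
}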
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 55161369fab1..0b7792e02dd5 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -270,7 +270,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
s = kmap(rbio->bio_pages[i]);
d = kmap(rbio->stripe_pages[i]);
- memcpy(d, s, PAGE_CACHE_SIZE);
+ memcpy(d, s, PAGE_SIZE);
kunmap(rbio->bio_pages[i]);
kunmap(rbio->stripe_pages[i]);
@@ -962,7 +962,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
*/
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
- return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes;
+ return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
/*
@@ -1078,7 +1078,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
u64 disk_start;
stripe = &rbio->bbio->stripes[stripe_nr];
- disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
+ disk_start = stripe->physical + (page_index << PAGE_SHIFT);
/* if the device is missing, just fail this stripe */
if (!stripe->dev->bdev)
@@ -1096,8 +1096,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
if (last_end == disk_start && stripe->dev->bdev &&
!last->bi_error &&
last->bi_bdev == stripe->dev->bdev) {
- ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
- if (ret == PAGE_CACHE_SIZE)
+ ret = bio_add_page(last, page, PAGE_SIZE, 0);
+ if (ret == PAGE_SIZE)
return 0;
}
}
@@ -1111,7 +1111,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
bio->bi_bdev = stripe->dev->bdev;
bio->bi_iter.bi_sector = disk_start >> 9;
- bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+ bio_add_page(bio, page, PAGE_SIZE, 0);
bio_list_add(bio_list, bio);
return 0;
}
@@ -1154,7 +1154,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
bio_list_for_each(bio, &rbio->bio_list) {
start = (u64)bio->bi_iter.bi_sector << 9;
stripe_offset = start - rbio->bbio->raid_map[0];
- page_index = stripe_offset >> PAGE_CACHE_SHIFT;
+ page_index = stripe_offset >> PAGE_SHIFT;
for (i = 0; i < bio->bi_vcnt; i++) {
p = bio->bi_io_vec[i].bv_page;
@@ -1253,7 +1253,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
} else {
/* raid5 */
memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
- run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+ run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
}
@@ -1914,7 +1914,7 @@ pstripe:
/* Copy parity block into failed block to start with */
memcpy(pointers[faila],
pointers[rbio->nr_data],
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
/* rearrange the pointer array */
p = pointers[faila];
@@ -1923,7 +1923,7 @@ pstripe:
pointers[rbio->nr_data - 1] = p;
/* xor in the rest */
- run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
+ run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
}
/* if we're doing this rebuild as part of an rmw, go through
* and set all of our private rbio pages in the
@@ -2250,7 +2250,7 @@ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
rbio->stripe_len * rbio->nr_data);
stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
- index = stripe_offset >> PAGE_CACHE_SHIFT;
+ index = stripe_offset >> PAGE_SHIFT;
rbio->bio_pages[index] = page;
}
@@ -2365,14 +2365,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
} else {
/* raid5 */
memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
- run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+ run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
}
/* Check scrubbing parity and repair it */
p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
parity = kmap(p);
- if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
- memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
+ if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
+ memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
else
/* Parity is right, no need to write it back */
bitmap_clear(rbio->dbitmap, pagenr, 1);
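The raid56.c hunks are purely mechanical. As they stood immediately before this series, the PAGE_CACHE_* names in include/linux/pagemap.h were straight aliases for the page macros, so the substitution cannot change generated code (abridged):

#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)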
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index b892914968c1..298631eaee78 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -226,7 +226,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
/* find extent */
spin_lock(&fs_info->reada_lock);
re = radix_tree_lookup(&fs_info->reada_tree,
- start >> PAGE_CACHE_SHIFT);
+ start >> PAGE_SHIFT);
if (re)
re->refcnt++;
spin_unlock(&fs_info->reada_lock);
@@ -257,7 +257,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
zone = NULL;
spin_lock(&fs_info->reada_lock);
ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
- logical >> PAGE_CACHE_SHIFT, 1);
+ logical >> PAGE_SHIFT, 1);
if (ret == 1 && logical >= zone->start && logical <= zone->end) {
kref_get(&zone->refcnt);
spin_unlock(&fs_info->reada_lock);
@@ -294,13 +294,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
spin_lock(&fs_info->reada_lock);
ret = radix_tree_insert(&dev->reada_zones,
- (unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
+ (unsigned long)(zone->end >> PAGE_SHIFT),
zone);
if (ret == -EEXIST) {
kfree(zone);
ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
- logical >> PAGE_CACHE_SHIFT, 1);
+ logical >> PAGE_SHIFT, 1);
if (ret == 1 && logical >= zone->start && logical <= zone->end)
kref_get(&zone->refcnt);
else
@@ -326,7 +326,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
u64 length;
int real_stripes;
int nzones = 0;
- unsigned long index = logical >> PAGE_CACHE_SHIFT;
+ unsigned long index = logical >> PAGE_SHIFT;
int dev_replace_is_ongoing;
int have_zone = 0;
@@ -495,7 +495,7 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info,
struct reada_extent *re)
{
int i;
- unsigned long index = re->logical >> PAGE_CACHE_SHIFT;
+ unsigned long index = re->logical >> PAGE_SHIFT;
spin_lock(&fs_info->reada_lock);
if (--re->refcnt) {
@@ -538,7 +538,7 @@ static void reada_zone_release(struct kref *kref)
struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
radix_tree_delete(&zone->device->reada_zones,
- zone->end >> PAGE_CACHE_SHIFT);
+ zone->end >> PAGE_SHIFT);
kfree(zone);
}
@@ -587,7 +587,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
int i;
- unsigned long index = zone->end >> PAGE_CACHE_SHIFT;
+ unsigned long index = zone->end >> PAGE_SHIFT;
for (i = 0; i < zone->ndevs; ++i) {
struct reada_zone *peer;
@@ -622,7 +622,7 @@ static int reada_pick_zone(struct btrfs_device *dev)
(void **)&zone, index, 1);
if (ret == 0)
break;
- index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+ index = (zone->end >> PAGE_SHIFT) + 1;
if (zone->locked) {
if (zone->elems > top_locked_elems) {
top_locked_elems = zone->elems;
@@ -673,7 +673,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
* plugging to speed things up
*/
ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
- dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+ dev->reada_next >> PAGE_SHIFT, 1);
if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
ret = reada_pick_zone(dev);
if (!ret) {
@@ -682,7 +682,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
}
re = NULL;
ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
- dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+ dev->reada_next >> PAGE_SHIFT, 1);
}
if (ret == 0) {
spin_unlock(&fs_info->reada_lock);
@@ -838,7 +838,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
printk(KERN_CONT " curr off %llu",
device->reada_next - zone->start);
printk(KERN_CONT "\n");
- index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+ index = (zone->end >> PAGE_SHIFT) + 1;
}
cnt = 0;
index = 0;
@@ -864,7 +864,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
}
}
printk(KERN_CONT "\n");
- index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+ index = (re->logical >> PAGE_SHIFT) + 1;
if (++cnt > 15)
break;
}
@@ -880,7 +880,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
if (ret == 0)
break;
if (!re->scheduled) {
- index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+ index = (re->logical >> PAGE_SHIFT) + 1;
continue;
}
printk(KERN_DEBUG
@@ -897,7 +897,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
}
}
printk(KERN_CONT "\n");
- index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+ index = (re->logical >> PAGE_SHIFT) + 1;
}
spin_unlock(&fs_info->reada_lock);
}
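reada.c keys its radix trees by page index rather than byte offset (zones are inserted under zone->end >> PAGE_SHIFT), so every lookup shifts by PAGE_SHIFT first. A tiny worked example, assuming the usual 4 KiB pages (PAGE_SHIFT == 12):

#include <stdio.h>

int main(void)
{
	unsigned long long logical = 0x12345678ULL;	/* byte offset */
	unsigned long key = logical >> 12;		/* page index  */

	/* all byte offsets inside one 4 KiB page map to the same key */
	printf("key = 0x%lx\n", key);			/* 0x12345 */
	return 0;
}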
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 2bd0011450df..08ef890deca6 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1850,6 +1850,7 @@ again:
eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
+ break;
} else if (!extent_buffer_uptodate(eb)) {
ret = -EIO;
free_extent_buffer(eb);
@@ -3129,10 +3130,10 @@ static int relocate_file_extent_cluster(struct inode *inode,
if (ret)
goto out;
- index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
- last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
+ index = (cluster->start - offset) >> PAGE_SHIFT;
+ last_index = (cluster->end - offset) >> PAGE_SHIFT;
while (index <= last_index) {
- ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+ ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE);
if (ret)
goto out;
@@ -3145,7 +3146,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
mask);
if (!page) {
btrfs_delalloc_release_metadata(inode,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
ret = -ENOMEM;
goto out;
}
@@ -3162,16 +3163,16 @@ static int relocate_file_extent_cluster(struct inode *inode,
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
btrfs_delalloc_release_metadata(inode,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
ret = -EIO;
goto out;
}
}
page_start = page_offset(page);
- page_end = page_start + PAGE_CACHE_SIZE - 1;
+ page_end = page_start + PAGE_SIZE - 1;
lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
@@ -3191,7 +3192,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
unlock_extent(&BTRFS_I(inode)->io_tree,
page_start, page_end);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
index++;
balance_dirty_pages_ratelimited(inode->i_mapping);
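Apart from the page-size rename, the relocation.c diff fixes a missing break: read_tree_block() can return an error encoded in the pointer, and the old code set ret but continued the loop body with an unusable eb. The added line follows the standard ERR_PTR convention, whose shape is:

	eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
	if (IS_ERR(eb)) {
		ret = PTR_ERR(eb);	/* negative errno packed in the pointer */
		break;			/* stop: eb is not a usable buffer     */
	}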
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 39dbdcbf4d13..4678f03e878e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -703,7 +703,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
if (IS_ERR(inode))
return PTR_ERR(inode);
- index = offset >> PAGE_CACHE_SHIFT;
+ index = offset >> PAGE_SHIFT;
page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
if (!page) {
@@ -1636,7 +1636,7 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
if (spage->io_error) {
void *mapped_buffer = kmap_atomic(spage->page);
- memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
+ memset(mapped_buffer, 0, PAGE_SIZE);
flush_dcache_page(spage->page);
kunmap_atomic(mapped_buffer);
}
@@ -4294,8 +4294,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
goto out;
}
- while (len >= PAGE_CACHE_SIZE) {
- index = offset >> PAGE_CACHE_SHIFT;
+ while (len >= PAGE_SIZE) {
+ index = offset >> PAGE_SHIFT;
again:
page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
if (!page) {
@@ -4326,7 +4326,7 @@ again:
*/
if (page->mapping != inode->i_mapping) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
goto again;
}
if (!PageUptodate(page)) {
@@ -4348,15 +4348,15 @@ again:
ret = err;
next_page:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (ret)
break;
- offset += PAGE_CACHE_SIZE;
- physical_for_dev_replace += PAGE_CACHE_SIZE;
- nocow_ctx_logical += PAGE_CACHE_SIZE;
- len -= PAGE_CACHE_SIZE;
+ offset += PAGE_SIZE;
+ physical_for_dev_replace += PAGE_SIZE;
+ nocow_ctx_logical += PAGE_SIZE;
+ len -= PAGE_SIZE;
}
ret = COPY_COMPLETE;
out:
@@ -4390,8 +4390,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
bio->bi_bdev = dev->bdev;
- ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
- if (ret != PAGE_CACHE_SIZE) {
+ ret = bio_add_page(bio, page, PAGE_SIZE, 0);
+ if (ret != PAGE_SIZE) {
leave_with_eio:
bio_put(bio);
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
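bio_add_page() returns the number of bytes actually added, so the scrub paths compare its return value against the full page size to detect a failed or partial add. The shape of the check, as used in write_page_nocow() above:

	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		/* page could not be attached in full: drop the bio
		 * and record a write error on the device */
		bio_put(bio);
		return -EIO;
	}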
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 19b7bf4284ee..8d358c547c59 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4449,9 +4449,9 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
struct page *page;
char *addr;
struct btrfs_key key;
- pgoff_t index = offset >> PAGE_CACHE_SHIFT;
+ pgoff_t index = offset >> PAGE_SHIFT;
pgoff_t last_index;
- unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
+ unsigned pg_offset = offset & ~PAGE_MASK;
ssize_t ret = 0;
key.objectid = sctx->cur_ino;
@@ -4471,7 +4471,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
if (len == 0)
goto out;
- last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+ last_index = (offset + len - 1) >> PAGE_SHIFT;
/* initial readahead */
memset(&sctx->ra, 0, sizeof(struct file_ra_state));
@@ -4481,7 +4481,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
while (index <= last_index) {
unsigned cur_len = min_t(unsigned, len,
- PAGE_CACHE_SIZE - pg_offset);
+ PAGE_SIZE - pg_offset);
page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
@@ -4493,7 +4493,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
ret = -EIO;
break;
}
@@ -4503,7 +4503,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
kunmap(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
index++;
pg_offset = 0;
len -= cur_len;
@@ -4804,7 +4804,7 @@ static int clone_range(struct send_ctx *sctx,
type = btrfs_file_extent_type(leaf, ei);
if (type == BTRFS_FILE_EXTENT_INLINE) {
ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
- ext_len = PAGE_CACHE_ALIGN(ext_len);
+ ext_len = PAGE_ALIGN(ext_len);
} else {
ext_len = btrfs_file_extent_num_bytes(leaf, ei);
}
@@ -4886,7 +4886,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
* but there may be items after this page. Make
* sure to send the whole thing
*/
- len = PAGE_CACHE_ALIGN(len);
+ len = PAGE_ALIGN(len);
} else {
len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
}
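send.c splits a file offset into a page index plus an offset inside that page, so the first copy may be shorter than a page; pg_offset then resets to 0 for the remaining pages. A worked example (PG_SHIFT/PG_SIZE are local stand-ins for the page macros, assuming 4 KiB pages):

#include <stdio.h>

#define PG_SHIFT 12
#define PG_SIZE  (1UL << PG_SHIFT)

int main(void)
{
	unsigned long offset = 0x12345;			/* byte offset in file */
	unsigned long index = offset >> PG_SHIFT;	/* 0x12  */
	unsigned long pg_off = offset & (PG_SIZE - 1);	/* 0x345 */
	unsigned long first_copy = PG_SIZE - pg_off;	/* 0xcbb bytes */

	printf("page %lu, in-page offset %lu, first copy %lu\n",
	       index, pg_off, first_copy);
	return 0;
}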
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b976597b0721..e05619f241be 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -66,7 +66,7 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
\
if (token && token->kaddr && token->offset <= offset && \
token->eb == eb && \
- (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
+ (token->offset + PAGE_SIZE >= offset + size)) { \
kaddr = token->kaddr; \
p = kaddr + part_offset - token->offset; \
res = get_unaligned_le##bits(p + off); \
@@ -104,7 +104,7 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \
\
if (token && token->kaddr && token->offset <= offset && \
token->eb == eb && \
- (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
+ (token->offset + PAGE_SIZE >= offset + size)) { \
kaddr = token->kaddr; \
p = kaddr + part_offset - token->offset; \
put_unaligned_le##bits(val, p + off); \
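The token check in struct-funcs.c only reuses a cached kmap when the whole field lies inside the single extent-buffer page the token maps. Extracted as a hypothetical predicate:

static inline int token_covers(unsigned long tok_off, unsigned long off,
			       unsigned long size, unsigned long page_size)
{
	/* the cached mapping spans [tok_off, tok_off + page_size) */
	return tok_off <= off && tok_off + page_size >= off + size;
}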
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 669b58201e36..70948b13bc81 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -32,8 +32,8 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
{
int ret;
struct page *pages[16];
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+ unsigned long index = start >> PAGE_SHIFT;
+ unsigned long end_index = end >> PAGE_SHIFT;
unsigned long nr_pages = end_index - index + 1;
int i;
int count = 0;
@@ -49,9 +49,9 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
count++;
if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
unlock_page(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
if (flags & PROCESS_RELEASE)
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
nr_pages -= ret;
index += ret;
@@ -93,7 +93,7 @@ static int test_find_delalloc(void)
* everything to make sure our pages don't get evicted and screw up our
* test.
*/
- for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) {
+ for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
if (!page) {
test_msg("Failed to allocate test page\n");
@@ -104,7 +104,7 @@ static int test_find_delalloc(void)
if (index) {
unlock_page(page);
} else {
- page_cache_get(page);
+ get_page(page);
locked_page = page;
}
}
@@ -129,7 +129,7 @@ static int test_find_delalloc(void)
}
unlock_extent(&tmp, start, end);
unlock_page(locked_page);
- page_cache_release(locked_page);
+ put_page(locked_page);
/*
* Test this scenario
@@ -139,7 +139,7 @@ static int test_find_delalloc(void)
*/
test_start = SZ_64M;
locked_page = find_lock_page(inode->i_mapping,
- test_start >> PAGE_CACHE_SHIFT);
+ test_start >> PAGE_SHIFT);
if (!locked_page) {
test_msg("Couldn't find the locked page\n");
goto out_bits;
@@ -165,7 +165,7 @@ static int test_find_delalloc(void)
}
unlock_extent(&tmp, start, end);
/* locked_page was unlocked above */
- page_cache_release(locked_page);
+ put_page(locked_page);
/*
* Test this scenario
@@ -174,7 +174,7 @@ static int test_find_delalloc(void)
*/
test_start = max_bytes + 4096;
locked_page = find_lock_page(inode->i_mapping, test_start >>
- PAGE_CACHE_SHIFT);
+ PAGE_SHIFT);
if (!locked_page) {
test_msg("Could'nt find the locked page\n");
goto out_bits;
@@ -225,13 +225,13 @@ static int test_find_delalloc(void)
* range we want to find.
*/
page = find_get_page(inode->i_mapping,
- (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT);
+ (max_bytes + SZ_1M) >> PAGE_SHIFT);
if (!page) {
test_msg("Couldn't find our page\n");
goto out_bits;
}
ClearPageDirty(page);
- page_cache_release(page);
+ put_page(page);
/* We unlocked it in the previous test */
lock_page(locked_page);
@@ -239,7 +239,7 @@ static int test_find_delalloc(void)
end = 0;
/*
* Currently if we fail to find dirty pages in the delalloc range we
- * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If
+ * will adjust max_bytes down to PAGE_SIZE and then re-search. If
* this changes at any point in the future we will need to fix this
* test's expected behavior.
*/
@@ -249,9 +249,9 @@ static int test_find_delalloc(void)
test_msg("Didn't find our range\n");
goto out_bits;
}
- if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) {
+ if (start != test_start && end != test_start + PAGE_SIZE - 1) {
test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
- test_start, test_start + PAGE_CACHE_SIZE - 1, start,
+ test_start, test_start + PAGE_SIZE - 1, start,
end);
goto out_bits;
}
@@ -265,7 +265,7 @@ out_bits:
clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
out:
if (locked_page)
- page_cache_release(locked_page);
+ put_page(locked_page);
process_page_range(inode, 0, total_dirty - 1,
PROCESS_UNLOCK | PROCESS_RELEASE);
iput(inode);
@@ -298,9 +298,9 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
return -EINVAL;
}
- bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+ bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
sizeof(long) * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
+ extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
sizeof(long) * BITS_PER_BYTE);
if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
test_msg("Setting straddling pages failed\n");
@@ -309,10 +309,10 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
bitmap_clear(bitmap,
- (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+ (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
sizeof(long) * BITS_PER_BYTE);
extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
+ extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
sizeof(long) * BITS_PER_BYTE);
if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
test_msg("Clearing straddling pages failed\n");
@@ -353,7 +353,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
static int test_eb_bitmaps(void)
{
- unsigned long len = PAGE_CACHE_SIZE * 4;
+ unsigned long len = PAGE_SIZE * 4;
unsigned long *bitmap;
struct extent_buffer *eb;
int ret;
@@ -379,7 +379,7 @@ static int test_eb_bitmaps(void)
/* Do it over again with an extent buffer which isn't page-aligned. */
free_extent_buffer(eb);
- eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len);
+ eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
if (!eb) {
test_msg("Couldn't allocate test extent buffer\n");
kfree(bitmap);
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index c9ad97b1e690..514247515312 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -22,7 +22,7 @@
#include "../disk-io.h"
#include "../free-space-cache.h"
-#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+#define BITS_PER_BITMAP (PAGE_SIZE * 8)
/*
* This test just does basic sanity checking, making sure we can add an extent
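With BITS_PER_BITMAP now spelled PAGE_SIZE * 8, one bitmap control page of PAGE_SIZE bytes provides PAGE_SIZE * 8 bits: 32768 bits on 4 KiB pages, 524288 on 64 KiB pages. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* assumed 4 KiB pages */

	printf("bits per bitmap: %lu\n", page_size * 8);	/* 32768 */
	return 0;
}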
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a82e20ecbee1..e692eea87af6 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4415,6 +4415,127 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
return ret;
}
+/*
+ * When we are logging a new inode X, check whether it has a reference that
+ * matches a reference from some other inode Y that was created in a past
+ * transaction and renamed in the current transaction. If we skip this check,
+ * then at log replay time we can lose inode Y (and all its files if it's a
+ * directory):
+ *
+ * mkdir /mnt/x
+ * echo "hello world" > /mnt/x/foobar
+ * sync
+ * mv /mnt/x /mnt/y
+ * mkdir /mnt/x # or touch /mnt/x
+ * xfs_io -c fsync /mnt/x
+ * <power fail>
+ * mount fs, trigger log replay
+ *
+ * After the log replay procedure, we would lose the first directory and all its
+ * files (file foobar).
+ * For the case where inode Y is not a directory we simply end up losing it:
+ *
+ * echo "123" > /mnt/foo
+ * sync
+ * mv /mnt/foo /mnt/bar
+ * echo "abc" > /mnt/foo
+ * xfs_io -c fsync /mnt/foo
+ * <power fail>
+ *
+ * We also need this for cases where a snapshot entry is replaced by some other
+ * entry (file or directory) otherwise we end up with an unreplayable log due to
+ * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
+ * if it were a regular entry:
+ *
+ * mkdir /mnt/x
+ * btrfs subvolume snapshot /mnt /mnt/x/snap
+ * btrfs subvolume delete /mnt/x/snap
+ * rmdir /mnt/x
+ * mkdir /mnt/x
+ * fsync /mnt/x or fsync some new file inside it
+ * <power fail>
+ *
+ * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
+ * the same transaction.
+ */
+static int btrfs_check_ref_name_override(struct extent_buffer *eb,
+ const int slot,
+ const struct btrfs_key *key,
+ struct inode *inode)
+{
+ int ret;
+ struct btrfs_path *search_path;
+ char *name = NULL;
+ u32 name_len = 0;
+ u32 item_size = btrfs_item_size_nr(eb, slot);
+ u32 cur_offset = 0;
+ unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
+
+ search_path = btrfs_alloc_path();
+ if (!search_path)
+ return -ENOMEM;
+ search_path->search_commit_root = 1;
+ search_path->skip_locking = 1;
+
+ while (cur_offset < item_size) {
+ u64 parent;
+ u32 this_name_len;
+ u32 this_len;
+ unsigned long name_ptr;
+ struct btrfs_dir_item *di;
+
+ if (key->type == BTRFS_INODE_REF_KEY) {
+ struct btrfs_inode_ref *iref;
+
+ iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
+ parent = key->offset;
+ this_name_len = btrfs_inode_ref_name_len(eb, iref);
+ name_ptr = (unsigned long)(iref + 1);
+ this_len = sizeof(*iref) + this_name_len;
+ } else {
+ struct btrfs_inode_extref *extref;
+
+ extref = (struct btrfs_inode_extref *)(ptr +
+ cur_offset);
+ parent = btrfs_inode_extref_parent(eb, extref);
+ this_name_len = btrfs_inode_extref_name_len(eb, extref);
+ name_ptr = (unsigned long)&extref->name;
+ this_len = sizeof(*extref) + this_name_len;
+ }
+
+ if (this_name_len > name_len) {
+ char *new_name;
+
+ new_name = krealloc(name, this_name_len, GFP_NOFS);
+ if (!new_name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ name_len = this_name_len;
+ name = new_name;
+ }
+
+ read_extent_buffer(eb, name, name_ptr, this_name_len);
+ di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
+ search_path, parent,
+ name, this_name_len, 0);
+ if (di && !IS_ERR(di)) {
+ ret = 1;
+ goto out;
+ } else if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+ btrfs_release_path(search_path);
+
+ cur_offset += this_len;
+ }
+ ret = 0;
+out:
+ btrfs_free_path(search_path);
+ kfree(name);
+ return ret;
+}
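btrfs_check_ref_name_override() grows a single scratch buffer with krealloc() as it walks names of increasing length; like userspace realloc(), krealloc() returns NULL on failure and leaves the old buffer intact, which is why the code assigns through a temporary before overwriting name. A userspace analog of the idiom (grow_buf is a hypothetical helper):

#include <stdlib.h>

static int grow_buf(char **buf, size_t *cap, size_t want)
{
	char *n;

	if (want <= *cap)
		return 0;
	n = realloc(*buf, want);
	if (!n)
		return -1;	/* *buf is still valid and still owned */
	*buf = n;
	*cap = want;
	return 0;
}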
+
/* log a single inode in the tree log.
* At least one parent directory for this inode must exist in the tree
* or be logged already.
@@ -4602,6 +4723,22 @@ again:
if (min_key.type == BTRFS_INODE_ITEM_KEY)
need_log_inode_item = false;
+ if ((min_key.type == BTRFS_INODE_REF_KEY ||
+ min_key.type == BTRFS_INODE_EXTREF_KEY) &&
+ BTRFS_I(inode)->generation == trans->transid) {
+ ret = btrfs_check_ref_name_override(path->nodes[0],
+ path->slots[0],
+ &min_key, inode);
+ if (ret < 0) {
+ err = ret;
+ goto out_unlock;
+ } else if (ret > 0) {
+ err = 1;
+ btrfs_set_log_full_commit(root->fs_info, trans);
+ goto out_unlock;
+ }
+ }
+
/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
if (ins_nr == 0)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e2b54d546b7c..bd0f45fb38c4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1025,16 +1025,16 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
}
/* make sure our super fits in the device */
- if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
+ if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
goto error_bdev_put;
/* make sure our super fits in the page */
- if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
+ if (sizeof(*disk_super) > PAGE_SIZE)
goto error_bdev_put;
/* make sure our super doesn't straddle pages on disk */
- index = bytenr >> PAGE_CACHE_SHIFT;
- if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
+ index = bytenr >> PAGE_SHIFT;
+ if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
goto error_bdev_put;
/* pull in the page with our super */
@@ -1047,7 +1047,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
p = kmap(page);
/* align our pointer to the offset of the super block */
- disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
+ disk_super = p + (bytenr & ~PAGE_MASK);
if (btrfs_super_bytenr(disk_super) != bytenr ||
btrfs_super_magic(disk_super) != BTRFS_MAGIC)
@@ -1075,7 +1075,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
error_unmap:
kunmap(page);
- page_cache_release(page);
+ put_page(page);
error_bdev_put:
blkdev_put(bdev, flags);
@@ -6527,7 +6527,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
* but sb spans only this function. Add an explicit SetPageUptodate call
* to silence the warning eg. on PowerPC 64.
*/
- if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
+ if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
SetPageUptodate(sb->pages[0]);
write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
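The scan-time checks in volumes.c guarantee the super block can be examined through one mapped page. Taking the primary super at byte 65536 and the structure as 4 KiB for illustration, the straddle test passes for any power-of-two page size of at least 4 KiB:

#include <stdio.h>

int main(void)
{
	unsigned long long bytenr = 65536;	/* primary btrfs super   */
	unsigned long sb_size = 4096;		/* assumed struct size   */
	unsigned int shift = 12;		/* 4 KiB pages           */
	unsigned long first = bytenr >> shift;
	unsigned long last = (bytenr + sb_size - 1) >> shift;

	printf("first=%lu last=%lu straddles=%s\n",
	       first, last, first == last ? "no" : "yes");	/* 16 16 no */
	return 0;
}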
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 82990b8f872b..88d274e8ecf2 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -59,7 +59,7 @@ static struct list_head *zlib_alloc_workspace(void)
workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
zlib_inflate_workspacesize());
workspace->strm.workspace = vmalloc(workspacesize);
- workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+ workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
if (!workspace->strm.workspace || !workspace->buf)
goto fail;
@@ -103,7 +103,7 @@ static int zlib_compress_pages(struct list_head *ws,
workspace->strm.total_in = 0;
workspace->strm.total_out = 0;
- in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+ in_page = find_get_page(mapping, start >> PAGE_SHIFT);
data_in = kmap(in_page);
out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
@@ -117,8 +117,8 @@ static int zlib_compress_pages(struct list_head *ws,
workspace->strm.next_in = data_in;
workspace->strm.next_out = cpage_out;
- workspace->strm.avail_out = PAGE_CACHE_SIZE;
- workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE);
+ workspace->strm.avail_out = PAGE_SIZE;
+ workspace->strm.avail_in = min(len, PAGE_SIZE);
while (workspace->strm.total_in < len) {
ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
@@ -156,7 +156,7 @@ static int zlib_compress_pages(struct list_head *ws,
cpage_out = kmap(out_page);
pages[nr_pages] = out_page;
nr_pages++;
- workspace->strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.avail_out = PAGE_SIZE;
workspace->strm.next_out = cpage_out;
}
/* we're all done */
@@ -170,14 +170,14 @@ static int zlib_compress_pages(struct list_head *ws,
bytes_left = len - workspace->strm.total_in;
kunmap(in_page);
- page_cache_release(in_page);
+ put_page(in_page);
- start += PAGE_CACHE_SIZE;
+ start += PAGE_SIZE;
in_page = find_get_page(mapping,
- start >> PAGE_CACHE_SHIFT);
+ start >> PAGE_SHIFT);
data_in = kmap(in_page);
workspace->strm.avail_in = min(bytes_left,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
workspace->strm.next_in = data_in;
}
}
@@ -205,7 +205,7 @@ out:
if (in_page) {
kunmap(in_page);
- page_cache_release(in_page);
+ put_page(in_page);
}
return ret;
}
@@ -223,18 +223,18 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
size_t total_out = 0;
unsigned long page_in_index = 0;
unsigned long page_out_index = 0;
- unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
+ unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long pg_offset;
data_in = kmap(pages_in[page_in_index]);
workspace->strm.next_in = data_in;
- workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
+ workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
workspace->strm.total_in = 0;
workspace->strm.total_out = 0;
workspace->strm.next_out = workspace->buf;
- workspace->strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.avail_out = PAGE_SIZE;
pg_offset = 0;
/* If it's deflate, and it's got no preset dictionary, then
@@ -274,7 +274,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
}
workspace->strm.next_out = workspace->buf;
- workspace->strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.avail_out = PAGE_SIZE;
if (workspace->strm.avail_in == 0) {
unsigned long tmp;
@@ -288,7 +288,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
workspace->strm.next_in = data_in;
tmp = srclen - workspace->strm.total_in;
workspace->strm.avail_in = min(tmp,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
}
}
if (ret != Z_STREAM_END)
@@ -325,7 +325,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
workspace->strm.total_in = 0;
workspace->strm.next_out = workspace->buf;
- workspace->strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.avail_out = PAGE_SIZE;
workspace->strm.total_out = 0;
/* If it's deflate, and it's got no preset dictionary, then
we can tell zlib to skip the adler32 check. */
@@ -368,8 +368,8 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
else
buf_offset = 0;
- bytes = min(PAGE_CACHE_SIZE - pg_offset,
- PAGE_CACHE_SIZE - buf_offset);
+ bytes = min(PAGE_SIZE - pg_offset,
+ PAGE_SIZE - buf_offset);
bytes = min(bytes, bytes_left);
kaddr = kmap_atomic(dest_page);
@@ -380,7 +380,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
bytes_left -= bytes;
next:
workspace->strm.next_out = workspace->buf;
- workspace->strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.avail_out = PAGE_SIZE;
}
if (ret != Z_STREAM_END && bytes_left != 0)
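zlib.c drives the kernel's zlib through page-sized windows: avail_in is refilled one mapped page at a time and avail_out is drained into PAGE_SIZE buffers. A userspace sketch of the same refill/drain shape against stock zlib (deflate_chunked is a hypothetical helper; it assumes a deflateInit()-ed stream and omits everything but the loop guard):

#include <zlib.h>

/* Compress in[0..len) into out[0..outcap) in 4096-byte windows. */
static int deflate_chunked(z_stream *s, unsigned char *in, unsigned long len,
			   unsigned char *out, unsigned long outcap)
{
	int ret = Z_OK;

	while (ret == Z_OK) {
		unsigned long left_in = len - s->total_in;
		unsigned long left_out = outcap - s->total_out;

		s->next_in = in + s->total_in;
		s->avail_in = left_in > 4096 ? 4096 : left_in;
		s->next_out = out + s->total_out;
		s->avail_out = left_out > 4096 ? 4096 : left_out;
		ret = deflate(s, s->total_in + s->avail_in == len ?
			      Z_FINISH : Z_NO_FLUSH);
	}
	return ret == Z_STREAM_END ? 0 : -1;
}

The kernel loop differs mainly in that each time avail_out drains it allocates and kmaps a fresh output page instead of advancing through one flat buffer.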
diff --git a/fs/buffer.c b/fs/buffer.c
index 33be29675358..af0d9a82a8ed 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -129,7 +129,7 @@ __clear_page_buffers(struct page *page)
{
ClearPagePrivate(page);
set_page_private(page, 0);
- page_cache_release(page);
+ put_page(page);
}
static void buffer_io_error(struct buffer_head *bh, char *msg)
@@ -207,7 +207,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
struct page *page;
int all_mapped = 1;
- index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
+ index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
if (!page)
goto out;
@@ -245,7 +245,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
}
out_unlock:
spin_unlock(&bd_mapping->private_lock);
- page_cache_release(page);
+ put_page(page);
out:
return ret;
}
@@ -1040,7 +1040,7 @@ done:
ret = (block < end_block) ? 1 : -ENXIO;
failed:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return ret;
}
@@ -1533,7 +1533,7 @@ void block_invalidatepage(struct page *page, unsigned int offset,
/*
* Check for overflow
*/
- BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+ BUG_ON(stop > PAGE_SIZE || stop < length);
head = page_buffers(page);
bh = head;
@@ -1716,7 +1716,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
blocksize = bh->b_size;
bbits = block_size_bits(blocksize);
- block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+ block = (sector_t)page->index << (PAGE_SHIFT - bbits);
last_block = (i_size_read(inode) - 1) >> bbits;
/*
@@ -1894,7 +1894,7 @@ EXPORT_SYMBOL(page_zero_new_buffers);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
@@ -1904,15 +1904,15 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
BUG_ON(!PageLocked(page));
- BUG_ON(from > PAGE_CACHE_SIZE);
- BUG_ON(to > PAGE_CACHE_SIZE);
+ BUG_ON(from > PAGE_SIZE);
+ BUG_ON(to > PAGE_SIZE);
BUG_ON(from > to);
head = create_page_buffers(page, inode, 0);
blocksize = head->b_size;
bbits = block_size_bits(blocksize);
- block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+ block = (sector_t)page->index << (PAGE_SHIFT - bbits);
for(bh = head, block_start = 0; bh != head || !block_start;
block++, block_start=block_end, bh = bh->b_this_page) {
@@ -2020,7 +2020,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
unsigned flags, struct page **pagep, get_block_t *get_block)
{
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
int status;
@@ -2031,7 +2031,7 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
status = __block_write_begin(page, pos, len, get_block);
if (unlikely(status)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
page = NULL;
}
@@ -2047,7 +2047,7 @@ int block_write_end(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
unsigned start;
- start = pos & (PAGE_CACHE_SIZE - 1);
+ start = pos & (PAGE_SIZE - 1);
if (unlikely(copied < len)) {
/*
@@ -2099,7 +2099,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
}
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
@@ -2136,9 +2136,9 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
head = page_buffers(page);
blocksize = head->b_size;
- to = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
+ to = min_t(unsigned, PAGE_SIZE - from, count);
to = from + to;
- if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
+ if (from < blocksize && to > PAGE_SIZE - blocksize)
return 0;
bh = head;
@@ -2181,7 +2181,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
blocksize = head->b_size;
bbits = block_size_bits(blocksize);
- iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+ iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
lblock = (i_size_read(inode)+blocksize-1) >> bbits;
bh = head;
nr = 0;
@@ -2295,16 +2295,16 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
unsigned zerofrom, offset, len;
int err = 0;
- index = pos >> PAGE_CACHE_SHIFT;
- offset = pos & ~PAGE_CACHE_MASK;
+ index = pos >> PAGE_SHIFT;
+ offset = pos & ~PAGE_MASK;
- while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
- zerofrom = curpos & ~PAGE_CACHE_MASK;
+ while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
+ zerofrom = curpos & ~PAGE_MASK;
if (zerofrom & (blocksize-1)) {
*bytes |= (blocksize-1);
(*bytes)++;
}
- len = PAGE_CACHE_SIZE - zerofrom;
+ len = PAGE_SIZE - zerofrom;
err = pagecache_write_begin(file, mapping, curpos, len,
AOP_FLAG_UNINTERRUPTIBLE,
@@ -2329,7 +2329,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
/* page covers the boundary, find the boundary offset */
if (index == curidx) {
- zerofrom = curpos & ~PAGE_CACHE_MASK;
+ zerofrom = curpos & ~PAGE_MASK;
/* if we will expand the thing last block will be filled */
if (offset <= zerofrom) {
goto out;
@@ -2375,7 +2375,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
if (err)
return err;
- zerofrom = *bytes & ~PAGE_CACHE_MASK;
+ zerofrom = *bytes & ~PAGE_MASK;
if (pos+len > *bytes && zerofrom & (blocksize-1)) {
*bytes |= (blocksize-1);
(*bytes)++;
@@ -2430,10 +2430,10 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
}
/* page is wholly or partially inside EOF */
- if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
- end = size & ~PAGE_CACHE_MASK;
+ if (((page->index + 1) << PAGE_SHIFT) > size)
+ end = size & ~PAGE_MASK;
else
- end = PAGE_CACHE_SIZE;
+ end = PAGE_SIZE;
ret = __block_write_begin(page, 0, end, get_block);
if (!ret)
@@ -2508,8 +2508,8 @@ int nobh_write_begin(struct address_space *mapping,
int ret = 0;
int is_mapped_to_disk = 1;
- index = pos >> PAGE_CACHE_SHIFT;
- from = pos & (PAGE_CACHE_SIZE - 1);
+ index = pos >> PAGE_SHIFT;
+ from = pos & (PAGE_SIZE - 1);
to = from + len;
page = grab_cache_page_write_begin(mapping, index, flags);
@@ -2543,7 +2543,7 @@ int nobh_write_begin(struct address_space *mapping,
goto out_release;
}
- block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+ block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
/*
* We loop across all blocks in the page, whether or not they are
@@ -2551,7 +2551,7 @@ int nobh_write_begin(struct address_space *mapping,
* page is fully mapped-to-disk.
*/
for (block_start = 0, block_in_page = 0, bh = head;
- block_start < PAGE_CACHE_SIZE;
+ block_start < PAGE_SIZE;
block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
int create;
@@ -2623,7 +2623,7 @@ failed:
out_release:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
*pagep = NULL;
return ret;
@@ -2653,7 +2653,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
}
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
while (head) {
bh = head;
@@ -2675,7 +2675,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
{
struct inode * const inode = page->mapping->host;
loff_t i_size = i_size_read(inode);
- const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ const pgoff_t end_index = i_size >> PAGE_SHIFT;
unsigned offset;
int ret;
@@ -2684,7 +2684,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
goto out;
/* Is the page fully outside i_size? (truncate in progress) */
- offset = i_size & (PAGE_CACHE_SIZE-1);
+ offset = i_size & (PAGE_SIZE-1);
if (page->index >= end_index+1 || !offset) {
/*
* The page may have dirty, unmapped buffers. For example,
@@ -2707,7 +2707,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ zero_user_segment(page, offset, PAGE_SIZE);
out:
ret = mpage_writepage(page, get_block, wbc);
if (ret == -EAGAIN)
@@ -2720,8 +2720,8 @@ EXPORT_SYMBOL(nobh_writepage);
int nobh_truncate_page(struct address_space *mapping,
loff_t from, get_block_t *get_block)
{
- pgoff_t index = from >> PAGE_CACHE_SHIFT;
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ pgoff_t index = from >> PAGE_SHIFT;
+ unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize;
sector_t iblock;
unsigned length, pos;
@@ -2738,7 +2738,7 @@ int nobh_truncate_page(struct address_space *mapping,
return 0;
length = blocksize - length;
- iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
page = grab_cache_page(mapping, index);
err = -ENOMEM;
@@ -2748,7 +2748,7 @@ int nobh_truncate_page(struct address_space *mapping,
if (page_has_buffers(page)) {
has_buffers:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return block_truncate_page(mapping, from, get_block);
}
@@ -2772,7 +2772,7 @@ has_buffers:
if (!PageUptodate(page)) {
err = mapping->a_ops->readpage(NULL, page);
if (err) {
- page_cache_release(page);
+ put_page(page);
goto out;
}
lock_page(page);
@@ -2789,7 +2789,7 @@ has_buffers:
unlock:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
out:
return err;
}
@@ -2798,8 +2798,8 @@ EXPORT_SYMBOL(nobh_truncate_page);
int block_truncate_page(struct address_space *mapping,
loff_t from, get_block_t *get_block)
{
- pgoff_t index = from >> PAGE_CACHE_SHIFT;
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ pgoff_t index = from >> PAGE_SHIFT;
+ unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize;
sector_t iblock;
unsigned length, pos;
@@ -2816,7 +2816,7 @@ int block_truncate_page(struct address_space *mapping,
return 0;
length = blocksize - length;
- iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
page = grab_cache_page(mapping, index);
err = -ENOMEM;
@@ -2865,7 +2865,7 @@ int block_truncate_page(struct address_space *mapping,
unlock:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
out:
return err;
}
@@ -2879,7 +2879,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
{
struct inode * const inode = page->mapping->host;
loff_t i_size = i_size_read(inode);
- const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ const pgoff_t end_index = i_size >> PAGE_SHIFT;
unsigned offset;
/* Is the page fully inside i_size? */
@@ -2888,14 +2888,14 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
end_buffer_async_write);
/* Is the page fully outside i_size? (truncate in progress) */
- offset = i_size & (PAGE_CACHE_SIZE-1);
+ offset = i_size & (PAGE_SIZE-1);
if (page->index >= end_index+1 || !offset) {
/*
* The page may have dirty, unmapped buffers. For example,
* they may have been added in ext3_writepage(). Make them
* freeable here, so the page does not leak.
*/
- do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ do_invalidatepage(page, 0, PAGE_SIZE);
unlock_page(page);
return 0; /* don't care */
}
@@ -2907,7 +2907,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ zero_user_segment(page, offset, PAGE_SIZE);
return __block_write_full_page(inode, page, get_block, wbc,
end_buffer_async_write);
}
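buffer.c converts a page index into the first block it covers by shifting by (PAGE_SHIFT - bbits); the cast to sector_t guards against overflow of a 32-bit pgoff_t. Worked example with 4 KiB pages and 1 KiB blocks:

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12, bbits = 10; /* 4 KiB pages, 1 KiB blocks */
	unsigned long long page_index = 3;
	unsigned long long first_block = page_index << (page_shift - bbits);

	/* 4 blocks per page, so page 3 starts at block 12 */
	printf("first block = %llu\n", first_block);
	return 0;
}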
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c0f3da3926a0..afbdc418966d 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -194,10 +194,10 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
error = -EIO;
}
- page_cache_release(monitor->back_page);
+ put_page(monitor->back_page);
fscache_end_io(op, monitor->netfs_page, error);
- page_cache_release(monitor->netfs_page);
+ put_page(monitor->netfs_page);
fscache_retrieval_complete(op, 1);
fscache_put_retrieval(op);
kfree(monitor);
@@ -288,8 +288,8 @@ monitor_backing_page:
_debug("- monitor add");
/* install the monitor */
- page_cache_get(monitor->netfs_page);
- page_cache_get(backpage);
+ get_page(monitor->netfs_page);
+ get_page(backpage);
monitor->back_page = backpage;
monitor->monitor.private = backpage;
add_page_wait_queue(backpage, &monitor->monitor);
@@ -310,7 +310,7 @@ backing_page_already_present:
_debug("- present");
if (newpage) {
- page_cache_release(newpage);
+ put_page(newpage);
newpage = NULL;
}
@@ -342,7 +342,7 @@ success:
out:
if (backpage)
- page_cache_release(backpage);
+ put_page(backpage);
if (monitor) {
fscache_put_retrieval(monitor->op);
kfree(monitor);
@@ -363,7 +363,7 @@ io_error:
goto out;
nomem_page:
- page_cache_release(newpage);
+ put_page(newpage);
nomem_monitor:
fscache_put_retrieval(monitor->op);
kfree(monitor);
@@ -530,7 +530,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
netpage->index, cachefiles_gfp);
if (ret < 0) {
if (ret == -EEXIST) {
- page_cache_release(netpage);
+ put_page(netpage);
fscache_retrieval_complete(op, 1);
continue;
}
@@ -538,10 +538,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
}
/* install a monitor */
- page_cache_get(netpage);
+ get_page(netpage);
monitor->netfs_page = netpage;
- page_cache_get(backpage);
+ get_page(backpage);
monitor->back_page = backpage;
monitor->monitor.private = backpage;
add_page_wait_queue(backpage, &monitor->monitor);
@@ -555,10 +555,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
unlock_page(backpage);
}
- page_cache_release(backpage);
+ put_page(backpage);
backpage = NULL;
- page_cache_release(netpage);
+ put_page(netpage);
netpage = NULL;
continue;
@@ -603,7 +603,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
netpage->index, cachefiles_gfp);
if (ret < 0) {
if (ret == -EEXIST) {
- page_cache_release(netpage);
+ put_page(netpage);
fscache_retrieval_complete(op, 1);
continue;
}
@@ -612,14 +612,14 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
copy_highpage(netpage, backpage);
- page_cache_release(backpage);
+ put_page(backpage);
backpage = NULL;
fscache_mark_page_cached(op, netpage);
/* the netpage is unlocked and marked up to date here */
fscache_end_io(op, netpage, 0);
- page_cache_release(netpage);
+ put_page(netpage);
netpage = NULL;
fscache_retrieval_complete(op, 1);
continue;
@@ -632,11 +632,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
out:
/* tidy up */
if (newpage)
- page_cache_release(newpage);
+ put_page(newpage);
if (netpage)
- page_cache_release(netpage);
+ put_page(netpage);
if (backpage)
- page_cache_release(backpage);
+ put_page(backpage);
if (monitor) {
fscache_put_retrieval(op);
kfree(monitor);
@@ -644,7 +644,7 @@ out:
list_for_each_entry_safe(netpage, _n, list, lru) {
list_del(&netpage->lru);
- page_cache_release(netpage);
+ put_page(netpage);
fscache_retrieval_complete(op, 1);
}
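Every page_cache_get/page_cache_release pair in cachefiles becomes get_page/put_page, but the ownership rules are unchanged: a monitor holds one reference on both the netfs page and the backing page from installation until the copier finishes with them. In outline:

	/* install: the monitor now owns a ref on each page */
	get_page(monitor->netfs_page);
	get_page(backpage);
	...
	/* copier: work done, drop both refs */
	put_page(monitor->back_page);
	put_page(monitor->netfs_page);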
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index fc5cae2a0db2..4801571f51cb 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -143,7 +143,7 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
inode = page->mapping->host;
ci = ceph_inode(inode);
- if (offset != 0 || length != PAGE_CACHE_SIZE) {
+ if (offset != 0 || length != PAGE_SIZE) {
dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
inode, page, page->index, offset, length);
return;
@@ -197,10 +197,10 @@ static int readpage_nounlock(struct file *filp, struct page *page)
&ceph_inode_to_client(inode)->client->osdc;
int err = 0;
u64 off = page_offset(page);
- u64 len = PAGE_CACHE_SIZE;
+ u64 len = PAGE_SIZE;
if (off >= i_size_read(inode)) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
return 0;
}
@@ -212,7 +212,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
*/
if (off == 0)
return -EINVAL;
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
return 0;
}
@@ -234,9 +234,9 @@ static int readpage_nounlock(struct file *filp, struct page *page)
ceph_fscache_readpage_cancel(inode, page);
goto out;
}
- if (err < PAGE_CACHE_SIZE)
+ if (err < PAGE_SIZE)
/* zero fill remainder of page */
- zero_user_segment(page, err, PAGE_CACHE_SIZE);
+ zero_user_segment(page, err, PAGE_SIZE);
else
flush_dcache_page(page);
@@ -278,10 +278,10 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
if (rc < 0 && rc != -ENOENT)
goto unlock;
- if (bytes < (int)PAGE_CACHE_SIZE) {
+ if (bytes < (int)PAGE_SIZE) {
/* zero (remainder of) page */
int s = bytes < 0 ? 0 : bytes;
- zero_user_segment(page, s, PAGE_CACHE_SIZE);
+ zero_user_segment(page, s, PAGE_SIZE);
}
dout("finish_read %p uptodate %p idx %lu\n", inode, page,
page->index);
@@ -290,8 +290,8 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
ceph_readpage_to_fscache(inode, page);
unlock:
unlock_page(page);
- page_cache_release(page);
- bytes -= PAGE_CACHE_SIZE;
+ put_page(page);
+ bytes -= PAGE_SIZE;
}
kfree(osd_data->pages);
}
@@ -336,7 +336,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
if (max && nr_pages == max)
break;
}
- len = nr_pages << PAGE_CACHE_SHIFT;
+ len = nr_pages << PAGE_SHIFT;
dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
off, len);
vino = ceph_vino(inode);
@@ -364,7 +364,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
if (add_to_page_cache_lru(page, &inode->i_data, page->index,
GFP_KERNEL)) {
ceph_fscache_uncache_page(inode, page);
- page_cache_release(page);
+ put_page(page);
dout("start_read %p add_to_page_cache failed %p\n",
inode, page);
nr_pages = i;
@@ -415,8 +415,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
if (rc == 0)
goto out;
- if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
- max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
+ if (fsc->mount_options->rsize >= PAGE_SIZE)
+ max = (fsc->mount_options->rsize + PAGE_SIZE - 1)
>> PAGE_SHIFT;
dout("readpages %p file %p nr_pages %d max %d\n", inode,
@@ -484,7 +484,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
long writeback_stat;
u64 truncate_size;
u32 truncate_seq;
- int err = 0, len = PAGE_CACHE_SIZE;
+ int err = 0, len = PAGE_SIZE;
dout("writepage %p idx %lu\n", page, page->index);
@@ -725,9 +725,9 @@ static int ceph_writepages_start(struct address_space *mapping,
}
if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
wsize = fsc->mount_options->wsize;
- if (wsize < PAGE_CACHE_SIZE)
- wsize = PAGE_CACHE_SIZE;
- max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
+ if (wsize < PAGE_SIZE)
+ wsize = PAGE_SIZE;
+ max_pages_ever = wsize >> PAGE_SHIFT;
pagevec_init(&pvec, 0);
@@ -737,8 +737,8 @@ static int ceph_writepages_start(struct address_space *mapping,
end = -1;
dout(" cyclic, start at %lu\n", start);
} else {
- start = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ start = wbc->range_start >> PAGE_SHIFT;
+ end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
should_loop = 0;
@@ -887,7 +887,7 @@ get_more_pages:
num_ops = 1 + do_sync;
strip_unit_end = page->index +
- ((len - 1) >> PAGE_CACHE_SHIFT);
+ ((len - 1) >> PAGE_SHIFT);
BUG_ON(pages);
max_pages = calc_pages_for(0, (u64)len);
@@ -901,7 +901,7 @@ get_more_pages:
len = 0;
} else if (page->index !=
- (offset + len) >> PAGE_CACHE_SHIFT) {
+ (offset + len) >> PAGE_SHIFT) {
if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS :
CEPH_OSD_MAX_OPS)) {
redirty_page_for_writepage(wbc, page);
@@ -929,7 +929,7 @@ get_more_pages:
pages[locked_pages] = page;
locked_pages++;
- len += PAGE_CACHE_SIZE;
+ len += PAGE_SIZE;
}
/* did we get anything? */
@@ -981,7 +981,7 @@ new_request:
BUG_ON(IS_ERR(req));
}
BUG_ON(len < page_offset(pages[locked_pages - 1]) +
- PAGE_CACHE_SIZE - offset);
+ PAGE_SIZE - offset);
req->r_callback = writepages_finish;
req->r_inode = inode;
@@ -1011,7 +1011,7 @@ new_request:
}
set_page_writeback(pages[i]);
- len += PAGE_CACHE_SIZE;
+ len += PAGE_SIZE;
}
if (snap_size != -1) {
@@ -1020,7 +1020,7 @@ new_request:
/* writepages_finish() clears writeback pages
* according to the data length, so make sure
* data length covers all locked pages */
- u64 min_len = len + 1 - PAGE_CACHE_SIZE;
+ u64 min_len = len + 1 - PAGE_SIZE;
len = min(len, (u64)i_size_read(inode) - offset);
len = max(len, min_len);
}
@@ -1135,8 +1135,8 @@ static int ceph_update_writeable_page(struct file *file,
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- loff_t page_off = pos & PAGE_CACHE_MASK;
- int pos_in_page = pos & ~PAGE_CACHE_MASK;
+ loff_t page_off = pos & PAGE_MASK;
+ int pos_in_page = pos & ~PAGE_MASK;
int end_in_page = pos_in_page + len;
loff_t i_size;
int r;
@@ -1191,7 +1191,7 @@ retry_locked:
}
/* full page? */
- if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
+ if (pos_in_page == 0 && len == PAGE_SIZE)
return 0;
/* past end of file? */
@@ -1199,12 +1199,12 @@ retry_locked:
if (page_off >= i_size ||
(pos_in_page == 0 && (pos+len) >= i_size &&
- end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
+ end_in_page - pos_in_page != PAGE_SIZE)) {
dout(" zeroing %p 0 - %d and %d - %d\n",
- page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
+ page, pos_in_page, end_in_page, (int)PAGE_SIZE);
zero_user_segments(page,
0, pos_in_page,
- end_in_page, PAGE_CACHE_SIZE);
+ end_in_page, PAGE_SIZE);
return 0;
}
@@ -1228,7 +1228,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
{
struct inode *inode = file_inode(file);
struct page *page;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
int r;
do {
@@ -1242,7 +1242,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
r = ceph_update_writeable_page(file, pos, len, page);
if (r < 0)
- page_cache_release(page);
+ put_page(page);
else
*pagep = page;
} while (r == -EAGAIN);
@@ -1259,7 +1259,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata)
{
struct inode *inode = file_inode(file);
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
int check_cap = 0;
dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
@@ -1279,7 +1279,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
set_page_dirty(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (check_cap)
ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
@@ -1322,11 +1322,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
struct page *pinned_page = NULL;
- loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT;
+ loff_t off = vmf->pgoff << PAGE_SHIFT;
int want, got, ret;
dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
- inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE);
+ inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
@@ -1343,7 +1343,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
- inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got));
+ inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
ci->i_inline_version == CEPH_INLINE_NONE)
@@ -1352,16 +1352,16 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = -EAGAIN;
dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
- inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret);
+ inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
if (pinned_page)
- page_cache_release(pinned_page);
+ put_page(pinned_page);
ceph_put_cap_refs(ci, got);
if (ret != -EAGAIN)
return ret;
/* read inline data */
- if (off >= PAGE_CACHE_SIZE) {
+ if (off >= PAGE_SIZE) {
/* does not support inline data > PAGE_SIZE */
ret = VM_FAULT_SIGBUS;
} else {
@@ -1378,12 +1378,12 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
CEPH_STAT_CAP_INLINE_DATA, true);
if (ret1 < 0 || off >= i_size_read(inode)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
ret = VM_FAULT_SIGBUS;
goto out;
}
- if (ret1 < PAGE_CACHE_SIZE)
- zero_user_segment(page, ret1, PAGE_CACHE_SIZE);
+ if (ret1 < PAGE_SIZE)
+ zero_user_segment(page, ret1, PAGE_SIZE);
else
flush_dcache_page(page);
SetPageUptodate(page);
@@ -1392,7 +1392,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
out:
dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
- inode, off, (size_t)PAGE_CACHE_SIZE, ret);
+ inode, off, (size_t)PAGE_SIZE, ret);
return ret;
}
@@ -1430,10 +1430,10 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
- if (off + PAGE_CACHE_SIZE <= size)
- len = PAGE_CACHE_SIZE;
+ if (off + PAGE_SIZE <= size)
+ len = PAGE_SIZE;
else
- len = size & ~PAGE_CACHE_MASK;
+ len = size & ~PAGE_MASK;
dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
inode, ceph_vinop(inode), off, len, size);
@@ -1519,7 +1519,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
return;
if (PageUptodate(page)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return;
}
}
@@ -1534,14 +1534,14 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
}
if (page != locked_page) {
- if (len < PAGE_CACHE_SIZE)
- zero_user_segment(page, len, PAGE_CACHE_SIZE);
+ if (len < PAGE_SIZE)
+ zero_user_segment(page, len, PAGE_SIZE);
else
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
}
@@ -1578,7 +1578,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
from_pagecache = true;
lock_page(page);
} else {
- page_cache_release(page);
+ put_page(page);
page = NULL;
}
}
@@ -1586,8 +1586,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
if (page) {
len = i_size_read(inode);
- if (len > PAGE_CACHE_SIZE)
- len = PAGE_CACHE_SIZE;
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
} else {
page = __page_cache_alloc(GFP_NOFS);
if (!page) {
@@ -1670,7 +1670,7 @@ out:
if (page && page != locked_page) {
if (from_pagecache) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
} else
__free_pages(page, 0);
}
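
The ceph/addr.c hunks above are all instances of one pattern: a file position is split into a page-cache index, a page-aligned offset, and an offset within the page, and the old PAGE_CACHE_* names were pure aliases for the PAGE_* macros used here. A minimal userspace sketch of that arithmetic (PAGE_SHIFT hard-coded to 12, i.e. an assumed 4 KiB page; the kernel takes it from the architecture headers):

	#include <stdio.h>
	#include <stdint.h>

	/* Mirrors the kernel macros for an assumed 4 KiB page. */
	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	int main(void)
	{
		uint64_t pos = 8195;                      /* arbitrary file position */
		uint64_t index    = pos >> PAGE_SHIFT;    /* page cache index: 2 */
		uint64_t page_off = pos & PAGE_MASK;      /* page-aligned offset: 8192 */
		uint64_t in_page  = pos & (PAGE_SIZE - 1);/* offset within page: 3 */

		printf("index=%llu page_off=%llu in_page=%llu\n",
		       (unsigned long long)index,
		       (unsigned long long)page_off,
		       (unsigned long long)in_page);
		return 0;
	}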
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index de17bb232ff8..cfaeef18cbca 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2510,7 +2510,7 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
*pinned_page = page;
break;
}
- page_cache_release(page);
+ put_page(page);
}
/*
* drop cap refs first because getattr while
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index fadc243dfb28..4fb2bbc2a272 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -129,7 +129,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
struct inode *dir = d_inode(parent);
struct dentry *dentry, *last = NULL;
struct ceph_dentry_info *di;
- unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *);
+ unsigned nsize = PAGE_SIZE / sizeof(struct dentry *);
int err = 0;
loff_t ptr_pos = 0;
struct ceph_readdir_cache_control cache_ctl = {};
@@ -154,7 +154,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
}
err = -EAGAIN;
- pgoff = ptr_pos >> PAGE_CACHE_SHIFT;
+ pgoff = ptr_pos >> PAGE_SHIFT;
if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
ceph_readdir_cache_release(&cache_ctl);
cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ef38f01c1795..a79f9269831e 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -466,7 +466,7 @@ more:
ret += zlen;
}
- didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
+ didpages = (page_align + ret) >> PAGE_SHIFT;
pos += ret;
read = pos - off;
left -= ret;
@@ -806,8 +806,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (write) {
ret = invalidate_inode_pages2_range(inode->i_mapping,
- pos >> PAGE_CACHE_SHIFT,
- (pos + count) >> PAGE_CACHE_SHIFT);
+ pos >> PAGE_SHIFT,
+ (pos + count) >> PAGE_SHIFT);
if (ret < 0)
dout("invalidate_inode_pages2_range returned %d\n", ret);
@@ -872,7 +872,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
* may block.
*/
truncate_inode_pages_range(inode->i_mapping, pos,
- (pos+len) | (PAGE_CACHE_SIZE - 1));
+ (pos+len) | (PAGE_SIZE - 1));
osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
}
@@ -1006,8 +1006,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
return ret;
ret = invalidate_inode_pages2_range(inode->i_mapping,
- pos >> PAGE_CACHE_SHIFT,
- (pos + count) >> PAGE_CACHE_SHIFT);
+ pos >> PAGE_SHIFT,
+ (pos + count) >> PAGE_SHIFT);
if (ret < 0)
dout("invalidate_inode_pages2_range returned %d\n", ret);
@@ -1036,7 +1036,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
* write from beginning of first page,
* regardless of io alignment
*/
- num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
if (IS_ERR(pages)) {
@@ -1159,7 +1159,7 @@ again:
dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
if (pinned_page) {
- page_cache_release(pinned_page);
+ put_page(pinned_page);
pinned_page = NULL;
}
ceph_put_cap_refs(ci, got);
@@ -1188,10 +1188,10 @@ again:
if (retry_op == READ_INLINE) {
BUG_ON(ret > 0 || read > 0);
if (iocb->ki_pos < i_size &&
- iocb->ki_pos < PAGE_CACHE_SIZE) {
+ iocb->ki_pos < PAGE_SIZE) {
loff_t end = min_t(loff_t, i_size,
iocb->ki_pos + len);
- end = min_t(loff_t, end, PAGE_CACHE_SIZE);
+ end = min_t(loff_t, end, PAGE_SIZE);
if (statret < end)
zero_user_segment(page, statret, end);
ret = copy_page_to_iter(page,
@@ -1463,21 +1463,21 @@ static inline void ceph_zero_partial_page(
struct inode *inode, loff_t offset, unsigned size)
{
struct page *page;
- pgoff_t index = offset >> PAGE_CACHE_SHIFT;
+ pgoff_t index = offset >> PAGE_SHIFT;
page = find_lock_page(inode->i_mapping, index);
if (page) {
wait_on_page_writeback(page);
- zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
+ zero_user(page, offset & (PAGE_SIZE - 1), size);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
}
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
loff_t length)
{
- loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
+ loff_t nearly = round_up(offset, PAGE_SIZE);
if (offset < nearly) {
loff_t size = nearly - offset;
if (length < size)
@@ -1486,8 +1486,8 @@ static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
offset += size;
length -= size;
}
- if (length >= PAGE_CACHE_SIZE) {
- loff_t size = round_down(length, PAGE_CACHE_SIZE);
+ if (length >= PAGE_SIZE) {
+ loff_t size = round_down(length, PAGE_SIZE);
truncate_pagecache_range(inode, offset, offset + size - 1);
offset += size;
length -= size;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ed58b168904a..edfade037738 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1338,7 +1338,7 @@ void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
if (ctl->page) {
kunmap(ctl->page);
- page_cache_release(ctl->page);
+ put_page(ctl->page);
ctl->page = NULL;
}
}
@@ -1348,7 +1348,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
struct ceph_mds_request *req)
{
struct ceph_inode_info *ci = ceph_inode(dir);
- unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*);
+ unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
unsigned idx = ctl->index % nsize;
pgoff_t pgoff = ctl->index / nsize;
@@ -1367,7 +1367,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
unlock_page(ctl->page);
ctl->dentries = kmap(ctl->page);
if (idx == 0)
- memset(ctl->dentries, 0, PAGE_CACHE_SIZE);
+ memset(ctl->dentries, 0, PAGE_SIZE);
}
if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 44852c3ae531..541ead4d8965 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1610,7 +1610,7 @@ again:
while (!list_empty(&tmp_list)) {
if (!msg) {
msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
- PAGE_CACHE_SIZE, GFP_NOFS, false);
+ PAGE_SIZE, GFP_NOFS, false);
if (!msg)
goto out_err;
head = msg->front.iov_base;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 37712ccffcc6..ee69a537dba5 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -97,7 +97,7 @@ struct ceph_mds_reply_info_parsed {
/*
* cap releases are batched and sent to the MDS en masse.
*/
-#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE - \
+#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE - \
sizeof(struct ceph_mds_cap_release)) / \
sizeof(struct ceph_mds_cap_item))
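
CEPH_CAPS_PER_RELEASE above sizes one cap-release message to a page: a fixed header, then as many cap items as fit in the remainder. A sketch of the same division with stand-in structures (the field layouts below are placeholders for illustration, not the real ceph wire format):

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL /* assumed 4 KiB page */

	/* Stand-ins for the ceph wire structs; layouts are illustrative only. */
	struct cap_release_head { unsigned int num; };
	struct cap_item { unsigned long long ino; unsigned int cap_id, migrate_seq, seq; };

	int main(void)
	{
		size_t per_release = (PAGE_SIZE - sizeof(struct cap_release_head))
				     / sizeof(struct cap_item);
		printf("cap items per release message: %zu\n", per_release);
		return 0;
	}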
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index c973043deb0e..f12d5e2955c2 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -560,7 +560,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
/* set up mempools */
err = -ENOMEM;
- page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT;
+ page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
size = sizeof (struct page *) * (page_count ? page_count : 1);
fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
if (!fsc->wb_pagevec_pool)
@@ -912,13 +912,13 @@ static int ceph_register_bdi(struct super_block *sb,
int err;
/* set ra_pages based on rasize mount option? */
- if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE)
+ if (fsc->mount_options->rasize >= PAGE_SIZE)
fsc->backing_dev_info.ra_pages =
- (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1)
+ (fsc->mount_options->rasize + PAGE_SIZE - 1)
>> PAGE_SHIFT;
else
fsc->backing_dev_info.ra_pages =
- VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
+ VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
atomic_long_inc_return(&bdi_seq));
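
The rasize branch above converts a byte count into a readahead page count, rounding up, with a VM_MAX_READAHEAD fallback for small values. A small sketch of that conversion (the 4 KiB page and the 128 KB VM_MAX_READAHEAD default are assumed values):

	#include <stdio.h>

	#define PAGE_SHIFT 12            /* assumed 4 KiB page */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define VM_MAX_READAHEAD 128     /* kbytes; assumed default */

	/* Round a byte count up to whole pages, as the rasize branch does. */
	static unsigned long bytes_to_ra_pages(unsigned long rasize)
	{
		if (rasize >= PAGE_SIZE)
			return (rasize + PAGE_SIZE - 1) >> PAGE_SHIFT;
		return VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
	}

	int main(void)
	{
		printf("%lu\n", bytes_to_ra_pages(65536)); /* 16 pages */
		printf("%lu\n", bytes_to_ra_pages(65537)); /* 17 pages, rounded up */
		printf("%lu\n", bytes_to_ra_pages(0));     /* fallback: 32 pages */
		return 0;
	}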
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 1d86fc620e5c..89201564c346 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -962,7 +962,7 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
cifs_dbg(FYI, "about to flush pages\n");
/* should we flush first and last page first */
truncate_inode_pages_range(&target_inode->i_data, destoff,
- PAGE_CACHE_ALIGN(destoff + len)-1);
+ PAGE_ALIGN(destoff + len)-1);
if (target_tcon->ses->server->ops->duplicate_extents)
rc = target_tcon->ses->server->ops->duplicate_extents(xid,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d21da9f05bae..f2cc0b3d1af7 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -714,7 +714,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
*
* Note that this might make for "interesting" allocation problems during
* writeback however as we have to allocate an array of pointers for the
- * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
*
* For reads, there is a similar problem as we need to allocate an array
* of kvecs to handle the receive, though that should only need to be done
@@ -733,7 +733,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
/*
* The default wsize is 1M. find_get_pages seems to return a maximum of 256
- * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
+ * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
* a single wsize request with a single call.
*/
#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
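
The "~32kb page array" estimate in the comment above checks out: a 16M write covers 4096 pages at an assumed 4 KiB each, and 4096 page pointers occupy 32 KiB on a 64-bit build. As a quick verification (page size and pointer width are the assumptions):

	#include <stdio.h>

	int main(void)
	{
		unsigned long wsize = 16UL << 20;               /* 16M write */
		unsigned long page_size = 4096;                 /* assumed PAGE_SIZE */
		unsigned long npages = wsize / page_size;       /* 4096 pages */
		unsigned long array  = npages * sizeof(void *); /* 32 KiB on 64-bit */
		printf("%lu pages, %lu byte pointer array\n", npages, array);
		return 0;
	}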
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 76fcb50295a3..a894bf809ff7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1929,17 +1929,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
wsize = server->ops->wp_retry_size(inode);
if (wsize < rest_len) {
- nr_pages = wsize / PAGE_CACHE_SIZE;
+ nr_pages = wsize / PAGE_SIZE;
if (!nr_pages) {
rc = -ENOTSUPP;
break;
}
- cur_len = nr_pages * PAGE_CACHE_SIZE;
- tailsz = PAGE_CACHE_SIZE;
+ cur_len = nr_pages * PAGE_SIZE;
+ tailsz = PAGE_SIZE;
} else {
- nr_pages = DIV_ROUND_UP(rest_len, PAGE_CACHE_SIZE);
+ nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
cur_len = rest_len;
- tailsz = rest_len - (nr_pages - 1) * PAGE_CACHE_SIZE;
+ tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
}
wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
@@ -1957,7 +1957,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
wdata2->sync_mode = wdata->sync_mode;
wdata2->nr_pages = nr_pages;
wdata2->offset = page_offset(wdata2->pages[0]);
- wdata2->pagesz = PAGE_CACHE_SIZE;
+ wdata2->pagesz = PAGE_SIZE;
wdata2->tailsz = tailsz;
wdata2->bytes = cur_len;
@@ -1975,7 +1975,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
if (rc != 0 && rc != -EAGAIN) {
SetPageError(wdata2->pages[j]);
end_page_writeback(wdata2->pages[j]);
- page_cache_release(wdata2->pages[j]);
+ put_page(wdata2->pages[j]);
}
}
@@ -2018,7 +2018,7 @@ cifs_writev_complete(struct work_struct *work)
else if (wdata->result < 0)
SetPageError(page);
end_page_writeback(page);
- page_cache_release(page);
+ put_page(page);
}
if (wdata->result != -EAGAIN)
mapping_set_error(inode->i_mapping, wdata->result);
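
cifs_writev_requeue() above splits the remaining bytes into page-sized requests: when wsize caps the request it sends whole pages with a full tail page, otherwise it sends everything left and the final page is short. A sketch of that sizing (4 KiB page assumed; DIV_ROUND_UP open-coded):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL /* assumed 4 KiB page */

	static void size_request(unsigned long wsize, unsigned long rest_len)
	{
		unsigned long nr_pages, cur_len, tailsz;

		if (wsize < rest_len) {
			nr_pages = wsize / PAGE_SIZE;           /* whole pages only */
			cur_len = nr_pages * PAGE_SIZE;
			tailsz = PAGE_SIZE;
		} else {
			nr_pages = (rest_len + PAGE_SIZE - 1) / PAGE_SIZE;
			cur_len = rest_len;
			tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
		}
		printf("nr_pages=%lu cur_len=%lu tailsz=%lu\n",
		       nr_pages, cur_len, tailsz);
	}

	int main(void)
	{
		size_request(65536, 100000); /* wsize-limited: 16 full pages */
		size_request(65536, 10000);  /* remainder: 3 pages, 1808-byte tail */
		return 0;
	}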
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a763cd3d9e7c..6f62ac821a84 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3630,7 +3630,7 @@ try_mount_again:
cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info);
/* tune readahead according to rsize */
- cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
+ cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_SIZE;
remote_path_check:
#ifdef CONFIG_CIFS_DFS_UPCALL
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ff882aeaccc6..c03d0744648b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1833,7 +1833,7 @@ refind_writable:
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
struct address_space *mapping = page->mapping;
- loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+ loff_t offset = (loff_t)page->index << PAGE_SHIFT;
char *write_data;
int rc = -EFAULT;
int bytes_written = 0;
@@ -1849,7 +1849,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
write_data = kmap(page);
write_data += from;
- if ((to > PAGE_CACHE_SIZE) || (from > to)) {
+ if ((to > PAGE_SIZE) || (from > to)) {
kunmap(page);
return -EIO;
}
@@ -1902,7 +1902,7 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
* find_get_pages_tag seems to return a max of 256 on each
* iteration, so we must call it several times in order to
* fill the array or the wsize is effectively limited to
- * 256 * PAGE_CACHE_SIZE.
+ * 256 * PAGE_SIZE.
*/
*found_pages = 0;
pages = wdata->pages;
@@ -1991,7 +1991,7 @@ wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
/* put any pages we aren't going to use */
for (i = nr_pages; i < found_pages; i++) {
- page_cache_release(wdata->pages[i]);
+ put_page(wdata->pages[i]);
wdata->pages[i] = NULL;
}
@@ -2009,11 +2009,11 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
wdata->sync_mode = wbc->sync_mode;
wdata->nr_pages = nr_pages;
wdata->offset = page_offset(wdata->pages[0]);
- wdata->pagesz = PAGE_CACHE_SIZE;
+ wdata->pagesz = PAGE_SIZE;
wdata->tailsz = min(i_size_read(mapping->host) -
page_offset(wdata->pages[nr_pages - 1]),
- (loff_t)PAGE_CACHE_SIZE);
- wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz;
+ (loff_t)PAGE_SIZE);
+ wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
if (wdata->cfile != NULL)
cifsFileInfo_put(wdata->cfile);
@@ -2047,15 +2047,15 @@ static int cifs_writepages(struct address_space *mapping,
* If wsize is smaller than the page cache size, default to writing
* one page at a time via cifs_writepage
*/
- if (cifs_sb->wsize < PAGE_CACHE_SIZE)
+ if (cifs_sb->wsize < PAGE_SIZE)
return generic_writepages(mapping, wbc);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
} else {
- index = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ index = wbc->range_start >> PAGE_SHIFT;
+ end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = true;
scanned = true;
@@ -2071,7 +2071,7 @@ retry:
if (rc)
break;
- tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1;
+ tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
&found_pages);
@@ -2111,7 +2111,7 @@ retry:
else
SetPageError(wdata->pages[i]);
end_page_writeback(wdata->pages[i]);
- page_cache_release(wdata->pages[i]);
+ put_page(wdata->pages[i]);
}
if (rc != -EAGAIN)
mapping_set_error(mapping, rc);
@@ -2154,7 +2154,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
xid = get_xid();
/* BB add check for wbc flags */
- page_cache_get(page);
+ get_page(page);
if (!PageUptodate(page))
cifs_dbg(FYI, "ppw - page not up to date\n");
@@ -2170,7 +2170,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
*/
set_page_writeback(page);
retry_write:
- rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
+ rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
goto retry_write;
else if (rc == -EAGAIN)
@@ -2180,7 +2180,7 @@ retry_write:
else
SetPageUptodate(page);
end_page_writeback(page);
- page_cache_release(page);
+ put_page(page);
free_xid(xid);
return rc;
}
@@ -2214,12 +2214,12 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
if (copied == len)
SetPageUptodate(page);
ClearPageChecked(page);
- } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
+ } else if (!PageUptodate(page) && copied == PAGE_SIZE)
SetPageUptodate(page);
if (!PageUptodate(page)) {
char *page_data;
- unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned offset = pos & (PAGE_SIZE - 1);
unsigned int xid;
xid = get_xid();
@@ -2248,7 +2248,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
}
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return rc;
}
@@ -3286,9 +3286,9 @@ cifs_readv_complete(struct work_struct *work)
(rdata->result == -EAGAIN && got_bytes))
cifs_readpage_to_fscache(rdata->mapping->host, page);
- got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes);
+ got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
- page_cache_release(page);
+ put_page(page);
rdata->pages[i] = NULL;
}
kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3307,21 +3307,21 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
/* determine the eof that the server (probably) has */
eof = CIFS_I(rdata->mapping->host)->server_eof;
- eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+ eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
rdata->got_bytes = 0;
- rdata->tailsz = PAGE_CACHE_SIZE;
+ rdata->tailsz = PAGE_SIZE;
for (i = 0; i < nr_pages; i++) {
struct page *page = rdata->pages[i];
- if (len >= PAGE_CACHE_SIZE) {
+ if (len >= PAGE_SIZE) {
/* enough data to fill the page */
iov.iov_base = kmap(page);
- iov.iov_len = PAGE_CACHE_SIZE;
+ iov.iov_len = PAGE_SIZE;
cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
i, page->index, iov.iov_base, iov.iov_len);
- len -= PAGE_CACHE_SIZE;
+ len -= PAGE_SIZE;
} else if (len > 0) {
/* enough for partial page, fill and zero the rest */
iov.iov_base = kmap(page);
@@ -3329,7 +3329,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
i, page->index, iov.iov_base, iov.iov_len);
memset(iov.iov_base + len,
- '\0', PAGE_CACHE_SIZE - len);
+ '\0', PAGE_SIZE - len);
rdata->tailsz = len;
len = 0;
} else if (page->index > eof_index) {
@@ -3341,12 +3341,12 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
* to prevent the VFS from repeatedly attempting to
* fill them until the writes are flushed.
*/
- zero_user(page, 0, PAGE_CACHE_SIZE);
+ zero_user(page, 0, PAGE_SIZE);
lru_cache_add_file(page);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
rdata->pages[i] = NULL;
rdata->nr_pages--;
continue;
@@ -3354,7 +3354,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
/* no need to hold page hostage */
lru_cache_add_file(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
rdata->pages[i] = NULL;
rdata->nr_pages--;
continue;
@@ -3402,8 +3402,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
}
/* move first page to the tmplist */
- *offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
- *bytes = PAGE_CACHE_SIZE;
+ *offset = (loff_t)page->index << PAGE_SHIFT;
+ *bytes = PAGE_SIZE;
*nr_pages = 1;
list_move_tail(&page->lru, tmplist);
@@ -3415,7 +3415,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
break;
/* would this page push the read over the rsize? */
- if (*bytes + PAGE_CACHE_SIZE > rsize)
+ if (*bytes + PAGE_SIZE > rsize)
break;
__SetPageLocked(page);
@@ -3424,7 +3424,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
break;
}
list_move_tail(&page->lru, tmplist);
- (*bytes) += PAGE_CACHE_SIZE;
+ (*bytes) += PAGE_SIZE;
expected_index++;
(*nr_pages)++;
}
@@ -3493,7 +3493,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
* reach this point however since we set ra_pages to 0 when the
* rsize is smaller than a cache page.
*/
- if (unlikely(rsize < PAGE_CACHE_SIZE)) {
+ if (unlikely(rsize < PAGE_SIZE)) {
add_credits_and_wake_if(server, credits, 0);
return 0;
}
@@ -3512,7 +3512,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
list_del(&page->lru);
lru_cache_add_file(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
rc = -ENOMEM;
add_credits_and_wake_if(server, credits, 0);
@@ -3524,7 +3524,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
rdata->offset = offset;
rdata->bytes = bytes;
rdata->pid = pid;
- rdata->pagesz = PAGE_CACHE_SIZE;
+ rdata->pagesz = PAGE_SIZE;
rdata->read_into_pages = cifs_readpages_read_into_pages;
rdata->credits = credits;
@@ -3542,7 +3542,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
page = rdata->pages[i];
lru_cache_add_file(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
/* Fallback to the readpage in error/reconnect cases */
kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3577,7 +3577,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
read_data = kmap(page);
/* for reads over a certain size could initiate async read ahead */
- rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
+ rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
if (rc < 0)
goto io_error;
@@ -3587,8 +3587,8 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
file_inode(file)->i_atime =
current_fs_time(file_inode(file)->i_sb);
- if (PAGE_CACHE_SIZE > rc)
- memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
+ if (PAGE_SIZE > rc)
+ memset(read_data + rc, 0, PAGE_SIZE - rc);
flush_dcache_page(page);
SetPageUptodate(page);
@@ -3608,7 +3608,7 @@ read_complete:
static int cifs_readpage(struct file *file, struct page *page)
{
- loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+ loff_t offset = (loff_t)page->index << PAGE_SHIFT;
int rc = -EACCES;
unsigned int xid;
@@ -3679,8 +3679,8 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata)
{
int oncethru = 0;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
- loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
+ pgoff_t index = pos >> PAGE_SHIFT;
+ loff_t offset = pos & (PAGE_SIZE - 1);
loff_t page_start = pos & PAGE_MASK;
loff_t i_size;
struct page *page;
@@ -3703,7 +3703,7 @@ start:
* the server. If the write is short, we'll end up doing a sync write
* instead.
*/
- if (len == PAGE_CACHE_SIZE)
+ if (len == PAGE_SIZE)
goto out;
/*
@@ -3718,7 +3718,7 @@ start:
(offset == 0 && (pos + len) >= i_size)) {
zero_user_segments(page, 0, offset,
offset + len,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
/*
* PageChecked means that the parts of the page
* to which we're not writing are considered up
@@ -3737,7 +3737,7 @@ start:
* do a sync write instead since PG_uptodate isn't set.
*/
cifs_readpage_worker(file, page, &page_start);
- page_cache_release(page);
+ put_page(page);
oncethru = 1;
goto start;
} else {
@@ -3764,7 +3764,7 @@ static void cifs_invalidate_page(struct page *page, unsigned int offset,
{
struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
- if (offset == 0 && length == PAGE_CACHE_SIZE)
+ if (offset == 0 && length == PAGE_SIZE)
cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
@@ -3772,7 +3772,7 @@ static int cifs_launder_page(struct page *page)
{
int rc = 0;
loff_t range_start = page_offset(page);
- loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+ loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 0,
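
readpages_get_pages() above grows one read request a page at a time until the next page would push it past rsize. The same accounting in miniature (rsize and page size are assumed values):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL /* assumed 4 KiB page */

	int main(void)
	{
		unsigned long rsize = 16384;     /* assumed server read size */
		unsigned long bytes = PAGE_SIZE; /* the first page always goes in */
		unsigned long nr_pages = 1;

		/* Keep adding whole pages while the request stays within rsize,
		 * as readpages_get_pages() does for contiguous indices. */
		while (bytes + PAGE_SIZE <= rsize) {
			bytes += PAGE_SIZE;
			nr_pages++;
		}
		printf("one request: %lu pages, %lu bytes\n", nr_pages, bytes);
		return 0;
	}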
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 4cd4705ebfad..514dadb0575d 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -59,7 +59,7 @@ static void cifs_set_ops(struct inode *inode)
/* check if server can support readpages */
if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
- PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
+ PAGE_SIZE + MAX_CIFS_HDR_SIZE)
inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
else
inode->i_data.a_ops = &cifs_addr_ops;
@@ -2019,8 +2019,8 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
static int cifs_truncate_page(struct address_space *mapping, loff_t from)
{
- pgoff_t index = from >> PAGE_CACHE_SHIFT;
- unsigned offset = from & (PAGE_CACHE_SIZE - 1);
+ pgoff_t index = from >> PAGE_SHIFT;
+ unsigned offset = from & (PAGE_SIZE - 1);
struct page *page;
int rc = 0;
@@ -2028,9 +2028,9 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
if (!page)
return -ENOMEM;
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ zero_user_segment(page, offset, PAGE_SIZE);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return rc;
}
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index a8f3b589a2df..cfd91320e869 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -71,8 +71,8 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent)
struct inode *inode;
struct dentry *root;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = CONFIGFS_MAGIC;
sb->s_op = &configfs_ops;
sb->s_time_gran = 1;
diff --git a/fs/cramfs/README b/fs/cramfs/README
index 445d1c2d7646..9d4e7ea311f4 100644
--- a/fs/cramfs/README
+++ b/fs/cramfs/README
@@ -86,26 +86,26 @@ Block Size
(Block size in cramfs refers to the size of input data that is
compressed at a time. It's intended to be somewhere around
-PAGE_CACHE_SIZE for cramfs_readpage's convenience.)
+PAGE_SIZE for cramfs_readpage's convenience.)
The superblock ought to indicate the block size that the fs was
written for, since comments in <linux/pagemap.h> indicate that
-PAGE_CACHE_SIZE may grow in future (if I interpret the comment
+PAGE_SIZE may grow in future (if I interpret the comment
correctly).
-Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that
-for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in
+Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that
+for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in
turn is defined as PAGE_SIZE (which can be as large as 32KB on arm).
This discrepancy is a bug, though it's not clear which should be
changed.
-One option is to change mkcramfs to take its PAGE_CACHE_SIZE from
+One option is to change mkcramfs to take its PAGE_SIZE from
<asm/page.h>. Personally I don't like this option, but it does
require the least amount of change: just change `#define
-PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
+PAGE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
is that the generated cramfs cannot always be shared between different
kernels, not even necessarily kernels of the same architecture if
-PAGE_CACHE_SIZE is subject to change between kernel versions
+PAGE_SIZE is subject to change between kernel versions
(currently possible with arm and ia64).
The remaining options try to make cramfs more sharable.
@@ -126,22 +126,22 @@ size. The options are:
1. Always 4096 bytes.
2. Writer chooses blocksize; kernel adapts but rejects blocksize >
- PAGE_CACHE_SIZE.
+ PAGE_SIZE.
3. Writer chooses blocksize; kernel adapts even to blocksize >
- PAGE_CACHE_SIZE.
+ PAGE_SIZE.
It's easy enough to change the kernel to use a smaller value than
-PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks.
+PAGE_SIZE: just make cramfs_readpage read multiple blocks.
-The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE
+The cost of option 1 is that kernels with a larger PAGE_SIZE
value don't get as good compression as they can.
The cost of option 2 relative to option 1 is that the code uses
variables instead of #define'd constants. The gain is that people
-with kernels having larger PAGE_CACHE_SIZE can make use of that if
+with kernels having larger PAGE_SIZE can make use of that if
they don't mind their cramfs being inaccessible to kernels with
-smaller PAGE_CACHE_SIZE values.
+smaller PAGE_SIZE values.
Option 3 is easy to implement if we don't mind being CPU-inefficient:
e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
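
For the case the README weighs above — a kernel whose PAGE_SIZE exceeds the image's block size — readpage would decompress several consecutive blocks into one page, and the block count is just the ratio. A trivial sketch (both sizes below are assumptions for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 16384; /* e.g. a 16 KiB PAGE_SIZE kernel */
		unsigned long blksize = 4096;    /* image written with 4 KiB blocks */

		/* An adapting readpage would decompress this many consecutive
		 * blocks to fill a single page. */
		printf("blocks per page: %lu\n", page_size / blksize);
		return 0;
	}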
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b862bc219cd7..3a32ddf98095 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -137,7 +137,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
* page cache and dentry tree anyway..
*
* This also acts as a way to guarantee contiguous areas of up to
- * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
+ * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
* worry about end-of-buffer issues even when decompressing a full
* page cache.
*/
@@ -152,7 +152,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
*/
#define BLKS_PER_BUF_SHIFT (2)
#define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT)
-#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_CACHE_SIZE)
+#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_SIZE)
static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
static unsigned buffer_blocknr[READ_BUFFERS];
@@ -173,8 +173,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
if (!len)
return NULL;
- blocknr = offset >> PAGE_CACHE_SHIFT;
- offset &= PAGE_CACHE_SIZE - 1;
+ blocknr = offset >> PAGE_SHIFT;
+ offset &= PAGE_SIZE - 1;
/* Check if an existing buffer already has the data.. */
for (i = 0; i < READ_BUFFERS; i++) {
@@ -184,14 +184,14 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
continue;
if (blocknr < buffer_blocknr[i])
continue;
- blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
+ blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
blk_offset += offset;
if (blk_offset + len > BUFFER_SIZE)
continue;
return read_buffers[i] + blk_offset;
}
- devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;
+ devsize = mapping->host->i_size >> PAGE_SHIFT;
/* Ok, read in BLKS_PER_BUF pages completely first. */
for (i = 0; i < BLKS_PER_BUF; i++) {
@@ -213,7 +213,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
wait_on_page_locked(page);
if (!PageUptodate(page)) {
/* asynchronous error */
- page_cache_release(page);
+ put_page(page);
pages[i] = NULL;
}
}
@@ -229,12 +229,12 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
struct page *page = pages[i];
if (page) {
- memcpy(data, kmap(page), PAGE_CACHE_SIZE);
+ memcpy(data, kmap(page), PAGE_SIZE);
kunmap(page);
- page_cache_release(page);
+ put_page(page);
} else
- memset(data, 0, PAGE_CACHE_SIZE);
- data += PAGE_CACHE_SIZE;
+ memset(data, 0, PAGE_SIZE);
+ data += PAGE_SIZE;
}
return read_buffers[buffer] + offset;
}
@@ -353,7 +353,7 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = CRAMFS_MAGIC;
- buf->f_bsize = PAGE_CACHE_SIZE;
+ buf->f_bsize = PAGE_SIZE;
buf->f_blocks = CRAMFS_SB(sb)->blocks;
buf->f_bfree = 0;
buf->f_bavail = 0;
@@ -496,7 +496,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
int bytes_filled;
void *pgdata;
- maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
bytes_filled = 0;
pgdata = kmap(page);
@@ -516,14 +516,14 @@ static int cramfs_readpage(struct file *file, struct page *page)
if (compr_len == 0)
; /* hole */
- else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) {
+ else if (unlikely(compr_len > (PAGE_SIZE << 1))) {
pr_err("bad compressed blocksize %u\n",
compr_len);
goto err;
} else {
mutex_lock(&read_mutex);
bytes_filled = cramfs_uncompress_block(pgdata,
- PAGE_CACHE_SIZE,
+ PAGE_SIZE,
cramfs_read(sb, start_offset, compr_len),
compr_len);
mutex_unlock(&read_mutex);
@@ -532,7 +532,7 @@ static int cramfs_readpage(struct file *file, struct page *page)
}
}
- memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
+ memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
flush_dcache_page(page);
kunmap(page);
SetPageUptodate(page);
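
cramfs_read() above turns a byte offset into a block number plus in-page offset, then checks whether an already-read BLKS_PER_BUF window covers the requested span. A sketch of that hit test (4 KiB page and the same 4-block window assumed):

	#include <stdio.h>

	#define PAGE_SHIFT 12                 /* assumed 4 KiB page */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define BLKS_PER_BUF 4
	#define BUFFER_SIZE (BLKS_PER_BUF * PAGE_SIZE)

	/* Does a cached window starting at window_blk cover [offset, offset+len)?
	 * Mirrors the buffer reuse check in cramfs_read(). */
	static int window_hit(unsigned long window_blk, unsigned long offset,
			      unsigned long len)
	{
		unsigned long blocknr = offset >> PAGE_SHIFT;
		unsigned long in_page = offset & (PAGE_SIZE - 1);
		unsigned long blk_offset;

		if (blocknr < window_blk)
			return 0;
		blk_offset = ((blocknr - window_blk) << PAGE_SHIFT) + in_page;
		return blk_offset + len <= BUFFER_SIZE;
	}

	int main(void)
	{
		printf("%d\n", window_hit(2, 3 * PAGE_SIZE + 100, 200)); /* hit */
		printf("%d\n", window_hit(2, 6 * PAGE_SIZE, 1));         /* miss */
		return 0;
	}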
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 06cd1a22240b..7f5804537d30 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -175,10 +175,10 @@ static int do_page_crypto(struct inode *inode,
FS_XTS_TWEAK_SIZE - sizeof(index));
sg_init_table(&dst, 1);
- sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+ sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
sg_init_table(&src, 1);
- sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
- skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+ sg_set_page(&src, src_page, PAGE_SIZE, 0);
+ skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
xts_tweak);
if (rw == FS_DECRYPT)
res = crypto_skcipher_decrypt(req);
@@ -287,7 +287,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
struct bio *bio;
int ret, err = 0;
- BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+ BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
ctx = fscrypt_get_ctx(inode);
if (IS_ERR(ctx))
diff --git a/fs/dax.c b/fs/dax.c
index 90322eb7498c..75ba46d82a76 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -323,7 +323,7 @@ static int dax_load_hole(struct address_space *mapping, struct page *page,
size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (vmf->pgoff >= size) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return VM_FAULT_SIGBUS;
}
@@ -351,7 +351,7 @@ static int copy_user_bh(struct page *to, struct inode *inode,
}
#define NO_SECTOR -1
-#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT))
+#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
sector_t sector, bool pmd_entry, bool dirty)
@@ -506,8 +506,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
return 0;
- start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
- end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
+ start_index = wbc->range_start >> PAGE_SHIFT;
+ end_index = wbc->range_end >> PAGE_SHIFT;
pmd_index = DAX_PMD_INDEX(start_index);
rcu_read_lock();
@@ -642,12 +642,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
page = find_get_page(mapping, vmf->pgoff);
if (page) {
if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
- page_cache_release(page);
+ put_page(page);
return VM_FAULT_RETRY;
}
if (unlikely(page->mapping != mapping)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
goto repeat;
}
size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -711,10 +711,10 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
if (page) {
unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
- PAGE_CACHE_SIZE, 0);
+ PAGE_SIZE, 0);
delete_from_page_cache(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
page = NULL;
}
@@ -747,7 +747,7 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
unlock_page:
if (page) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
goto out;
}
@@ -1094,7 +1094,7 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
* you are truncating a file, the helper function dax_truncate_page() may be
* more convenient.
*
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
* block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
* took care of disposing of the unnecessary blocks. Even if the filesystem
* block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1104,18 +1104,18 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
get_block_t get_block)
{
struct buffer_head bh;
- pgoff_t index = from >> PAGE_CACHE_SHIFT;
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ pgoff_t index = from >> PAGE_SHIFT;
+ unsigned offset = from & (PAGE_SIZE-1);
int err;
/* Block boundary? Nothing to do */
if (!length)
return 0;
- BUG_ON((offset + length) > PAGE_CACHE_SIZE);
+ BUG_ON((offset + length) > PAGE_SIZE);
memset(&bh, 0, sizeof(bh));
bh.b_bdev = inode->i_sb->s_bdev;
- bh.b_size = PAGE_CACHE_SIZE;
+ bh.b_size = PAGE_SIZE;
err = get_block(inode, index, &bh, 0);
if (err < 0)
return err;
@@ -1123,7 +1123,7 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
struct block_device *bdev = bh.b_bdev;
struct blk_dax_ctl dax = {
.sector = to_sector(&bh, inode),
- .size = PAGE_CACHE_SIZE,
+ .size = PAGE_SIZE,
};
if (dax_map_atomic(bdev, &dax) < 0)
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
* Similar to block_truncate_page(), this function can be called by a
* filesystem when it is truncating a DAX file to handle the partial page.
*
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
* block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
* took care of disposing of the unnecessary blocks. Even if the filesystem
* block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
*/
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
- unsigned length = PAGE_CACHE_ALIGN(from) - from;
+ unsigned length = PAGE_ALIGN(from) - from;
return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
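
dax_truncate_page() above zeroes from 'from' to the end of its page; PAGE_ALIGN(from) - from is zero exactly when 'from' is already page-aligned, which is what the "Block boundary? Nothing to do" check in dax_zero_page_range() catches. A sketch of the length computation (4 KiB page assumed):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL /* assumed 4 KiB page */
	#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long from = 10000;
		unsigned long length = PAGE_ALIGN(from) - from; /* 2288 */
		printf("zero %lu bytes (10000..12288)\n", length);

		/* Page-aligned 'from' yields 0: nothing to zero. */
		printf("aligned case: %lu\n", PAGE_ALIGN(8192UL) - 8192UL);
		return 0;
	}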
diff --git a/fs/dcache.c b/fs/dcache.c
index 32ceae3e6112..d5ecc6e477da 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1667,7 +1667,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
DCACHE_OP_REVALIDATE |
DCACHE_OP_WEAK_REVALIDATE |
DCACHE_OP_DELETE |
- DCACHE_OP_SELECT_INODE));
+ DCACHE_OP_SELECT_INODE |
+ DCACHE_OP_REAL));
dentry->d_op = op;
if (!op)
return;
@@ -1685,6 +1686,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
dentry->d_flags |= DCACHE_OP_PRUNE;
if (op->d_select_inode)
dentry->d_flags |= DCACHE_OP_SELECT_INODE;
+ if (op->d_real)
+ dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 476f1ecbd1f0..472037732daf 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -172,7 +172,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
*/
if (dio->page_errors == 0)
dio->page_errors = ret;
- page_cache_get(page);
+ get_page(page);
dio->pages[0] = page;
sdio->head = 0;
sdio->tail = 1;
@@ -424,7 +424,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
while (sdio->head < sdio->tail)
- page_cache_release(dio->pages[sdio->head++]);
+ put_page(dio->pages[sdio->head++]);
}
/*
@@ -487,7 +487,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
if (dio->rw == READ && !PageCompound(page) &&
dio->should_dirty)
set_page_dirty_lock(page);
- page_cache_release(page);
+ put_page(page);
}
err = bio->bi_error;
bio_put(bio);
@@ -696,7 +696,7 @@ static inline int dio_bio_add_page(struct dio_submit *sdio)
*/
if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
sdio->pages_in_io--;
- page_cache_get(sdio->cur_page);
+ get_page(sdio->cur_page);
sdio->final_block_in_bio = sdio->cur_page_block +
(sdio->cur_page_len >> sdio->blkbits);
ret = 0;
@@ -810,13 +810,13 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
*/
if (sdio->cur_page) {
ret = dio_send_cur_page(dio, sdio, map_bh);
- page_cache_release(sdio->cur_page);
+ put_page(sdio->cur_page);
sdio->cur_page = NULL;
if (ret)
return ret;
}
- page_cache_get(page); /* It is in dio */
+ get_page(page); /* It is in dio */
sdio->cur_page = page;
sdio->cur_page_offset = offset;
sdio->cur_page_len = len;
@@ -830,7 +830,7 @@ out:
if (sdio->boundary) {
ret = dio_send_cur_page(dio, sdio, map_bh);
dio_bio_submit(dio, sdio);
- page_cache_release(sdio->cur_page);
+ put_page(sdio->cur_page);
sdio->cur_page = NULL;
}
return ret;
@@ -947,7 +947,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
ret = get_more_blocks(dio, sdio, map_bh);
if (ret) {
- page_cache_release(page);
+ put_page(page);
goto out;
}
if (!buffer_mapped(map_bh))
@@ -988,7 +988,7 @@ do_holes:
/* AKPM: eargh, -ENOTBLK is a hack */
if (dio->rw & WRITE) {
- page_cache_release(page);
+ put_page(page);
return -ENOTBLK;
}
@@ -1001,7 +1001,7 @@ do_holes:
if (sdio->block_in_file >=
i_size_aligned >> blkbits) {
/* We hit eof */
- page_cache_release(page);
+ put_page(page);
goto out;
}
zero_user(page, from, 1 << blkbits);
@@ -1041,7 +1041,7 @@ do_holes:
sdio->next_block_for_io,
map_bh);
if (ret) {
- page_cache_release(page);
+ put_page(page);
goto out;
}
sdio->next_block_for_io += this_chunk_blocks;
@@ -1057,7 +1057,7 @@ next_block:
}
/* Drop the ref which was taken in get_user_pages() */
- page_cache_release(page);
+ put_page(page);
}
out:
return ret;
@@ -1281,7 +1281,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
if (retval == 0)
retval = ret2;
- page_cache_release(sdio.cur_page);
+ put_page(sdio.cur_page);
sdio.cur_page = NULL;
}
if (sdio.bio)
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 519112168a9e..1669f6291c95 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -343,13 +343,12 @@ static struct config_group *make_cluster(struct config_group *g,
struct dlm_cluster *cl = NULL;
struct dlm_spaces *sps = NULL;
struct dlm_comms *cms = NULL;
- void *gps = NULL;
cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
- if (!cl || !gps || !sps || !cms)
+ if (!cl || !sps || !cms)
goto fail;
config_group_init_type_name(&cl->group, name, &cluster_type);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 00640e70ed7a..1ab012a27d9f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -640,7 +640,7 @@ static int receive_from_sock(struct connection *con)
con->rx_page = alloc_page(GFP_ATOMIC);
if (con->rx_page == NULL)
goto out_resched;
- cbuf_init(&con->cb, PAGE_CACHE_SIZE);
+ cbuf_init(&con->cb, PAGE_SIZE);
}
/*
@@ -657,7 +657,7 @@ static int receive_from_sock(struct connection *con)
* buffer and the start of the currently used section (cb.base)
*/
if (cbuf_data(&con->cb) >= con->cb.base) {
- iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
+ iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
iov[1].iov_len = con->cb.base;
iov[1].iov_base = page_address(con->rx_page);
nvec = 2;
@@ -675,7 +675,7 @@ static int receive_from_sock(struct connection *con)
ret = dlm_process_incoming_buffer(con->nodeid,
page_address(con->rx_page),
con->cb.base, con->cb.len,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (ret == -EBADMSG) {
log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
page_address(con->rx_page), con->cb.base,
@@ -1416,7 +1416,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
spin_lock(&con->writequeue_lock);
e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
if ((&e->list == &con->writequeue) ||
- (PAGE_CACHE_SIZE - e->end < len)) {
+ (PAGE_SIZE - e->end < len)) {
e = NULL;
} else {
offset = e->end;
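
The receive path above keeps a circular buffer inside a single page: when the free region wraps past the end of the page, one read needs two iovecs — data to page end, then page start to base. A sketch of that split (page size assumed; base/data play the roles of the cb fields above):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL /* assumed 4 KiB page */

	int main(void)
	{
		/* base: where unconsumed data starts; data: where the next
		 * read lands.  data >= base means the writable region wraps. */
		unsigned long base = 1000, data = 3000;

		if (data >= base) {
			printf("iov0: off=%lu len=%lu\n", data, PAGE_SIZE - data);
			printf("iov1: off=0 len=%lu\n", base);
		} else {
			printf("iov0: off=%lu len=%lu\n", data, base - data);
		}
		return 0;
	}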
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 543a146ee019..8710093aae5e 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -286,7 +286,7 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
pg = virt_to_page(addr);
offset = offset_in_page(addr);
sg_set_page(&sg[i], pg, 0, offset);
- remainder_of_page = PAGE_CACHE_SIZE - offset;
+ remainder_of_page = PAGE_SIZE - offset;
if (size >= remainder_of_page) {
sg[i].length = remainder_of_page;
addr += remainder_of_page;
@@ -400,7 +400,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
struct page *page)
{
return ecryptfs_lower_header_size(crypt_stat) +
- ((loff_t)page->index << PAGE_CACHE_SHIFT);
+ ((loff_t)page->index << PAGE_SHIFT);
}
/**
@@ -428,7 +428,7 @@ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat,
size_t extent_size = crypt_stat->extent_size;
int rc;
- extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size));
+ extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size));
rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
(extent_base + extent_offset));
if (rc) {
@@ -498,7 +498,7 @@ int ecryptfs_encrypt_page(struct page *page)
}
for (extent_offset = 0;
- extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+ extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
extent_offset++) {
rc = crypt_extent(crypt_stat, enc_extent_page, page,
extent_offset, ENCRYPT);
@@ -512,7 +512,7 @@ int ecryptfs_encrypt_page(struct page *page)
lower_offset = lower_offset_for_page(crypt_stat, page);
enc_extent_virt = kmap(enc_extent_page);
rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
kunmap(enc_extent_page);
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
@@ -560,7 +560,7 @@ int ecryptfs_decrypt_page(struct page *page)
lower_offset = lower_offset_for_page(crypt_stat, page);
page_virt = kmap(page);
- rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE,
+ rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
ecryptfs_inode);
kunmap(page);
if (rc < 0) {
@@ -571,7 +571,7 @@ int ecryptfs_decrypt_page(struct page *page)
}
for (extent_offset = 0;
- extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+ extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
extent_offset++) {
rc = crypt_extent(crypt_stat, page, page,
extent_offset, DECRYPT);
@@ -659,11 +659,11 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
else {
- if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
+ if (PAGE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
crypt_stat->metadata_size =
ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
else
- crypt_stat->metadata_size = PAGE_CACHE_SIZE;
+ crypt_stat->metadata_size = PAGE_SIZE;
}
}
@@ -1445,7 +1445,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
ECRYPTFS_VALIDATE_HEADER_SIZE);
if (rc) {
/* metadata is not in the file header, so try xattrs */
- memset(page_virt, 0, PAGE_CACHE_SIZE);
+ memset(page_virt, 0, PAGE_SIZE);
rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
if (rc) {
printk(KERN_DEBUG "Valid eCryptfs headers not found in "
@@ -1478,7 +1478,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
}
out:
if (page_virt) {
- memset(page_virt, 0, PAGE_CACHE_SIZE);
+ memset(page_virt, 0, PAGE_SIZE);
kmem_cache_free(ecryptfs_header_cache, page_virt);
}
return rc;
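
ecryptfs_encrypt_page() and ecryptfs_decrypt_page() above iterate over every extent in a page; with the default 4 KiB extent on a 4 KiB page the loop runs once, but the bound is computed, not assumed. A sketch of the loop shape (both sizes are illustrative values):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL /* assumed 4 KiB page */

	int main(void)
	{
		unsigned long extent_size = 4096; /* assumed default extent size */
		unsigned long extent_offset;

		/* One crypt operation per extent that fits in the page. */
		for (extent_offset = 0;
		     extent_offset < PAGE_SIZE / extent_size;
		     extent_offset++)
			printf("crypt extent %lu of page\n", extent_offset);
		return 0;
	}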
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 1ac631cd9d84..9323c536b2f2 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -763,10 +763,10 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
} else { /* ia->ia_size < i_size_read(inode) */
/* We're chopping off all the pages down to the page
* in which ia->ia_size is located. Fill in the end of
- * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
- * PAGE_CACHE_SIZE with zeros. */
- size_t num_zeros = (PAGE_CACHE_SIZE
- - (ia->ia_size & ~PAGE_CACHE_MASK));
+ * that page from (ia->ia_size & ~PAGE_MASK) to
+ * PAGE_SIZE with zeros. */
+ size_t num_zeros = (PAGE_SIZE
+ - (ia->ia_size & ~PAGE_MASK));
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
truncate_setsize(inode, ia->ia_size);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 9893d1538122..3cf1546dca82 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1798,7 +1798,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
* added the our &auth_tok_list */
next_packet_is_auth_tok_packet = 1;
while (next_packet_is_auth_tok_packet) {
- size_t max_packet_size = ((PAGE_CACHE_SIZE - 8) - i);
+ size_t max_packet_size = ((PAGE_SIZE - 8) - i);
switch (src[i]) {
case ECRYPTFS_TAG_3_PACKET_TYPE:
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 8b0b4a73116d..1698132d0e57 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -695,12 +695,12 @@ static struct ecryptfs_cache_info {
{
.cache = &ecryptfs_header_cache,
.name = "ecryptfs_headers",
- .size = PAGE_CACHE_SIZE,
+ .size = PAGE_SIZE,
},
{
.cache = &ecryptfs_xattr_cache,
.name = "ecryptfs_xattr_cache",
- .size = PAGE_CACHE_SIZE,
+ .size = PAGE_SIZE,
},
{
.cache = &ecryptfs_key_record_cache,
@@ -818,7 +818,7 @@ static int __init ecryptfs_init(void)
{
int rc;
- if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) {
+ if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_SIZE) {
rc = -EINVAL;
ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
"larger than the host's page size, and so "
@@ -826,7 +826,7 @@ static int __init ecryptfs_init(void)
"default eCryptfs extent size is [%u] bytes; "
"the page size is [%lu] bytes.\n",
ECRYPTFS_DEFAULT_EXTENT_SIZE,
- (unsigned long)PAGE_CACHE_SIZE);
+ (unsigned long)PAGE_SIZE);
goto out;
}
rc = ecryptfs_init_kmem_caches();
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 39e4381d3a65..148d11b514fb 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -122,7 +122,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
struct ecryptfs_crypt_stat *crypt_stat)
{
loff_t extent_num_in_page = 0;
- loff_t num_extents_per_page = (PAGE_CACHE_SIZE
+ loff_t num_extents_per_page = (PAGE_SIZE
/ crypt_stat->extent_size);
int rc = 0;
@@ -138,7 +138,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
char *page_virt;
page_virt = kmap_atomic(page);
- memset(page_virt, 0, PAGE_CACHE_SIZE);
+ memset(page_virt, 0, PAGE_SIZE);
/* TODO: Support more than one header extent */
if (view_extent_num == 0) {
size_t written;
@@ -164,8 +164,8 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
- crypt_stat->metadata_size);
rc = ecryptfs_read_lower_page_segment(
- page, (lower_offset >> PAGE_CACHE_SHIFT),
- (lower_offset & ~PAGE_CACHE_MASK),
+ page, (lower_offset >> PAGE_SHIFT),
+ (lower_offset & ~PAGE_MASK),
crypt_stat->extent_size, page->mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error attempting to read "
@@ -198,7 +198,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
- PAGE_CACHE_SIZE,
+ PAGE_SIZE,
page->mapping->host);
} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
@@ -215,7 +215,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
} else {
rc = ecryptfs_read_lower_page_segment(
- page, page->index, 0, PAGE_CACHE_SIZE,
+ page, page->index, 0, PAGE_SIZE,
page->mapping->host);
if (rc) {
printk(KERN_ERR "Error reading page; rc = "
@@ -250,12 +250,12 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
struct inode *inode = page->mapping->host;
int end_byte_in_page;
- if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
+ if ((i_size_read(inode) / PAGE_SIZE) != page->index)
goto out;
- end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
+ end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
if (to > end_byte_in_page)
end_byte_in_page = to;
- zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
+ zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
out:
return 0;
}
@@ -279,7 +279,7 @@ static int ecryptfs_write_begin(struct file *file,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
loff_t prev_page_end_size;
int rc = 0;
@@ -289,14 +289,14 @@ static int ecryptfs_write_begin(struct file *file,
return -ENOMEM;
*pagep = page;
- prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT);
+ prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
if (!PageUptodate(page)) {
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(
- page, index, 0, PAGE_CACHE_SIZE, mapping->host);
+ page, index, 0, PAGE_SIZE, mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error attempting to read "
"lower page segment; rc = [%d]\n",
@@ -322,7 +322,7 @@ static int ecryptfs_write_begin(struct file *file,
SetPageUptodate(page);
} else {
rc = ecryptfs_read_lower_page_segment(
- page, index, 0, PAGE_CACHE_SIZE,
+ page, index, 0, PAGE_SIZE,
mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error reading "
@@ -336,9 +336,9 @@ static int ecryptfs_write_begin(struct file *file,
} else {
if (prev_page_end_size
>= i_size_read(page->mapping->host)) {
- zero_user(page, 0, PAGE_CACHE_SIZE);
+ zero_user(page, 0, PAGE_SIZE);
SetPageUptodate(page);
- } else if (len < PAGE_CACHE_SIZE) {
+ } else if (len < PAGE_SIZE) {
rc = ecryptfs_decrypt_page(page);
if (rc) {
printk(KERN_ERR "%s: Error decrypting "
@@ -371,11 +371,11 @@ static int ecryptfs_write_begin(struct file *file,
* of page? Zero it out. */
if ((i_size_read(mapping->host) == prev_page_end_size)
&& (pos != 0))
- zero_user(page, 0, PAGE_CACHE_SIZE);
+ zero_user(page, 0, PAGE_SIZE);
out:
if (unlikely(rc)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
*pagep = NULL;
}
return rc;
@@ -438,7 +438,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
inode_lock(lower_inode);
size = lower_inode->i_op->getxattr(lower_dentry, lower_inode,
ECRYPTFS_XATTR_NAME,
- xattr_virt, PAGE_CACHE_SIZE);
+ xattr_virt, PAGE_SIZE);
if (size < 0)
size = 8;
put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
@@ -480,8 +480,8 @@ static int ecryptfs_write_end(struct file *file,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ pgoff_t index = pos >> PAGE_SHIFT;
+ unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + copied;
struct inode *ecryptfs_inode = mapping->host;
struct ecryptfs_crypt_stat *crypt_stat =
@@ -501,7 +501,7 @@ static int ecryptfs_write_end(struct file *file,
goto out;
}
if (!PageUptodate(page)) {
- if (copied < PAGE_CACHE_SIZE) {
+ if (copied < PAGE_SIZE) {
rc = 0;
goto out;
}
@@ -534,7 +534,7 @@ static int ecryptfs_write_end(struct file *file,
rc = copied;
out:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return rc;
}
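
These eCryptfs hunks, like most of this series, are the mechanical half of
the tree-wide PAGE_CACHE_* removal: PAGE_CACHE_{SIZE,SHIFT,MASK} have long
been #defined to PAGE_{SIZE,SHIFT,MASK}, and page_cache_release() to
put_page(), so the substitution is behavior-neutral. A minimal runnable
sketch of the arithmetic these macros express, assuming 4 KiB pages
(PAGE_SHIFT is per-architecture in the kernel):

  #include <stdint.h>
  #include <stdio.h>

  #define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)
  #define PAGE_MASK  (~(PAGE_SIZE - 1))

  int main(void)
  {
          uint64_t pos = 10000;                 /* byte position in a file */
          uint64_t index = pos >> PAGE_SHIFT;   /* page-cache index: 2 */
          uint64_t off = pos & ~PAGE_MASK;      /* offset in page: 1808 */

          printf("index=%llu off=%llu\n",
                 (unsigned long long)index, (unsigned long long)off);
          return 0;
  }
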
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 09fe622274e4..158a3a39f82d 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -74,7 +74,7 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
loff_t offset;
int rc;
- offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
+ offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT)
+ offset_in_page);
virt = kmap(page_for_lower);
rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
@@ -123,9 +123,9 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
else
pos = offset;
while (pos < (offset + size)) {
- pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
- size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
- size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
+ pgoff_t ecryptfs_page_idx = (pos >> PAGE_SHIFT);
+ size_t start_offset_in_page = (pos & ~PAGE_MASK);
+ size_t num_bytes = (PAGE_SIZE - start_offset_in_page);
loff_t total_remaining_bytes = ((offset + size) - pos);
if (fatal_signal_pending(current)) {
@@ -165,7 +165,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
* Fill in zero values to the end of the page */
memset(((char *)ecryptfs_page_virt
+ start_offset_in_page), 0,
- PAGE_CACHE_SIZE - start_offset_in_page);
+ PAGE_SIZE - start_offset_in_page);
}
/* pos >= offset, we are now writing the data request */
@@ -186,7 +186,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
ecryptfs_page,
start_offset_in_page,
data_offset);
- page_cache_release(ecryptfs_page);
+ put_page(ecryptfs_page);
if (rc) {
printk(KERN_ERR "%s: Error encrypting "
"page; rc = [%d]\n", __func__, rc);
@@ -262,7 +262,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
loff_t offset;
int rc;
- offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page);
+ offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page);
virt = kmap(page_for_ecryptfs);
rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
if (rc > 0)
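
ecryptfs_write() above walks an arbitrary (offset, size) range one
page-cache page at a time, clamping each copy to the end of the current
page and to the bytes still outstanding. A runnable sketch of just that
accounting, under the same 4 KiB-page assumption (names shortened; this is
not the kernel function):

  #include <stdio.h>

  #define PAGE_SHIFT 12
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)
  #define PAGE_MASK  (~(PAGE_SIZE - 1))

  int main(void)
  {
          unsigned long offset = 6000, size = 10000;   /* example range */
          unsigned long pos = offset;

          while (pos < offset + size) {
                  unsigned long start = pos & ~PAGE_MASK;
                  unsigned long nbytes = PAGE_SIZE - start;

                  if (nbytes > offset + size - pos)
                          nbytes = offset + size - pos;
                  printf("page %lu: %lu bytes at %lu\n",
                         pos >> PAGE_SHIFT, nbytes, start);
                  pos += nbytes;
          }
          return 0;
  }
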
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index dd029d13ea61..553c5d2db4a4 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -197,8 +197,8 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
efivarfs_sb = sb;
sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = EFIVARFS_MAGIC;
sb->s_op = &efivarfs_ops;
sb->s_d_op = &efivarfs_d_ops;
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index e5bb2abf77f9..547b93cbea63 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -41,16 +41,16 @@ static inline unsigned exofs_chunk_size(struct inode *inode)
static inline void exofs_put_page(struct page *page)
{
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr)
{
loff_t last_byte = inode->i_size;
- last_byte -= page_nr << PAGE_CACHE_SHIFT;
- if (last_byte > PAGE_CACHE_SIZE)
- last_byte = PAGE_CACHE_SIZE;
+ last_byte -= page_nr << PAGE_SHIFT;
+ if (last_byte > PAGE_SIZE)
+ last_byte = PAGE_SIZE;
return last_byte;
}
@@ -85,13 +85,13 @@ static void exofs_check_page(struct page *page)
unsigned chunk_size = exofs_chunk_size(dir);
char *kaddr = page_address(page);
unsigned offs, rec_len;
- unsigned limit = PAGE_CACHE_SIZE;
+ unsigned limit = PAGE_SIZE;
struct exofs_dir_entry *p;
char *error;
/* if the page is the last one in the directory */
- if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
- limit = dir->i_size & ~PAGE_CACHE_MASK;
+ if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+ limit = dir->i_size & ~PAGE_MASK;
if (limit & (chunk_size - 1))
goto Ebadsize;
if (!limit)
@@ -138,7 +138,7 @@ bad_entry:
EXOFS_ERR(
"ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
"offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
- dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
_LLU(le64_to_cpu(p->inode_no)),
rec_len, p->name_len);
goto fail;
@@ -147,7 +147,7 @@ Eend:
EXOFS_ERR("ERROR [exofs_check_page]: "
"entry in directory(0x%lx) spans the page boundary"
"offset=%lu, inode=0x%llx\n",
- dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
_LLU(le64_to_cpu(p->inode_no)));
fail:
SetPageChecked(page);
@@ -237,8 +237,8 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
{
loff_t pos = ctx->pos;
struct inode *inode = file_inode(file);
- unsigned int offset = pos & ~PAGE_CACHE_MASK;
- unsigned long n = pos >> PAGE_CACHE_SHIFT;
+ unsigned int offset = pos & ~PAGE_MASK;
+ unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
int need_revalidate = (file->f_version != inode->i_version);
@@ -254,7 +254,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
if (IS_ERR(page)) {
EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
inode->i_ino);
- ctx->pos += PAGE_CACHE_SIZE - offset;
+ ctx->pos += PAGE_SIZE - offset;
return PTR_ERR(page);
}
kaddr = page_address(page);
@@ -262,7 +262,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
if (offset) {
offset = exofs_validate_entry(kaddr, offset,
chunk_mask);
- ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+ ctx->pos = (n<<PAGE_SHIFT) + offset;
}
file->f_version = inode->i_version;
need_revalidate = 0;
@@ -449,7 +449,7 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode)
kaddr = page_address(page);
dir_end = kaddr + exofs_last_byte(dir, n);
de = (struct exofs_dir_entry *)kaddr;
- kaddr += PAGE_CACHE_SIZE - reclen;
+ kaddr += PAGE_SIZE - reclen;
while ((char *)de <= kaddr) {
if ((char *)de == dir_end) {
name_len = 0;
@@ -602,7 +602,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
kunmap_atomic(kaddr);
err = exofs_commit_chunk(page, 0, chunk_size);
fail:
- page_cache_release(page);
+ put_page(page);
return err;
}
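
exofs_last_byte() above (and the identical ext2_last_byte() later in this
patch) answers "how many bytes of directory page page_nr lie below
i_size?": subtract the page's start offset, then clamp to one page. A
standalone sketch, assuming 4 KiB pages and a page_nr inside the file, as
the callers guarantee:

  #include <stdio.h>

  #define PAGE_SHIFT 12
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)

  static unsigned long last_byte(unsigned long i_size, unsigned long page_nr)
  {
          unsigned long last = i_size - (page_nr << PAGE_SHIFT);

          return last > PAGE_SIZE ? PAGE_SIZE : last;
  }

  int main(void)
  {
          /* 10000-byte dir: page 0 is full, page 2 holds a 1808-byte tail */
          printf("%lu %lu\n", last_byte(10000, 0), last_byte(10000, 2));
          return 0;
  }
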
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 9eaf595aeaf8..49e1bd00b4ec 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -317,7 +317,7 @@ static int read_exec(struct page_collect *pcol)
if (!pcol->ios) {
int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
- pcol->pg_first << PAGE_CACHE_SHIFT,
+ pcol->pg_first << PAGE_SHIFT,
pcol->length, &pcol->ios);
if (ret)
@@ -383,7 +383,7 @@ static int readpage_strip(void *data, struct page *page)
struct inode *inode = pcol->inode;
struct exofs_i_info *oi = exofs_i(inode);
loff_t i_size = i_size_read(inode);
- pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ pgoff_t end_index = i_size >> PAGE_SHIFT;
size_t len;
int ret;
@@ -397,9 +397,9 @@ static int readpage_strip(void *data, struct page *page)
pcol->that_locked_page = page;
if (page->index < end_index)
- len = PAGE_CACHE_SIZE;
+ len = PAGE_SIZE;
else if (page->index == end_index)
- len = i_size & ~PAGE_CACHE_MASK;
+ len = i_size & ~PAGE_MASK;
else
len = 0;
@@ -442,8 +442,8 @@ try_again:
goto fail;
}
- if (len != PAGE_CACHE_SIZE)
- zero_user(page, len, PAGE_CACHE_SIZE - len);
+ if (len != PAGE_SIZE)
+ zero_user(page, len, PAGE_SIZE - len);
EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
inode->i_ino, page->index, len);
@@ -609,7 +609,7 @@ static void __r4w_put_page(void *priv, struct page *page)
if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
EXOFS_DBGMSG2("index=0x%lx\n", page->index);
- page_cache_release(page);
+ put_page(page);
return;
}
EXOFS_DBGMSG2("that_locked_page index=0x%lx\n",
@@ -633,7 +633,7 @@ static int write_exec(struct page_collect *pcol)
BUG_ON(pcol->ios);
ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
- pcol->pg_first << PAGE_CACHE_SHIFT,
+ pcol->pg_first << PAGE_SHIFT,
pcol->length, &pcol->ios);
if (unlikely(ret))
goto err;
@@ -696,7 +696,7 @@ static int writepage_strip(struct page *page,
struct inode *inode = pcol->inode;
struct exofs_i_info *oi = exofs_i(inode);
loff_t i_size = i_size_read(inode);
- pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ pgoff_t end_index = i_size >> PAGE_SHIFT;
size_t len;
int ret;
@@ -708,9 +708,9 @@ static int writepage_strip(struct page *page,
if (page->index < end_index)
/* in this case, the page is within the limits of the file */
- len = PAGE_CACHE_SIZE;
+ len = PAGE_SIZE;
else {
- len = i_size & ~PAGE_CACHE_MASK;
+ len = i_size & ~PAGE_MASK;
if (page->index > end_index || !len) {
/* in this case, the page is outside the limits
@@ -790,10 +790,10 @@ static int exofs_writepages(struct address_space *mapping,
long start, end, expected_pages;
int ret;
- start = wbc->range_start >> PAGE_CACHE_SHIFT;
+ start = wbc->range_start >> PAGE_SHIFT;
end = (wbc->range_end == LLONG_MAX) ?
start + mapping->nrpages :
- wbc->range_end >> PAGE_CACHE_SHIFT;
+ wbc->range_end >> PAGE_SHIFT;
if (start || end)
expected_pages = end - start + 1;
@@ -881,15 +881,15 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
}
/* read modify write */
- if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
+ if (!PageUptodate(page) && (len != PAGE_SIZE)) {
loff_t i_size = i_size_read(mapping->host);
- pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ pgoff_t end_index = i_size >> PAGE_SHIFT;
size_t rlen;
if (page->index < end_index)
- rlen = PAGE_CACHE_SIZE;
+ rlen = PAGE_SIZE;
else if (page->index == end_index)
- rlen = i_size & ~PAGE_CACHE_MASK;
+ rlen = i_size & ~PAGE_MASK;
else
rlen = 0;
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index c20d77df2679..622a686bb08b 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -292,11 +292,11 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
out_dir:
if (dir_de) {
kunmap(dir_page);
- page_cache_release(dir_page);
+ put_page(dir_page);
}
out_old:
kunmap(old_page);
- page_cache_release(old_page);
+ put_page(old_page);
out:
return err;
}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 0c6638b40f21..7ff6fcfa685d 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -37,7 +37,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
{
unsigned len = le16_to_cpu(dlen);
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
if (len == EXT2_MAX_REC_LEN)
return 1 << 16;
#endif
@@ -46,7 +46,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
static inline __le16 ext2_rec_len_to_disk(unsigned len)
{
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
if (len == (1 << 16))
return cpu_to_le16(EXT2_MAX_REC_LEN);
else
@@ -67,7 +67,7 @@ static inline unsigned ext2_chunk_size(struct inode *inode)
static inline void ext2_put_page(struct page *page)
{
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
/*
@@ -79,9 +79,9 @@ ext2_last_byte(struct inode *inode, unsigned long page_nr)
{
unsigned last_byte = inode->i_size;
- last_byte -= page_nr << PAGE_CACHE_SHIFT;
- if (last_byte > PAGE_CACHE_SIZE)
- last_byte = PAGE_CACHE_SIZE;
+ last_byte -= page_nr << PAGE_SHIFT;
+ if (last_byte > PAGE_SIZE)
+ last_byte = PAGE_SIZE;
return last_byte;
}
@@ -118,12 +118,12 @@ static void ext2_check_page(struct page *page, int quiet)
char *kaddr = page_address(page);
u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
unsigned offs, rec_len;
- unsigned limit = PAGE_CACHE_SIZE;
+ unsigned limit = PAGE_SIZE;
ext2_dirent *p;
char *error;
- if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
- limit = dir->i_size & ~PAGE_CACHE_MASK;
+ if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+ limit = dir->i_size & ~PAGE_MASK;
if (limit & (chunk_size - 1))
goto Ebadsize;
if (!limit)
@@ -176,7 +176,7 @@ bad_entry:
if (!quiet)
ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
- dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
(unsigned long) le32_to_cpu(p->inode),
rec_len, p->name_len);
goto fail;
@@ -186,7 +186,7 @@ Eend:
ext2_error(sb, "ext2_check_page",
"entry in directory #%lu spans the page boundary"
"offset=%lu, inode=%lu",
- dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
(unsigned long) le32_to_cpu(p->inode));
}
fail:
@@ -287,8 +287,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
loff_t pos = ctx->pos;
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
- unsigned int offset = pos & ~PAGE_CACHE_MASK;
- unsigned long n = pos >> PAGE_CACHE_SHIFT;
+ unsigned int offset = pos & ~PAGE_MASK;
+ unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
unsigned char *types = NULL;
@@ -309,14 +309,14 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
ext2_error(sb, __func__,
"bad page in #%lu",
inode->i_ino);
- ctx->pos += PAGE_CACHE_SIZE - offset;
+ ctx->pos += PAGE_SIZE - offset;
return PTR_ERR(page);
}
kaddr = page_address(page);
if (unlikely(need_revalidate)) {
if (offset) {
offset = ext2_validate_entry(kaddr, offset, chunk_mask);
- ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+ ctx->pos = (n<<PAGE_SHIFT) + offset;
}
file->f_version = inode->i_version;
need_revalidate = 0;
@@ -406,7 +406,7 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir,
if (++n >= npages)
n = 0;
/* next page is past the blocks we've got */
- if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
+ if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
ext2_error(dir->i_sb, __func__,
"dir %lu size %lld exceeds block count %llu",
dir->i_ino, dir->i_size,
@@ -511,7 +511,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
kaddr = page_address(page);
dir_end = kaddr + ext2_last_byte(dir, n);
de = (ext2_dirent *)kaddr;
- kaddr += PAGE_CACHE_SIZE - reclen;
+ kaddr += PAGE_SIZE - reclen;
while ((char *)de <= kaddr) {
if ((char *)de == dir_end) {
/* We hit i_size */
@@ -655,7 +655,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
kunmap_atomic(kaddr);
err = ext2_commit_chunk(page, 0, chunk_size);
fail:
- page_cache_release(page);
+ put_page(page);
return err;
}
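
The PAGE_SIZE >= 65536 branches in ext2_rec_len_{from,to}_disk() above
exist because on large-page configurations a directory entry may cover a
full 64 KiB chunk, and 65536 does not fit in the 16-bit on-disk rec_len
field; the reserved value EXT2_MAX_REC_LEN stands in for it. A userspace
sketch of the round trip (the le16 conversions are omitted; the constant
matches fs/ext2):

  #include <assert.h>
  #include <stdint.h>

  #define EXT2_MAX_REC_LEN ((1 << 16) - 1)     /* 65535 */

  static unsigned rec_len_from_disk(uint16_t dlen)
  {
          return dlen == EXT2_MAX_REC_LEN ? 1u << 16 : dlen;
  }

  static uint16_t rec_len_to_disk(unsigned len)
  {
          assert(len <= 1u << 16);
          return len == 1u << 16 ? EXT2_MAX_REC_LEN : (uint16_t)len;
  }

  int main(void)
  {
          assert(rec_len_from_disk(rec_len_to_disk(1u << 16)) == 1u << 16);
          assert(rec_len_from_disk(rec_len_to_disk(12)) == 12);
          return 0;
  }
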
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 1a7eb49a115d..d446203127fc 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -398,7 +398,7 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
else {
kunmap(dir_page);
- page_cache_release(dir_page);
+ put_page(dir_page);
}
inode_dec_link_count(old_dir);
}
@@ -408,11 +408,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
out_dir:
if (dir_de) {
kunmap(dir_page);
- page_cache_release(dir_page);
+ put_page(dir_page);
}
out_old:
kunmap(old_page);
- page_cache_release(old_page);
+ put_page(old_page);
out:
return err;
}
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index edc053a81914..db9ae6e18154 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -91,7 +91,8 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
* Return: An allocated and initialized encryption context on success; error
* value or NULL otherwise.
*/
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+ gfp_t gfp_flags)
{
struct ext4_crypto_ctx *ctx = NULL;
int res = 0;
@@ -118,7 +119,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
list_del(&ctx->free_list);
spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
if (!ctx) {
- ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+ ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
if (!ctx) {
res = -ENOMEM;
goto out;
@@ -255,7 +256,8 @@ static int ext4_page_crypto(struct inode *inode,
ext4_direction_t rw,
pgoff_t index,
struct page *src_page,
- struct page *dest_page)
+ struct page *dest_page,
+ gfp_t gfp_flags)
{
u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
@@ -266,7 +268,7 @@ static int ext4_page_crypto(struct inode *inode,
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;
- req = skcipher_request_alloc(tfm, GFP_NOFS);
+ req = skcipher_request_alloc(tfm, gfp_flags);
if (!req) {
printk_ratelimited(KERN_ERR
"%s: crypto_request_alloc() failed\n",
@@ -283,10 +285,10 @@ static int ext4_page_crypto(struct inode *inode,
EXT4_XTS_TWEAK_SIZE - sizeof(index));
sg_init_table(&dst, 1);
- sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+ sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
sg_init_table(&src, 1);
- sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
- skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+ sg_set_page(&src, src_page, PAGE_SIZE, 0);
+ skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
xts_tweak);
if (rw == EXT4_DECRYPT)
res = crypto_skcipher_decrypt(req);
@@ -307,9 +309,10 @@ static int ext4_page_crypto(struct inode *inode,
return 0;
}
-static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
+ gfp_t gfp_flags)
{
- ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
+ ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
if (ctx->w.bounce_page == NULL)
return ERR_PTR(-ENOMEM);
ctx->flags |= EXT4_WRITE_PATH_FL;
@@ -332,7 +335,8 @@ static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
* error value or NULL.
*/
struct page *ext4_encrypt(struct inode *inode,
- struct page *plaintext_page)
+ struct page *plaintext_page,
+ gfp_t gfp_flags)
{
struct ext4_crypto_ctx *ctx;
struct page *ciphertext_page = NULL;
@@ -340,17 +344,17 @@ struct page *ext4_encrypt(struct inode *inode,
BUG_ON(!PageLocked(plaintext_page));
- ctx = ext4_get_crypto_ctx(inode);
+ ctx = ext4_get_crypto_ctx(inode, gfp_flags);
if (IS_ERR(ctx))
return (struct page *) ctx;
/* The encryption operation will require a bounce page. */
- ciphertext_page = alloc_bounce_page(ctx);
+ ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
if (IS_ERR(ciphertext_page))
goto errout;
ctx->w.control_page = plaintext_page;
err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
- plaintext_page, ciphertext_page);
+ plaintext_page, ciphertext_page, gfp_flags);
if (err) {
ciphertext_page = ERR_PTR(err);
errout:
@@ -378,8 +382,8 @@ int ext4_decrypt(struct page *page)
{
BUG_ON(!PageLocked(page));
- return ext4_page_crypto(page->mapping->host,
- EXT4_DECRYPT, page->index, page, page);
+ return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
+ page->index, page, page, GFP_NOFS);
}
int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
@@ -396,13 +400,13 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
(unsigned long) inode->i_ino, lblk, len);
#endif
- BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+ BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
- ctx = ext4_get_crypto_ctx(inode);
+ ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ciphertext_page = alloc_bounce_page(ctx);
+ ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
if (IS_ERR(ciphertext_page)) {
err = PTR_ERR(ciphertext_page);
goto errout;
@@ -410,11 +414,12 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
while (len--) {
err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
- ZERO_PAGE(0), ciphertext_page);
+ ZERO_PAGE(0), ciphertext_page,
+ GFP_NOFS);
if (err)
goto errout;
- bio = bio_alloc(GFP_KERNEL, 1);
+ bio = bio_alloc(GFP_NOWAIT, 1);
if (!bio) {
err = -ENOMEM;
goto errout;
@@ -473,13 +478,16 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
*/
static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
{
- struct inode *dir = d_inode(dentry->d_parent);
- struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
+ struct dentry *dir;
+ struct ext4_crypt_info *ci;
int dir_has_key, cached_with_key;
- if (!ext4_encrypted_inode(dir))
+ dir = dget_parent(dentry);
+ if (!ext4_encrypted_inode(d_inode(dir))) {
+ dput(dir);
return 0;
-
+ }
+ ci = EXT4_I(d_inode(dir))->i_crypt_info;
if (ci && ci->ci_keyring_key &&
(ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED) |
@@ -489,6 +497,7 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
/* this should eventually be a flag in d_flags */
cached_with_key = dentry->d_fsdata != NULL;
dir_has_key = (ci != NULL);
+ dput(dir);
/*
* If the dentry was cached without the key, and it is a
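
The ext4_d_revalidate() rework above (and the matching change to
ext4_file_open() below) stops dereferencing dentry->d_parent directly,
which is unstable under a concurrent rename, and instead pins the parent
with dget_parent(), dropping the reference on every exit path. A
kernel-style sketch of the pattern (parent_is_acceptable() is a made-up
predicate standing in for the encryption-context checks; this is a sketch,
not a standalone program):

  static int frob_with_parent(struct dentry *dentry)
  {
          struct dentry *dir = dget_parent(dentry); /* stable parent ref */
          int err = 0;

          if (!parent_is_acceptable(d_inode(dir))) /* hypothetical check */
                  err = -EPERM;
          dput(dir);                               /* drop on every path */
          return err;
  }
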
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 50ba27cbed03..561d7308b393 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -155,13 +155,13 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
err = ext4_map_blocks(NULL, inode, &map, 0);
if (err > 0) {
pgoff_t index = map.m_pblk >>
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ (PAGE_SHIFT - inode->i_blkbits);
if (!ra_has_index(&file->f_ra, index))
page_cache_sync_readahead(
sb->s_bdev->bd_inode->i_mapping,
&file->f_ra, file,
index, 1);
- file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+ file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
bh = ext4_bread(NULL, inode, map.m_lblk, 0);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c04743519865..349afebe21ee 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -912,6 +912,29 @@ do { \
#include "extents_status.h"
/*
+ * Lock subclasses for i_data_sem in the ext4_inode_info structure.
+ *
+ * These are needed to avoid lockdep false positives when we need to
+ * allocate blocks to the quota inode during ext4_map_blocks(), while
+ * holding i_data_sem for a normal (non-quota) inode. Since we don't
+ * do quota tracking for the quota inode, this avoids deadlock (as
+ * well as infinite recursion, since it isn't turtles all the way
+ * down...)
+ *
+ * I_DATA_SEM_NORMAL - Used for most inodes
+ * I_DATA_SEM_OTHER - Used by move_extent.c for the second normal inode
+ * where the second inode has larger inode number
+ * than the first
+ * I_DATA_SEM_QUOTA - Used for quota inodes only
+ */
+enum {
+ I_DATA_SEM_NORMAL = 0,
+ I_DATA_SEM_OTHER,
+ I_DATA_SEM_QUOTA,
+};
+
+
+/*
* fourth extended file system inode data in memory
*/
struct ext4_inode_info {
@@ -1961,7 +1984,7 @@ ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
{
unsigned len = le16_to_cpu(dlen);
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
if (len == EXT4_MAX_REC_LEN || len == 0)
return blocksize;
return (len & 65532) | ((len & 3) << 16);
@@ -1974,7 +1997,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
{
if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
BUG();
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
if (len < 65536)
return cpu_to_le16(len);
if (len == blocksize) {
@@ -2282,11 +2305,13 @@ extern struct kmem_cache *ext4_crypt_info_cachep;
bool ext4_valid_contents_enc_mode(uint32_t mode);
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
extern struct workqueue_struct *ext4_read_workqueue;
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+ gfp_t gfp_flags);
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
void ext4_restore_control_page(struct page *data_page);
struct page *ext4_encrypt(struct inode *inode,
- struct page *plaintext_page);
+ struct page *plaintext_page,
+ gfp_t gfp_flags);
int ext4_decrypt(struct page *page);
int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
ext4_fsblk_t pblk, ext4_lblk_t len);
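
The I_DATA_SEM_* values above are lockdep subclasses: lockdep treats
taking a second lock of a class you already hold as a potential deadlock
unless the nesting is annotated via down_write_nested() with a distinct
subclass. One user is the inode-ordering helper in move_extent.c later in
this patch; the quota inode itself is tagged I_DATA_SEM_QUOTA once at
setup (see the lockdep_set_quota_inode() helper at the end of this diff).
A simplified sketch of the move_extent shape:

  /* Simplified from ext4_double_down_write_data_sem() below: take the
   * outer lock normally, tag the inner one with a distinct subclass. */
  static void lock_two_inodes(struct inode *first, struct inode *second)
  {
          down_write(&EXT4_I(first)->i_data_sem);
          down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
  }
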
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6659e216385e..fa2208bae2e1 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -329,7 +329,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct vfsmount *mnt = filp->f_path.mnt;
- struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
+ struct dentry *dir;
struct path path;
char buf[64], *cp;
int ret;
@@ -373,14 +373,18 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
if (ext4_encryption_info(inode) == NULL)
return -ENOKEY;
}
- if (ext4_encrypted_inode(dir) &&
- !ext4_is_child_context_consistent_with_parent(dir, inode)) {
+
+ dir = dget_parent(file_dentry(filp));
+ if (ext4_encrypted_inode(d_inode(dir)) &&
+ !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
ext4_warning(inode->i_sb,
"Inconsistent encryption contexts: %lu/%lu\n",
- (unsigned long) dir->i_ino,
+ (unsigned long) d_inode(dir)->i_ino,
(unsigned long) inode->i_ino);
+ dput(dir);
return -EPERM;
}
+ dput(dir);
/*
* Set up the jbd2_inode if we are opening the inode for
* writing and the journal is present
@@ -428,8 +432,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
lastoff = startoff;
endoff = (loff_t)end_blk << blkbits;
- index = startoff >> PAGE_CACHE_SHIFT;
- end = endoff >> PAGE_CACHE_SHIFT;
+ index = startoff >> PAGE_SHIFT;
+ end = endoff >> PAGE_SHIFT;
pagevec_init(&pvec, 0);
do {
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 7cbdd3752ba5..7bc6c855cc18 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -482,7 +482,7 @@ static int ext4_read_inline_page(struct inode *inode, struct page *page)
ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
flush_dcache_page(page);
kunmap_atomic(kaddr);
- zero_user_segment(page, len, PAGE_CACHE_SIZE);
+ zero_user_segment(page, len, PAGE_SIZE);
SetPageUptodate(page);
brelse(iloc.bh);
@@ -507,7 +507,7 @@ int ext4_readpage_inline(struct inode *inode, struct page *page)
if (!page->index)
ret = ext4_read_inline_page(inode, page);
else if (!PageUptodate(page)) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
}
@@ -595,7 +595,7 @@ retry:
if (ret) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
page = NULL;
ext4_orphan_add(handle, inode);
up_write(&EXT4_I(inode)->xattr_sem);
@@ -621,7 +621,7 @@ retry:
out:
if (page) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
if (sem_held)
up_write(&EXT4_I(inode)->xattr_sem);
@@ -690,7 +690,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
if (!ext4_has_inline_data(inode)) {
ret = 0;
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
goto out_up_read;
}
@@ -815,7 +815,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
if (ret) {
up_read(&EXT4_I(inode)->xattr_sem);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
ext4_truncate_failed_write(inode);
return ret;
}
@@ -829,7 +829,7 @@ out:
up_read(&EXT4_I(inode)->xattr_sem);
if (page) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
return ret;
}
@@ -919,7 +919,7 @@ retry_journal:
out_release_page:
up_read(&EXT4_I(inode)->xattr_sem);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
out_journal:
ext4_journal_stop(handle);
out:
@@ -947,7 +947,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
i_size_changed = 1;
}
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
/*
* Don't mark the inode dirty under page lock. First, it unnecessarily
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index dab84a2530ff..981a1fc30eaa 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -763,39 +763,47 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
-static handle_t *start_dio_trans(struct inode *inode,
- struct buffer_head *bh_result)
+/*
+ * Get blocks function for the cases that need to start a transaction -
+ * generally different cases of direct IO and DAX IO. It also handles retries
+ * in case of ENOSPC.
+ */
+static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int flags)
{
int dio_credits;
+ handle_t *handle;
+ int retries = 0;
+ int ret;
/* Trim mapping request to maximum we can map at once for DIO */
if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
dio_credits = ext4_chunk_trans_blocks(inode,
bh_result->b_size >> inode->i_blkbits);
- return ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
+retry:
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ ret = _ext4_get_block(inode, iblock, bh_result, flags);
+ ext4_journal_stop(handle);
+
+ if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
+ return ret;
}
/* Get block function for DIO reads and writes to inodes without extents */
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
- handle_t *handle;
- int ret;
-
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
- if (create) {
- handle = start_dio_trans(inode, bh);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- }
- ret = _ext4_get_block(inode, iblock, bh,
- create ? EXT4_GET_BLOCKS_CREATE : 0);
- if (create)
- ext4_journal_stop(handle);
- return ret;
+ if (!create)
+ return _ext4_get_block(inode, iblock, bh, 0);
+ return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
}
/*
@@ -806,18 +814,13 @@ int ext4_dio_get_block(struct inode *inode, sector_t iblock,
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
sector_t iblock, struct buffer_head *bh_result, int create)
{
- handle_t *handle;
int ret;
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
- handle = start_dio_trans(inode, bh_result);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ret = _ext4_get_block(inode, iblock, bh_result,
- EXT4_GET_BLOCKS_IO_CREATE_EXT);
- ext4_journal_stop(handle);
+ ret = ext4_get_block_trans(inode, iblock, bh_result,
+ EXT4_GET_BLOCKS_IO_CREATE_EXT);
/*
* When doing DIO using unwritten extents, we need io_end to convert
@@ -850,18 +853,13 @@ static int ext4_dio_get_block_unwritten_async(struct inode *inode,
static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
sector_t iblock, struct buffer_head *bh_result, int create)
{
- handle_t *handle;
int ret;
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
- handle = start_dio_trans(inode, bh_result);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ret = _ext4_get_block(inode, iblock, bh_result,
- EXT4_GET_BLOCKS_IO_CREATE_EXT);
- ext4_journal_stop(handle);
+ ret = ext4_get_block_trans(inode, iblock, bh_result,
+ EXT4_GET_BLOCKS_IO_CREATE_EXT);
/*
* Mark inode as having pending DIO writes to unwritten extents.
@@ -1057,7 +1055,7 @@ int do_journal_get_write_access(handle_t *handle,
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
@@ -1069,15 +1067,15 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
bool decrypt = false;
BUG_ON(!PageLocked(page));
- BUG_ON(from > PAGE_CACHE_SIZE);
- BUG_ON(to > PAGE_CACHE_SIZE);
+ BUG_ON(from > PAGE_SIZE);
+ BUG_ON(to > PAGE_SIZE);
BUG_ON(from > to);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
head = page_buffers(page);
bbits = ilog2(blocksize);
- block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+ block = (sector_t)page->index << (PAGE_SHIFT - bbits);
for (bh = head, block_start = 0; bh != head || !block_start;
block++, block_start = block_end, bh = bh->b_this_page) {
@@ -1159,8 +1157,8 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
* we allocate blocks but write fails for some reason
*/
needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
- index = pos >> PAGE_CACHE_SHIFT;
- from = pos & (PAGE_CACHE_SIZE - 1);
+ index = pos >> PAGE_SHIFT;
+ from = pos & (PAGE_SIZE - 1);
to = from + len;
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -1188,7 +1186,7 @@ retry_grab:
retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
if (IS_ERR(handle)) {
- page_cache_release(page);
+ put_page(page);
return PTR_ERR(handle);
}
@@ -1196,7 +1194,7 @@ retry_journal:
if (page->mapping != mapping) {
/* The page got truncated from under us */
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
ext4_journal_stop(handle);
goto retry_grab;
}
@@ -1252,7 +1250,7 @@ retry_journal:
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
- page_cache_release(page);
+ put_page(page);
return ret;
}
*pagep = page;
@@ -1295,7 +1293,7 @@ static int ext4_write_end(struct file *file,
ret = ext4_jbd2_file_inode(handle, inode);
if (ret) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
goto errout;
}
}
@@ -1315,7 +1313,7 @@ static int ext4_write_end(struct file *file,
*/
i_size_changed = ext4_update_inode_size(inode, pos + copied);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
@@ -1399,7 +1397,7 @@ static int ext4_journalled_write_end(struct file *file,
int size_changed = 0;
trace_ext4_journalled_write_end(inode, pos, len, copied);
- from = pos & (PAGE_CACHE_SIZE - 1);
+ from = pos & (PAGE_SIZE - 1);
to = from + len;
BUG_ON(!ext4_handle_valid(handle));
@@ -1423,7 +1421,7 @@ static int ext4_journalled_write_end(struct file *file,
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
@@ -1537,7 +1535,7 @@ static void ext4_da_page_release_reservation(struct page *page,
int num_clusters;
ext4_fsblk_t lblk;
- BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+ BUG_ON(stop > PAGE_SIZE || stop < length);
head = page_buffers(page);
bh = head;
@@ -1553,7 +1551,7 @@ static void ext4_da_page_release_reservation(struct page *page,
clear_buffer_delay(bh);
} else if (contiguous_blks) {
lblk = page->index <<
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ (PAGE_SHIFT - inode->i_blkbits);
lblk += (curr_off >> inode->i_blkbits) -
contiguous_blks;
ext4_es_remove_extent(inode, lblk, contiguous_blks);
@@ -1563,7 +1561,7 @@ static void ext4_da_page_release_reservation(struct page *page,
} while ((bh = bh->b_this_page) != head);
if (contiguous_blks) {
- lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
ext4_es_remove_extent(inode, lblk, contiguous_blks);
}
@@ -1572,7 +1570,7 @@ static void ext4_da_page_release_reservation(struct page *page,
* need to release the reserved space for that cluster. */
num_clusters = EXT4_NUM_B2C(sbi, to_release);
while (num_clusters > 0) {
- lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
+ lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
((num_clusters - 1) << sbi->s_cluster_bits);
if (sbi->s_cluster_ratio == 1 ||
!ext4_find_delalloc_cluster(inode, lblk))
@@ -1619,8 +1617,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
end = mpd->next_page - 1;
if (invalidate) {
ext4_lblk_t start, last;
- start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ start = index << (PAGE_SHIFT - inode->i_blkbits);
+ last = end << (PAGE_SHIFT - inode->i_blkbits);
ext4_es_remove_extent(inode, start, last - start + 1);
}
@@ -1636,7 +1634,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
if (invalidate) {
- block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ block_invalidatepage(page, 0, PAGE_SIZE);
ClearPageUptodate(page);
}
unlock_page(page);
@@ -2007,10 +2005,10 @@ static int ext4_writepage(struct page *page,
trace_ext4_writepage(page);
size = i_size_read(inode);
- if (page->index == size >> PAGE_CACHE_SHIFT)
- len = size & ~PAGE_CACHE_MASK;
+ if (page->index == size >> PAGE_SHIFT)
+ len = size & ~PAGE_MASK;
else
- len = PAGE_CACHE_SIZE;
+ len = PAGE_SIZE;
page_bufs = page_buffers(page);
/*
@@ -2034,7 +2032,7 @@ static int ext4_writepage(struct page *page,
ext4_bh_delay_or_unwritten)) {
redirty_page_for_writepage(wbc, page);
if ((current->flags & PF_MEMALLOC) ||
- (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) {
+ (inode->i_sb->s_blocksize == PAGE_SIZE)) {
/*
* For memory cleaning there's no point in writing only
* some buffers. So just bail out. Warn if we came here
@@ -2076,10 +2074,10 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
int err;
BUG_ON(page->index != mpd->first_page);
- if (page->index == size >> PAGE_CACHE_SHIFT)
- len = size & ~PAGE_CACHE_MASK;
+ if (page->index == size >> PAGE_SHIFT)
+ len = size & ~PAGE_MASK;
else
- len = PAGE_CACHE_SIZE;
+ len = PAGE_SIZE;
clear_page_dirty_for_io(page);
err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
if (!err)
@@ -2213,7 +2211,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
int nr_pages, i;
struct inode *inode = mpd->inode;
struct buffer_head *head, *bh;
- int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
+ int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
pgoff_t start, end;
ext4_lblk_t lblk;
sector_t pblock;
@@ -2274,7 +2272,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
* supports blocksize < pagesize as we will try to
* convert potentially unmapped parts of inode.
*/
- mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
+ mpd->io_submit.io_end->size += PAGE_SIZE;
/* Page fully mapped - let IO run! */
err = mpage_submit_page(mpd, page);
if (err < 0) {
@@ -2426,7 +2424,7 @@ update_disksize:
* Update on-disk size after IO is submitted. Races with
* truncate are avoided by checking i_size under i_data_sem.
*/
- disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
+ disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
if (disksize > EXT4_I(inode)->i_disksize) {
int err2;
loff_t i_size;
@@ -2562,7 +2560,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
mpd->next_page = page->index + 1;
/* Add all dirty buffers to mpd */
lblk = ((ext4_lblk_t)page->index) <<
- (PAGE_CACHE_SHIFT - blkbits);
+ (PAGE_SHIFT - blkbits);
head = page_buffers(page);
err = mpage_process_page_bufs(mpd, head, head, lblk);
if (err <= 0)
@@ -2647,7 +2645,7 @@ static int ext4_writepages(struct address_space *mapping,
* We may need to convert up to one extent per block in
* the page and we may dirty the inode.
*/
- rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
+ rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
}
/*
@@ -2678,8 +2676,8 @@ static int ext4_writepages(struct address_space *mapping,
mpd.first_page = writeback_index;
mpd.last_page = -1;
} else {
- mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
- mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
+ mpd.first_page = wbc->range_start >> PAGE_SHIFT;
+ mpd.last_page = wbc->range_end >> PAGE_SHIFT;
}
mpd.inode = inode;
@@ -2838,7 +2836,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
handle_t *handle;
- index = pos >> PAGE_CACHE_SHIFT;
+ index = pos >> PAGE_SHIFT;
if (ext4_nonda_switch(inode->i_sb)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
@@ -2881,7 +2879,7 @@ retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
ext4_da_write_credits(inode, pos, len));
if (IS_ERR(handle)) {
- page_cache_release(page);
+ put_page(page);
return PTR_ERR(handle);
}
@@ -2889,7 +2887,7 @@ retry_journal:
if (page->mapping != mapping) {
/* The page got truncated from under us */
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
ext4_journal_stop(handle);
goto retry_grab;
}
@@ -2917,7 +2915,7 @@ retry_journal:
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
- page_cache_release(page);
+ put_page(page);
return ret;
}
@@ -2965,7 +2963,7 @@ static int ext4_da_write_end(struct file *file,
len, copied, page, fsdata);
trace_ext4_da_write_end(inode, pos, len, copied);
- start = pos & (PAGE_CACHE_SIZE - 1);
+ start = pos & (PAGE_SIZE - 1);
end = start + copied - 1;
/*
@@ -3187,7 +3185,7 @@ static int __ext4_journalled_invalidatepage(struct page *page,
/*
* If it's a full truncate we just forget about the pending dirtying
*/
- if (offset == 0 && length == PAGE_CACHE_SIZE)
+ if (offset == 0 && length == PAGE_SIZE)
ClearPageChecked(page);
return jbd2_journal_invalidatepage(journal, page, offset, length);
@@ -3556,8 +3554,8 @@ void ext4_set_aops(struct inode *inode)
static int __ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
- ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ ext4_fsblk_t index = from >> PAGE_SHIFT;
+ unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize, pos;
ext4_lblk_t iblock;
struct inode *inode = mapping->host;
@@ -3565,14 +3563,14 @@ static int __ext4_block_zero_page_range(handle_t *handle,
struct page *page;
int err = 0;
- page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
+ page = find_or_create_page(mapping, from >> PAGE_SHIFT,
mapping_gfp_constraint(mapping, ~__GFP_FS));
if (!page)
return -ENOMEM;
blocksize = inode->i_sb->s_blocksize;
- iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+ iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
@@ -3614,7 +3612,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
ext4_encrypted_inode(inode)) {
/* We expect the key to be set. */
BUG_ON(!ext4_has_encryption_key(inode));
- BUG_ON(blocksize != PAGE_CACHE_SIZE);
+ BUG_ON(blocksize != PAGE_SIZE);
WARN_ON_ONCE(ext4_decrypt(page));
}
}
@@ -3638,7 +3636,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
unlock:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return err;
}
@@ -3653,7 +3651,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
struct inode *inode = mapping->host;
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize = inode->i_sb->s_blocksize;
unsigned max = blocksize - (offset & (blocksize - 1));
@@ -3678,7 +3676,7 @@ static int ext4_block_zero_page_range(handle_t *handle,
static int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from)
{
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ unsigned offset = from & (PAGE_SIZE-1);
unsigned length;
unsigned blocksize;
struct inode *inode = mapping->host;
@@ -3816,7 +3814,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
*/
if (offset + length > inode->i_size) {
length = inode->i_size +
- PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
+ PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
offset;
}
@@ -4891,23 +4889,23 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
tid_t commit_tid = 0;
int ret;
- offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+ offset = inode->i_size & (PAGE_SIZE - 1);
/*
* All buffers in the last page remain valid? Then there's nothing to
- * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
+ * do. We do the check mainly to optimize the common PAGE_SIZE ==
* blocksize case
*/
- if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
+ if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
return;
while (1) {
page = find_lock_page(inode->i_mapping,
- inode->i_size >> PAGE_CACHE_SHIFT);
+ inode->i_size >> PAGE_SHIFT);
if (!page)
return;
ret = __ext4_journalled_invalidatepage(page, offset,
- PAGE_CACHE_SIZE - offset);
+ PAGE_SIZE - offset);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (ret != -EBUSY)
return;
commit_tid = 0;
@@ -5546,10 +5544,10 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
goto out;
}
- if (page->index == size >> PAGE_CACHE_SHIFT)
- len = size & ~PAGE_CACHE_MASK;
+ if (page->index == size >> PAGE_SHIFT)
+ len = size & ~PAGE_MASK;
else
- len = PAGE_CACHE_SIZE;
+ len = PAGE_SIZE;
/*
* Return if we have all the buffers mapped. This avoids the need to do
* journal_start/journal_stop which can block and take a long time
@@ -5580,7 +5578,7 @@ retry_alloc:
ret = block_page_mkwrite(vma, vmf, get_block);
if (!ret && ext4_should_journal_data(inode)) {
if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
- PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
+ PAGE_SIZE, NULL, do_journal_get_write_access)) {
unlock_page(page);
ret = VM_FAULT_SIGBUS;
ext4_journal_stop(handle);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 50e05df28f66..eeeade76012e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -119,7 +119,7 @@ MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
*
*
* one block each for bitmap and buddy information. So for each group we
- * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
+ * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
* blocksize) blocks. So it can have information regarding groups_per_page
* which is blocks_per_page/2
*
@@ -807,7 +807,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
*
* one block each for bitmap and buddy information.
* So for each group we take up 2 blocks. A page can
- * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
+ * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
* So it can have information regarding groups_per_page which
* is blocks_per_page/2
*
@@ -839,7 +839,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
sb = inode->i_sb;
ngroups = ext4_get_groups_count(sb);
blocksize = 1 << inode->i_blkbits;
- blocks_per_page = PAGE_CACHE_SIZE / blocksize;
+ blocks_per_page = PAGE_SIZE / blocksize;
groups_per_page = blocks_per_page >> 1;
if (groups_per_page == 0)
@@ -993,7 +993,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
e4b->bd_buddy_page = NULL;
e4b->bd_bitmap_page = NULL;
- blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+ blocks_per_page = PAGE_SIZE / sb->s_blocksize;
/*
* the buddy cache inode stores the block bitmap
* and buddy information in consecutive blocks.
@@ -1028,11 +1028,11 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
if (e4b->bd_bitmap_page) {
unlock_page(e4b->bd_bitmap_page);
- page_cache_release(e4b->bd_bitmap_page);
+ put_page(e4b->bd_bitmap_page);
}
if (e4b->bd_buddy_page) {
unlock_page(e4b->bd_buddy_page);
- page_cache_release(e4b->bd_buddy_page);
+ put_page(e4b->bd_buddy_page);
}
}
@@ -1125,7 +1125,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
might_sleep();
mb_debug(1, "load group %u\n", group);
- blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+ blocks_per_page = PAGE_SIZE / sb->s_blocksize;
grp = ext4_get_group_info(sb, group);
e4b->bd_blkbits = sb->s_blocksize_bits;
@@ -1167,7 +1167,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
* is yet to initialize the same. So
* wait for it to initialize.
*/
- page_cache_release(page);
+ put_page(page);
page = find_or_create_page(inode->i_mapping, pnum, gfp);
if (page) {
BUG_ON(page->mapping != inode->i_mapping);
@@ -1203,7 +1203,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
if (page == NULL || !PageUptodate(page)) {
if (page)
- page_cache_release(page);
+ put_page(page);
page = find_or_create_page(inode->i_mapping, pnum, gfp);
if (page) {
BUG_ON(page->mapping != inode->i_mapping);
@@ -1238,11 +1238,11 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
err:
if (page)
- page_cache_release(page);
+ put_page(page);
if (e4b->bd_bitmap_page)
- page_cache_release(e4b->bd_bitmap_page);
+ put_page(e4b->bd_bitmap_page);
if (e4b->bd_buddy_page)
- page_cache_release(e4b->bd_buddy_page);
+ put_page(e4b->bd_buddy_page);
e4b->bd_buddy = NULL;
e4b->bd_bitmap = NULL;
return ret;
@@ -1257,9 +1257,9 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
if (e4b->bd_bitmap_page)
- page_cache_release(e4b->bd_bitmap_page);
+ put_page(e4b->bd_bitmap_page);
if (e4b->bd_buddy_page)
- page_cache_release(e4b->bd_buddy_page);
+ put_page(e4b->bd_buddy_page);
}
@@ -2833,8 +2833,8 @@ static void ext4_free_data_callback(struct super_block *sb,
/* No more items in the per group rb tree
* balance refcounts from ext4_mb_free_metadata()
*/
- page_cache_release(e4b.bd_buddy_page);
- page_cache_release(e4b.bd_bitmap_page);
+ put_page(e4b.bd_buddy_page);
+ put_page(e4b.bd_bitmap_page);
}
ext4_unlock_group(sb, entry->efd_group);
kmem_cache_free(ext4_free_data_cachep, entry);
@@ -4385,9 +4385,9 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
ext4_mb_put_pa(ac, ac->ac_sb, pa);
}
if (ac->ac_bitmap_page)
- page_cache_release(ac->ac_bitmap_page);
+ put_page(ac->ac_bitmap_page);
if (ac->ac_buddy_page)
- page_cache_release(ac->ac_buddy_page);
+ put_page(ac->ac_buddy_page);
if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
mutex_unlock(&ac->ac_lg->lg_mutex);
ext4_mb_collect_stats(ac);
@@ -4599,8 +4599,8 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
* otherwise we'll refresh it from
* on-disk bitmap and lose not-yet-available
* blocks */
- page_cache_get(e4b->bd_buddy_page);
- page_cache_get(e4b->bd_bitmap_page);
+ get_page(e4b->bd_buddy_page);
+ get_page(e4b->bd_bitmap_page);
}
while (*n) {
parent = *n;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 4098acc701c3..325cef48b39a 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
{
if (first < second) {
down_write(&EXT4_I(first)->i_data_sem);
- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+ down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
} else {
down_write(&EXT4_I(second)->i_data_sem);
- down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
+ down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
}
}
@@ -156,7 +156,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
if (!page[1]) {
unlock_page(page[0]);
- page_cache_release(page[0]);
+ put_page(page[0]);
return -ENOMEM;
}
/*
@@ -192,7 +192,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
create_empty_buffers(page, blocksize, 0);
head = page_buffers(page);
- block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
for (bh = head, block_start = 0; bh != head || !block_start;
block++, block_start = block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
@@ -268,7 +268,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
int i, err2, jblocks, retries = 0;
int replaced_count = 0;
int from = data_offset_in_page << orig_inode->i_blkbits;
- int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+ int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
struct super_block *sb = orig_inode->i_sb;
struct buffer_head *bh = NULL;
@@ -404,9 +404,9 @@ data_copy:
unlock_pages:
unlock_page(pagep[0]);
- page_cache_release(pagep[0]);
+ put_page(pagep[0]);
unlock_page(pagep[1]);
- page_cache_release(pagep[1]);
+ put_page(pagep[1]);
stop_journal:
ext4_journal_stop(handle);
if (*err == -ENOSPC &&
@@ -484,6 +484,13 @@ mext_check_arguments(struct inode *orig_inode,
return -EBUSY;
}
+ if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
+ ext4_debug("ext4 move extent: The argument files should "
+ "not be quota files [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EBUSY;
+ }
+
/* Ext4 move extent supports only extent based file */
if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: orig file is not extents "
@@ -554,7 +561,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
struct inode *orig_inode = file_inode(o_filp);
struct inode *donor_inode = file_inode(d_filp);
struct ext4_ext_path *path = NULL;
- int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+ int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
ext4_lblk_t o_end, o_start = orig_blk;
ext4_lblk_t d_start = donor_blk;
int ret;
@@ -648,9 +655,9 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
if (o_end - o_start < cur_len)
cur_len = o_end - o_start;
- orig_page_index = o_start >> (PAGE_CACHE_SHIFT -
+ orig_page_index = o_start >> (PAGE_SHIFT -
orig_inode->i_blkbits);
- donor_page_index = d_start >> (PAGE_CACHE_SHIFT -
+ donor_page_index = d_start >> (PAGE_SHIFT -
donor_inode->i_blkbits);
offset_in_page = o_start % blocks_per_page;
if (cur_len > blocks_per_page - offset_in_page)
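
orig_page_index and donor_page_index above convert a logical block number
to the page-cache page that holds it: with blocksize < PAGE_SIZE there are
1 << (PAGE_SHIFT - blkbits) blocks per page, so the conversion is a right
shift. A runnable sketch with 1 KiB blocks on assumed 4 KiB pages:

  #include <stdio.h>

  #define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */

  int main(void)
  {
          unsigned blkbits = 10;            /* 1 KiB blocks */
          unsigned long lblk = 10;          /* logical block number */
          unsigned long blocks_per_page = 1UL << (PAGE_SHIFT - blkbits);
          unsigned long page_index = lblk >> (PAGE_SHIFT - blkbits);
          unsigned long slot = lblk & (blocks_per_page - 1);

          /* 4 blocks per page: block 10 sits in page 2, slot 2 */
          printf("page %lu, slot %lu\n", page_index, slot);
          return 0;
  }
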
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index d77d15f4b674..e4fc8ea45d78 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/backing-dev.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -432,8 +433,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- if (len < PAGE_CACHE_SIZE)
- zero_user_segment(page, len, PAGE_CACHE_SIZE);
+ if (len < PAGE_SIZE)
+ zero_user_segment(page, len, PAGE_SIZE);
/*
* In the first loop we prepare and mark buffers to submit. We have to
* mark all buffers in the page before submitting so that
@@ -470,9 +471,20 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
nr_to_submit) {
- data_page = ext4_encrypt(inode, page);
+ gfp_t gfp_flags = GFP_NOFS;
+
+ retry_encrypt:
+ data_page = ext4_encrypt(inode, page, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
+ if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
+ if (io->io_bio) {
+ ext4_io_submit(io);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ }
+ gfp_flags |= __GFP_NOFAIL;
+ goto retry_encrypt;
+ }
data_page = NULL;
goto out;
}
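
This retry_encrypt loop is what the gfp_flags plumbing added to
fs/ext4/crypto.c is for: the first bounce-page allocation uses GFP_NOFS so
ordinary writeback can fail and move on, and only data-integrity writeback
(WB_SYNC_ALL) escalates by submitting the bio already built (so its pages
can be reclaimed), backing off briefly under congestion, and retrying with
__GFP_NOFAIL, which cannot fail. A kernel-style sketch of the idiom as a
hypothetical helper (illustrative, not standalone code):

  static struct page *alloc_for_writeback(bool must_succeed)
  {
          gfp_t gfp = GFP_NOFS;
          struct page *page;

          for (;;) {
                  page = alloc_page(gfp);
                  if (page || !must_succeed)
                          return page;
                  congestion_wait(BLK_RW_ASYNC, HZ / 50); /* brief backoff */
                  gfp |= __GFP_NOFAIL;   /* next attempt cannot fail */
          }
  }
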
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 5dc5e95063de..dc54a4b60eba 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -23,7 +23,7 @@
*
* then this code just gives up and calls the buffer_head-based read function.
* It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
*
*/
@@ -140,7 +140,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
- const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+ const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
const unsigned blocksize = 1 << blkbits;
sector_t block_in_file;
sector_t last_block;
@@ -173,7 +173,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (page_has_buffers(page))
goto confused;
- block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+ block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
last_block = block_in_file + nr_pages * blocks_per_page;
last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
if (last_block > last_block_in_file)
@@ -217,7 +217,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
set_error_page:
SetPageError(page);
zero_user_segment(page, 0,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
unlock_page(page);
goto next_page;
}
@@ -250,7 +250,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
}
if (first_hole != blocks_per_page) {
zero_user_segment(page, first_hole << blkbits,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (first_hole == 0) {
SetPageUptodate(page);
unlock_page(page);
@@ -279,7 +279,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
- ctx = ext4_get_crypto_ctx(inode);
+ ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
goto set_error_page;
}
@@ -319,7 +319,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
unlock_page(page);
next_page:
if (pages)
- page_cache_release(page);
+ put_page(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 539297515896..304c712dbe12 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1113,6 +1113,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
+static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
static struct dquot **ext4_get_dquots(struct inode *inode)
{
@@ -1129,7 +1130,7 @@ static const struct dquot_operations ext4_quota_operations = {
.alloc_dquot = dquot_alloc,
.destroy_dquot = dquot_destroy,
.get_projid = ext4_get_projid,
- .get_next_id = dquot_get_next_id,
+ .get_next_id = ext4_get_next_id,
};
static const struct quotactl_ops ext4_qctl_operations = {
@@ -1323,9 +1324,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
return -1;
}
if (ext4_has_feature_quota(sb)) {
- ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
- "when QUOTA feature is enabled");
- return -1;
+ ext4_msg(sb, KERN_INFO, "Journaled quota options "
+ "ignored when QUOTA feature is enabled");
+ return 1;
}
qname = match_strdup(args);
if (!qname) {
@@ -1688,10 +1689,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
return -1;
}
if (ext4_has_feature_quota(sb)) {
- ext4_msg(sb, KERN_ERR,
- "Cannot set journaled quota options "
+ ext4_msg(sb, KERN_INFO,
+ "Quota format mount options ignored "
"when QUOTA feature is enabled");
- return -1;
+ return 1;
}
sbi->s_jquota_fmt = m->mount_opt;
#endif
@@ -1756,11 +1757,11 @@ static int parse_options(char *options, struct super_block *sb,
#ifdef CONFIG_QUOTA
if (ext4_has_feature_quota(sb) &&
(test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
- ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
- "feature is enabled");
- return 0;
- }
- if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
+ ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
+ "mount options ignored.");
+ clear_opt(sb, USRQUOTA);
+ clear_opt(sb, GRPQUOTA);
+ } else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sb, USRQUOTA);
@@ -1784,7 +1785,7 @@ static int parse_options(char *options, struct super_block *sb,
int blocksize =
BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
- if (blocksize < PAGE_CACHE_SIZE) {
+ if (blocksize < PAGE_SIZE) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"dioread_nolock if block size != PAGE_SIZE");
return 0;
@@ -3808,7 +3809,7 @@ no_journal:
}
if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
- (blocksize != PAGE_CACHE_SIZE)) {
+ (blocksize != PAGE_SIZE)) {
ext4_msg(sb, KERN_ERR,
"Unsupported blocksize for fs encryption");
goto failed_mount_wq;
@@ -5028,6 +5029,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
EXT4_SB(sb)->s_jquota_fmt, type);
}
+static void lockdep_set_quota_inode(struct inode *inode, int subclass)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ /* The first argument of lockdep_set_subclass has to be
+ * *exactly* the same as the argument to init_rwsem() --- in
+ * this case, in init_once() --- or lockdep gets unhappy
+ * because the name of the lock is set using the
+ * stringification of the argument to init_rwsem().
+ */
+ (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
+ lockdep_set_subclass(&ei->i_data_sem, subclass);
+}
+
/*
* Standard function to be called on quota_on
*/
@@ -5067,8 +5082,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
if (err)
return err;
}
-
- return dquot_quota_on(sb, type, format_id, path);
+ lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
+ err = dquot_quota_on(sb, type, format_id, path);
+ if (err)
+ lockdep_set_quota_inode(path->dentry->d_inode,
+ I_DATA_SEM_NORMAL);
+ return err;
}
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
@@ -5095,8 +5114,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
/* Don't account quota for quota files to avoid recursion */
qf_inode->i_flags |= S_NOQUOTA;
+ lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
err = dquot_enable(qf_inode, type, format_id, flags);
iput(qf_inode);
+ if (err)
+ lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
return err;
}
@@ -5253,6 +5275,17 @@ out:
return len;
}
+static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+ const struct quota_format_ops *ops;
+
+ if (!sb_has_quota_loaded(sb, qid->type))
+ return -ESRCH;
+ ops = sb_dqopt(sb)->ops[qid->type];
+ if (!ops || !ops->get_next_id)
+ return -ENOSYS;
+ return dquot_get_next_id(sb, qid);
+}
#endif
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
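ext4_get_next_id above backs the Q_GETNEXTQUOTA interface: it refuses the call
unless quota is loaded and the format driver can iterate ids. A hedged
userspace sketch of the consumer side, assuming a kernel with Q_GETNEXTQUOTA
support (the fallback definitions mirror the uapi values and are only needed
with older headers; /dev/sda1 is a placeholder device):

#include <stdio.h>
#include <sys/quota.h>

#ifndef Q_GETNEXTQUOTA			/* uapi value, for pre-4.6 headers */
#define Q_GETNEXTQUOTA 0x800009
struct if_nextdqblk {
	unsigned long long dqb_bhardlimit, dqb_bsoftlimit, dqb_curspace;
	unsigned long long dqb_ihardlimit, dqb_isoftlimit, dqb_curinodes;
	unsigned long long dqb_btime, dqb_itime;
	unsigned int dqb_valid, dqb_id;
};
#endif

int main(void)
{
	struct if_nextdqblk q;
	unsigned int id = 0;

	for (;;) {
		if (quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), "/dev/sda1",
			     id, (void *)&q) < 0)
			break;			/* ESRCH/ENOENT: no ids left */
		printf("uid %u uses %llu bytes\n", q.dqb_id,
		       (unsigned long long)q.dqb_curspace);
		id = q.dqb_id + 1;		/* resume search after this id */
	}
	return 0;
}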
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 6f7ee30a89ce..75ed5c2f0c16 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -80,12 +80,12 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
if (res <= plen)
paddr[res] = '\0';
if (cpage)
- page_cache_release(cpage);
+ put_page(cpage);
set_delayed_call(done, kfree_link, paddr);
return paddr;
errout:
if (cpage)
- page_cache_release(cpage);
+ put_page(cpage);
kfree(paddr);
return ERR_PTR(res);
}
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 0441e055c8e8..e79bd32b9b79 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -230,6 +230,27 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
return error;
}
+static int
+__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
+ void *end, const char *function, unsigned int line)
+{
+ struct ext4_xattr_entry *entry = IFIRST(header);
+ int error = -EFSCORRUPTED;
+
+ if (((void *) header >= end) ||
+ (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
+ goto errout;
+ error = ext4_xattr_check_names(entry, end, entry);
+errout:
+ if (error)
+ __ext4_error_inode(inode, function, line, 0,
+ "corrupted in-inode xattr");
+ return error;
+}
+
+#define xattr_check_inode(inode, header, end) \
+ __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
+
static inline int
ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
{
@@ -341,7 +362,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
header = IHDR(inode, raw_inode);
entry = IFIRST(header);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
- error = ext4_xattr_check_names(entry, end, entry);
+ error = xattr_check_inode(inode, header, end);
if (error)
goto cleanup;
error = ext4_xattr_find_entry(&entry, name_index, name,
@@ -477,7 +498,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
- error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
+ error = xattr_check_inode(inode, header, end);
if (error)
goto cleanup;
error = ext4_xattr_list_entries(dentry, IFIRST(header),
@@ -1040,8 +1061,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
is->s.here = is->s.first;
is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
- error = ext4_xattr_check_names(IFIRST(header), is->s.end,
- IFIRST(header));
+ error = xattr_check_inode(inode, header, is->s.end);
if (error)
return error;
/* Find the named attribute. */
@@ -1356,6 +1376,10 @@ retry:
last = entry;
total_ino = sizeof(struct ext4_xattr_ibody_header);
+ error = xattr_check_inode(inode, header, end);
+ if (error)
+ goto cleanup;
+
free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
if (free >= new_extra_isize) {
entry = IFIRST(header);
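The new xattr_check_inode() gates every walk of the in-inode xattr area behind
a header check, so a corrupted image fails with -EFSCORRUPTED instead of being
parsed blindly. A generic sketch of that validate-before-parse pattern on an
untrusted buffer (simplified; not ext4's actual on-disk layout):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define XMAGIC 0xEA020000u

struct xhdr { uint32_t magic; };

static int check_header(const void *buf, const void *end)
{
	const struct xhdr *h = buf;

	if ((const void *)(h + 1) > end)	/* header must fit */
		return -1;
	if (h->magic != XMAGIC)			/* reject foreign data */
		return -1;
	return 0;				/* safe to walk entries */
}

int main(void)
{
	uint8_t buf[16];
	uint32_t magic = XMAGIC;

	memcpy(buf, &magic, sizeof(magic));
	printf("valid: %s\n",
	       check_header(buf, buf + sizeof(buf)) == 0 ? "yes" : "no");
	return 0;
}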
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e5c762b37239..53fec0872e60 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -223,7 +223,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
/* Allocate a new bio */
bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
@@ -265,8 +265,8 @@ alloc_new:
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
- if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
- PAGE_CACHE_SIZE) {
+ if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
+ PAGE_SIZE) {
__submit_merged_bio(io);
goto alloc_new;
}
@@ -406,7 +406,7 @@ got_it:
* see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
*/
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
unlock_page(page);
return page;
@@ -517,7 +517,7 @@ struct page *get_new_data_page(struct inode *inode,
goto got_it;
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
} else {
f2fs_put_page(page, 1);
@@ -530,8 +530,8 @@ struct page *get_new_data_page(struct inode *inode,
}
got_it:
if (new_i_size && i_size_read(inode) <
- ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
- i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
+ ((loff_t)(index + 1) << PAGE_SHIFT)) {
+ i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
/* Only the directory inode sets new_i_size */
set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
}
@@ -570,9 +570,9 @@ alloc:
/* update i_size */
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
dn->ofs_in_node;
- if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
+ if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
i_size_write(dn->inode,
- ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
+ ((loff_t)(fofs + 1) << PAGE_SHIFT));
return 0;
}
@@ -971,7 +971,7 @@ got_it:
goto confused;
}
} else {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
unlock_page(page);
goto next_page;
@@ -1021,7 +1021,7 @@ submit_and_realloc:
goto next_page;
set_error_page:
SetPageError(page);
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
unlock_page(page);
goto next_page;
confused:
@@ -1032,7 +1032,7 @@ confused:
unlock_page(page);
next_page:
if (pages)
- page_cache_release(page);
+ put_page(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio)
@@ -1136,7 +1136,7 @@ static int f2fs_write_data_page(struct page *page,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long) i_size)
- >> PAGE_CACHE_SHIFT;
+ >> PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
int err = 0;
@@ -1157,11 +1157,11 @@ static int f2fs_write_data_page(struct page *page,
* If the offset is out-of-range of file size,
* this page does not have to be written to disk.
*/
- offset = i_size & (PAGE_CACHE_SIZE - 1);
+ offset = i_size & (PAGE_SIZE - 1);
if ((page->index >= end_index + 1) || !offset)
goto out;
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ zero_user_segment(page, offset, PAGE_SIZE);
write:
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
@@ -1267,8 +1267,8 @@ next:
cycled = 0;
end = -1;
} else {
- index = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ index = wbc->range_start >> PAGE_SHIFT;
+ end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
@@ -1448,11 +1448,11 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
* the block addresses when there is no need to fill the page.
*/
if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
- len == PAGE_CACHE_SIZE)
+ len == PAGE_SIZE)
return 0;
if (f2fs_has_inline_data(inode) ||
- (pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
+ (pos & PAGE_MASK) >= i_size_read(inode)) {
f2fs_lock_op(sbi);
locked = true;
}
@@ -1513,7 +1513,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *page = NULL;
- pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
+ pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
bool need_balance = false;
block_t blkaddr = NULL_ADDR;
int err = 0;
@@ -1561,22 +1561,22 @@ repeat:
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
- if (len == PAGE_CACHE_SIZE)
+ if (len == PAGE_SIZE)
goto out_update;
if (PageUptodate(page))
goto out_clear;
- if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
- unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+ if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+ unsigned start = pos & (PAGE_SIZE - 1);
unsigned end = start + len;
/* Reading beyond i_size is simple: memset to zero */
- zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+ zero_user_segments(page, 0, start, end, PAGE_SIZE);
goto out_update;
}
if (blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
} else {
struct f2fs_io_info fio = {
.sbi = sbi,
@@ -1688,7 +1688,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
- (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
+ (offset % PAGE_SIZE || length != PAGE_SIZE))
return;
if (PageDirty(page)) {
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 4fb6ef88a34f..f4a61a5ff79f 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -164,7 +164,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
/* build curseg */
si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
- si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE;
+ si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
/* build dirty segmap */
si->base_mem += sizeof(struct dirty_seglist_info);
@@ -201,9 +201,9 @@ get_cache:
si->page_mem = 0;
npages = NODE_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
npages = META_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
}
static int stat_show(struct seq_file *s, void *v)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 80641ad82745..af819571bce7 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -17,8 +17,8 @@
static unsigned long dir_blocks(struct inode *inode)
{
- return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
- >> PAGE_CACHE_SHIFT;
+ return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
+ >> PAGE_SHIFT;
}
static unsigned int dir_buckets(unsigned int level, int dir_level)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index bbe2cd1265d0..7a4558d17f36 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1294,7 +1294,7 @@ static inline void f2fs_put_page(struct page *page, int unlock)
f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
unlock_page(page);
}
- page_cache_release(page);
+ put_page(page);
}
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index b41c3579ea9e..443e07705c2a 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -74,11 +74,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
goto mapped;
/* page is wholly or partially inside EOF */
- if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+ if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
i_size_read(inode)) {
unsigned offset;
- offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ offset = i_size_read(inode) & ~PAGE_MASK;
+ zero_user_segment(page, offset, PAGE_SIZE);
}
set_page_dirty(page);
SetPageUptodate(page);
@@ -346,11 +346,11 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
goto found;
}
- pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);
+ pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
- for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+ for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
if (err && err != -ENOENT) {
@@ -370,7 +370,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
/* find data/hole in dnode block */
for (; dn.ofs_in_node < end_offset;
dn.ofs_in_node++, pgofs++,
- data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+ data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
block_t blkaddr;
blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
@@ -508,8 +508,8 @@ void truncate_data_blocks(struct dnode_of_data *dn)
static int truncate_partial_data_page(struct inode *inode, u64 from,
bool cache_only)
{
- unsigned offset = from & (PAGE_CACHE_SIZE - 1);
- pgoff_t index = from >> PAGE_CACHE_SHIFT;
+ unsigned offset = from & (PAGE_SIZE - 1);
+ pgoff_t index = from >> PAGE_SHIFT;
struct address_space *mapping = inode->i_mapping;
struct page *page;
@@ -529,7 +529,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
return 0;
truncate_out:
f2fs_wait_on_page_writeback(page, DATA, true);
- zero_user(page, offset, PAGE_CACHE_SIZE - offset);
+ zero_user(page, offset, PAGE_SIZE - offset);
if (!cache_only || !f2fs_encrypted_inode(inode) ||
!S_ISREG(inode->i_mode))
set_page_dirty(page);
@@ -799,11 +799,11 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (ret)
return ret;
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ off_start = offset & (PAGE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_SIZE - 1);
if (pg_start == pg_end) {
ret = fill_zero(inode, pg_start, off_start,
@@ -813,7 +813,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
} else {
if (off_start) {
ret = fill_zero(inode, pg_start++, off_start,
- PAGE_CACHE_SIZE - off_start);
+ PAGE_SIZE - off_start);
if (ret)
return ret;
}
@@ -830,8 +830,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
f2fs_balance_fs(sbi, true);
- blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
- blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
+ blk_start = (loff_t)pg_start << PAGE_SHIFT;
+ blk_end = (loff_t)pg_end << PAGE_SHIFT;
truncate_inode_pages_range(mapping, blk_start,
blk_end - 1);
@@ -954,8 +954,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (ret)
return ret;
- pg_start = offset >> PAGE_CACHE_SHIFT;
- pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = offset >> PAGE_SHIFT;
+ pg_end = (offset + len) >> PAGE_SHIFT;
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
@@ -1006,11 +1006,11 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
truncate_pagecache_range(inode, offset, offset + len - 1);
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ off_start = offset & (PAGE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_SIZE - 1);
if (pg_start == pg_end) {
ret = fill_zero(inode, pg_start, off_start,
@@ -1024,12 +1024,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
} else {
if (off_start) {
ret = fill_zero(inode, pg_start++, off_start,
- PAGE_CACHE_SIZE - off_start);
+ PAGE_SIZE - off_start);
if (ret)
return ret;
new_size = max_t(loff_t, new_size,
- (loff_t)pg_start << PAGE_CACHE_SHIFT);
+ (loff_t)pg_start << PAGE_SHIFT);
}
for (index = pg_start; index < pg_end; index++) {
@@ -1060,7 +1060,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
f2fs_unlock_op(sbi);
new_size = max_t(loff_t, new_size,
- (loff_t)(index + 1) << PAGE_CACHE_SHIFT);
+ (loff_t)(index + 1) << PAGE_SHIFT);
}
if (off_end) {
@@ -1117,8 +1117,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
truncate_pagecache(inode, offset);
- pg_start = offset >> PAGE_CACHE_SHIFT;
- pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = offset >> PAGE_SHIFT;
+ pg_end = (offset + len) >> PAGE_SHIFT;
delta = pg_end - pg_start;
nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -1158,11 +1158,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
f2fs_balance_fs(sbi, true);
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ off_start = offset & (PAGE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_SIZE - 1);
f2fs_lock_op(sbi);
@@ -1180,12 +1180,12 @@ noalloc:
if (pg_start == pg_end)
new_size = offset + len;
else if (index == pg_start && off_start)
- new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
+ new_size = (loff_t)(index + 1) << PAGE_SHIFT;
else if (index == pg_end)
- new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
+ new_size = ((loff_t)index << PAGE_SHIFT) +
off_end;
else
- new_size += PAGE_CACHE_SIZE;
+ new_size += PAGE_SIZE;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -1652,8 +1652,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
if (need_inplace_update(inode))
return -EINVAL;
- pg_start = range->start >> PAGE_CACHE_SHIFT;
- pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;
+ pg_start = range->start >> PAGE_SHIFT;
+ pg_end = (range->start + range->len) >> PAGE_SHIFT;
f2fs_balance_fs(sbi, true);
@@ -1770,7 +1770,7 @@ clear_out:
out:
inode_unlock(inode);
if (!err)
- range->len = (u64)total << PAGE_CACHE_SHIFT;
+ range->len = (u64)total << PAGE_SHIFT;
return err;
}
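Most of the f2fs hunks above repeat one decomposition: a byte range
[offset, offset+len) becomes a partial head page, whole middle pages, and a
partial tail page via pg_start/pg_end/off_start/off_end. A standalone sketch
of that math (assuming 4K pages):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long long offset = 5000, len = 10000;

	unsigned long pg_start = offset >> PAGE_SHIFT;
	unsigned long pg_end = (offset + len) >> PAGE_SHIFT;
	unsigned off_start = offset & (PAGE_SIZE - 1);
	unsigned off_end = (offset + len) & (PAGE_SIZE - 1);

	/* head: zero bytes [off_start, PAGE_SIZE) of page pg_start */
	printf("head: page %lu from byte %u\n", pg_start, off_start);
	/* middle: drop pages [pg_start+1, pg_end) entirely */
	printf("middle: pages %lu..%lu\n", pg_start + 1, pg_end - 1);
	/* tail: zero bytes [0, off_end) of page pg_end */
	printf("tail: page %lu up to byte %u\n", pg_end, off_end);
	return 0;
}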
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 358214e9f707..a2fbe6f427d3 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -51,7 +51,7 @@ void read_inline_data(struct page *page, struct page *ipage)
f2fs_bug_on(F2FS_P_SB(page), page->index);
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+ zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
/* Copy the whole inline data block */
src_addr = inline_data_addr(ipage);
@@ -93,7 +93,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
}
if (page->index)
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
else
read_inline_data(page, ipage);
@@ -375,7 +375,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
goto out;
f2fs_wait_on_page_writeback(page, DATA, true);
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+ zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
dentry_blk = kmap_atomic(page);
@@ -405,8 +405,8 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
stat_dec_inline_dir(dir);
clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
- if (i_size_read(dir) < PAGE_CACHE_SIZE) {
- i_size_write(dir, PAGE_CACHE_SIZE);
+ if (i_size_read(dir) < PAGE_SIZE) {
+ i_size_write(dir, PAGE_SIZE);
set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index db874ad3514a..324ed3812f30 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -1027,12 +1027,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
goto errout;
}
- /* this is broken symlink case */
- if (unlikely(cstr.name[0] == 0)) {
- res = -ENOENT;
- goto errout;
- }
-
if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
/* Symlink data on the disk is corrupted */
res = -EIO;
@@ -1046,17 +1040,23 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
if (res < 0)
goto errout;
+ /* this is broken symlink case */
+ if (unlikely(pstr.name[0] == 0)) {
+ res = -ENOENT;
+ goto errout;
+ }
+
paddr = pstr.name;
/* Null-terminate the name */
paddr[res] = '\0';
- page_cache_release(cpage);
+ put_page(cpage);
set_delayed_call(done, kfree_link, paddr);
return paddr;
errout:
fscrypt_fname_free_buffer(&pstr);
- page_cache_release(cpage);
+ put_page(cpage);
return ERR_PTR(res);
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 118321bd1a7f..1a33de9d84b1 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -46,11 +46,11 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
*/
if (type == FREE_NIDS) {
mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
} else if (type == NAT_ENTRIES) {
mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
} else if (type == DIRTY_DENTS) {
if (sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -62,13 +62,13 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
for (i = 0; i <= UPDATE_INO; i++)
mem_size += (sbi->im[i].ino_num *
- sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
+ sizeof(struct ino_entry)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
} else if (type == EXTENT_CACHE) {
mem_size = (atomic_read(&sbi->total_ext_tree) *
sizeof(struct extent_tree) +
atomic_read(&sbi->total_ext_node) *
- sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
+ sizeof(struct extent_node)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
} else {
if (!sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -121,7 +121,7 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+ memcpy(dst_addr, src_addr, PAGE_SIZE);
set_page_dirty(dst_page);
f2fs_put_page(src_page, 1);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 0b30cd2aeebd..011942f94d64 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -591,7 +591,7 @@ out:
/* truncate meta pages to be used by the recovery */
truncate_inode_pages_range(META_MAPPING(sbi),
- (loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+ (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
if (err) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 6f16b39f0b52..540669d6978e 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -885,12 +885,12 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
}
}
- sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
+ sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
SUM_FOOTER_SIZE) / SUMMARY_SIZE;
if (valid_sum_count <= sum_in_page)
return 1;
else if ((valid_sum_count - sum_in_page) <=
- (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
+ (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
return 2;
return 3;
}
@@ -909,9 +909,9 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
void *dst = page_address(page);
if (src)
- memcpy(dst, src, PAGE_CACHE_SIZE);
+ memcpy(dst, src, PAGE_SIZE);
else
- memset(dst, 0, PAGE_CACHE_SIZE);
+ memset(dst, 0, PAGE_SIZE);
set_page_dirty(page);
f2fs_put_page(page, 1);
}
@@ -1596,7 +1596,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
s = (struct f2fs_summary *)(kaddr + offset);
seg_i->sum_blk->entries[j] = *s;
offset += SUMMARY_SIZE;
- if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+ if (offset + SUMMARY_SIZE <= PAGE_SIZE -
SUM_FOOTER_SIZE)
continue;
@@ -1757,7 +1757,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
*summary = seg_i->sum_blk->entries[j];
written_size += SUMMARY_SIZE;
- if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+ if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
SUM_FOOTER_SIZE)
continue;
@@ -1844,7 +1844,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+ memcpy(dst_addr, src_addr, PAGE_SIZE);
set_page_dirty(dst_page);
f2fs_put_page(src_page, 1);
@@ -2171,7 +2171,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
for (i = 0; i < NR_CURSEG_TYPE; i++) {
mutex_init(&array[i].curseg_mutex);
- array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!array[i].sum_blk)
return -ENOMEM;
init_rwsem(&array[i].journal_rwsem);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 15bb81f8dac2..006f87d69921 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -984,9 +984,25 @@ static loff_t max_file_blocks(void)
return result;
}
+static int __f2fs_commit_super(struct buffer_head *bh,
+ struct f2fs_super_block *super)
+{
+ lock_buffer(bh);
+ if (super)
+ memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+ set_buffer_uptodate(bh);
+ set_buffer_dirty(bh);
+ unlock_buffer(bh);
+
+ /* this is a rare case, so we can always use FUA */
+ return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+}
+
static inline bool sanity_check_area_boundary(struct super_block *sb,
- struct f2fs_super_block *raw_super)
+ struct buffer_head *bh)
{
+ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+ (bh->b_data + F2FS_SUPER_OFFSET);
u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
@@ -1000,6 +1016,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
u32 segment_count = le32_to_cpu(raw_super->segment_count);
u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ u64 main_end_blkaddr = main_blkaddr +
+ (segment_count_main << log_blocks_per_seg);
+ u64 seg_end_blkaddr = segment0_blkaddr +
+ (segment_count << log_blocks_per_seg);
if (segment0_blkaddr != cp_blkaddr) {
f2fs_msg(sb, KERN_INFO,
@@ -1044,22 +1064,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
return true;
}
- if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
- segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
+ if (main_end_blkaddr > seg_end_blkaddr) {
f2fs_msg(sb, KERN_INFO,
- "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
+ "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
main_blkaddr,
- segment0_blkaddr + (segment_count << log_blocks_per_seg),
+ segment0_blkaddr +
+ (segment_count << log_blocks_per_seg),
segment_count_main << log_blocks_per_seg);
return true;
+ } else if (main_end_blkaddr < seg_end_blkaddr) {
+ int err = 0;
+ char *res;
+
+ /* fix in-memory information all the time */
+ raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
+ segment0_blkaddr) >> log_blocks_per_seg);
+
+ if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
+ res = "internally";
+ } else {
+ err = __f2fs_commit_super(bh, NULL);
+ res = err ? "failed" : "done";
+ }
+ f2fs_msg(sb, KERN_INFO,
+ "Fix alignment : %s, start(%u) end(%u) block(%u)",
+ res, main_blkaddr,
+ segment0_blkaddr +
+ (segment_count << log_blocks_per_seg),
+ segment_count_main << log_blocks_per_seg);
+ if (err)
+ return true;
}
-
return false;
}
static int sanity_check_raw_super(struct super_block *sb,
- struct f2fs_super_block *raw_super)
+ struct buffer_head *bh)
{
+ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+ (bh->b_data + F2FS_SUPER_OFFSET);
unsigned int blocksize;
if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
@@ -1070,10 +1113,10 @@ static int sanity_check_raw_super(struct super_block *sb,
}
/* Currently, support only 4KB page cache size */
- if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+ if (F2FS_BLKSIZE != PAGE_SIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid page_cache_size (%lu), supports only 4KB\n",
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
return 1;
}
@@ -1126,7 +1169,7 @@ static int sanity_check_raw_super(struct super_block *sb,
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
- if (sanity_check_area_boundary(sb, raw_super))
+ if (sanity_check_area_boundary(sb, bh))
return 1;
return 0;
@@ -1202,7 +1245,7 @@ static int read_raw_super_block(struct super_block *sb,
{
int block;
struct buffer_head *bh;
- struct f2fs_super_block *super, *buf;
+ struct f2fs_super_block *super;
int err = 0;
super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
@@ -1218,11 +1261,8 @@ static int read_raw_super_block(struct super_block *sb,
continue;
}
- buf = (struct f2fs_super_block *)
- (bh->b_data + F2FS_SUPER_OFFSET);
-
/* sanity checking of raw super */
- if (sanity_check_raw_super(sb, buf)) {
+ if (sanity_check_raw_super(sb, bh)) {
f2fs_msg(sb, KERN_ERR,
"Can't find valid F2FS filesystem in %dth superblock",
block + 1);
@@ -1232,7 +1272,8 @@ static int read_raw_super_block(struct super_block *sb,
}
if (!*raw_super) {
- memcpy(super, buf, sizeof(*super));
+ memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+ sizeof(*super));
*valid_super_block = block;
*raw_super = super;
}
@@ -1252,42 +1293,29 @@ static int read_raw_super_block(struct super_block *sb,
return err;
}
-static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
+int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
- struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
struct buffer_head *bh;
int err;
- bh = sb_getblk(sbi->sb, block);
+ /* write back-up superblock first */
+ bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
if (!bh)
return -EIO;
-
- lock_buffer(bh);
- memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
- set_buffer_uptodate(bh);
- set_buffer_dirty(bh);
- unlock_buffer(bh);
-
- /* it's rare case, we can do fua all the time */
- err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
brelse(bh);
- return err;
-}
-
-int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
-{
- int err;
-
- /* write back-up superblock first */
- err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
-
/* if we are in recovery path, skip writing valid superblock */
if (recover || err)
return err;
/* write current valid superblock */
- return __f2fs_commit_super(sbi, sbi->valid_super_block);
+ bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+ if (!bh)
+ return -EIO;
+ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+ brelse(bh);
+ return err;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -1442,7 +1470,7 @@ try_onemore:
seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
if (__exist_node_summaries(sbi))
sbi->kbytes_written =
- le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written);
+ le64_to_cpu(seg_i->journal->info.kbytes_written);
build_gc_manager(sbi);
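The boundary fix above compares where MAIN_AREA ends against where the
recorded segments end, and shrinks segment_count in memory (and on disk, when
the device is writable) rather than failing the mount. A worked example with
assumed numbers, not taken from a real image:

#include <stdio.h>

int main(void)
{
	unsigned log_blocks_per_seg = 9;	/* 512 blocks per segment */
	unsigned long long segment0_blkaddr = 512;
	unsigned long long main_blkaddr = 4096;
	unsigned long long segment_count = 2048;	/* as recorded on disk */
	unsigned long long segment_count_main = 2000;

	unsigned long long main_end = main_blkaddr +
		(segment_count_main << log_blocks_per_seg);
	unsigned long long seg_end = segment0_blkaddr +
		(segment_count << log_blocks_per_seg);

	if (main_end < seg_end) {
		/* fix the in-memory (and, if writable, on-disk) count */
		segment_count = (main_end - segment0_blkaddr)
					>> log_blocks_per_seg;
		printf("fixed segment_count to %llu\n", segment_count);
	}
	return 0;
}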
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index cb84f0fcc72a..bfc780c682fb 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -66,11 +66,11 @@ static int
vxfs_immed_readpage(struct file *fp, struct page *pp)
{
struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host);
- u_int64_t offset = (u_int64_t)pp->index << PAGE_CACHE_SHIFT;
+ u_int64_t offset = (u_int64_t)pp->index << PAGE_SHIFT;
caddr_t kaddr;
kaddr = kmap(pp);
- memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE);
+ memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_SIZE);
kunmap(pp);
flush_dcache_page(pp);
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 1cff72df0389..a49e0cfbb686 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -45,7 +45,7 @@
/*
* Number of VxFS blocks per page.
*/
-#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_CACHE_SIZE / (sbp)->s_blocksize))
+#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_SIZE / (sbp)->s_blocksize))
static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int);
@@ -175,7 +175,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp)
if (de) {
ino = de->d_ino;
kunmap(pp);
- page_cache_release(pp);
+ put_page(pp);
}
return (ino);
@@ -255,8 +255,8 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
nblocks = dir_blocks(ip);
pblocks = VXFS_BLOCK_PER_PAGE(sbp);
- page = pos >> PAGE_CACHE_SHIFT;
- offset = pos & ~PAGE_CACHE_MASK;
+ page = pos >> PAGE_SHIFT;
+ offset = pos & ~PAGE_MASK;
block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;
for (; page < npages; page++, block = 0) {
@@ -289,7 +289,7 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
continue;
offset = (char *)de - kaddr;
- ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+ ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
if (!dir_emit(ctx, de->d_name, de->d_namelen,
de->d_ino, DT_UNKNOWN)) {
vxfs_put_page(pp);
@@ -301,6 +301,6 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
vxfs_put_page(pp);
offset = 0;
}
- ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+ ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
return 0;
}
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 5d318c44f855..e806694d4145 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -50,7 +50,7 @@ inline void
vxfs_put_page(struct page *pp)
{
kunmap(pp);
- page_cache_release(pp);
+ put_page(pp);
}
/**
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index fee81e8768c9..592cea54cea0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -33,7 +33,7 @@
/*
* 4MB minimal write chunk size
*/
-#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10))
+#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
struct wb_completion {
atomic_t cnt;
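The converted macro still encodes 4MB: 4096 KB shifted right by
(PAGE_SHIFT - 10) yields the page count, e.g. 1024 pages with 4K pages. A
quick standalone check (assuming PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT 12
#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))

int main(void)
{
	printf("%lu pages = %lu MB\n", MIN_WRITEBACK_PAGES,
	       (MIN_WRITEBACK_PAGES << PAGE_SHIFT) >> 20);
	return 0;
}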
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 6b35fc4860a0..3078b679fcd1 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -113,7 +113,7 @@ try_again:
wake_up_bit(&cookie->flags, 0);
if (xpage)
- page_cache_release(xpage);
+ put_page(xpage);
__fscache_uncache_page(cookie, page);
return true;
@@ -164,7 +164,7 @@ static void fscache_end_page_write(struct fscache_object *object,
}
spin_unlock(&object->lock);
if (xpage)
- page_cache_release(xpage);
+ put_page(xpage);
}
/*
@@ -884,7 +884,7 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
spin_unlock(&cookie->stores_lock);
for (i = n - 1; i >= 0; i--)
- page_cache_release(results[i]);
+ put_page(results[i]);
}
_leave("");
@@ -982,7 +982,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
radix_tree_tag_set(&cookie->stores, page->index,
FSCACHE_COOKIE_PENDING_TAG);
- page_cache_get(page);
+ get_page(page);
/* we only want one writer at a time, but we do need to queue new
* writers after exclusive ops */
@@ -1026,7 +1026,7 @@ submit_failed:
radix_tree_delete(&cookie->stores, page->index);
spin_unlock(&cookie->stores_lock);
wake_cookie = __fscache_unuse_cookie(cookie);
- page_cache_release(page);
+ put_page(page);
ret = -ENOBUFS;
goto nobufs;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ebb5e37455a0..cbece1221417 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -897,7 +897,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
return err;
}
- page_cache_get(newpage);
+ get_page(newpage);
if (!(buf->flags & PIPE_BUF_FLAG_LRU))
lru_cache_add_file(newpage);
@@ -912,12 +912,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
if (err) {
unlock_page(newpage);
- page_cache_release(newpage);
+ put_page(newpage);
return err;
}
unlock_page(oldpage);
- page_cache_release(oldpage);
+ put_page(oldpage);
cs->len = 0;
return 0;
@@ -951,7 +951,7 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
fuse_copy_finish(cs);
buf = cs->pipebufs;
- page_cache_get(page);
+ get_page(page);
buf->page = page;
buf->offset = offset;
buf->len = count;
@@ -1435,7 +1435,7 @@ out_unlock:
out:
for (; page_nr < cs.nr_segs; page_nr++)
- page_cache_release(bufs[page_nr].page);
+ put_page(bufs[page_nr].page);
kfree(bufs);
return ret;
@@ -1632,8 +1632,8 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
goto out_up_killsb;
mapping = inode->i_mapping;
- index = outarg.offset >> PAGE_CACHE_SHIFT;
- offset = outarg.offset & ~PAGE_CACHE_MASK;
+ index = outarg.offset >> PAGE_SHIFT;
+ offset = outarg.offset & ~PAGE_MASK;
file_size = i_size_read(inode);
end = outarg.offset + outarg.size;
if (end > file_size) {
@@ -1652,13 +1652,13 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
if (!page)
goto out_iput;
- this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
+ this_num = min_t(unsigned, num, PAGE_SIZE - offset);
err = fuse_copy_page(cs, &page, offset, this_num, 0);
if (!err && offset == 0 &&
- (this_num == PAGE_CACHE_SIZE || file_size == end))
+ (this_num == PAGE_SIZE || file_size == end))
SetPageUptodate(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (err)
goto out_iput;
@@ -1697,7 +1697,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
size_t total_len = 0;
int num_pages;
- offset = outarg->offset & ~PAGE_CACHE_MASK;
+ offset = outarg->offset & ~PAGE_MASK;
file_size = i_size_read(inode);
num = outarg->size;
@@ -1720,7 +1720,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
req->page_descs[0].offset = offset;
req->end = fuse_retrieve_end;
- index = outarg->offset >> PAGE_CACHE_SHIFT;
+ index = outarg->offset >> PAGE_SHIFT;
while (num && req->num_pages < num_pages) {
struct page *page;
@@ -1730,7 +1730,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
if (!page)
break;
- this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
+ this_num = min_t(unsigned, num, PAGE_SIZE - offset);
req->pages[req->num_pages] = page;
req->page_descs[req->num_pages].length = this_num;
req->num_pages++;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9dde38f12c07..719924d6c706 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -348,7 +348,7 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
pgoff_t curr_index;
BUG_ON(req->inode != inode);
- curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+ curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
if (idx_from < curr_index + req->num_pages &&
curr_index <= idx_to) {
found = true;
@@ -683,11 +683,11 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
* present there.
*/
int i;
- int start_idx = num_read >> PAGE_CACHE_SHIFT;
- size_t off = num_read & (PAGE_CACHE_SIZE - 1);
+ int start_idx = num_read >> PAGE_SHIFT;
+ size_t off = num_read & (PAGE_SIZE - 1);
for (i = start_idx; i < req->num_pages; i++) {
- zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE);
+ zero_user_segment(req->pages[i], off, PAGE_SIZE);
off = 0;
}
} else {
@@ -704,7 +704,7 @@ static int fuse_do_readpage(struct file *file, struct page *page)
struct fuse_req *req;
size_t num_read;
loff_t pos = page_offset(page);
- size_t count = PAGE_CACHE_SIZE;
+ size_t count = PAGE_SIZE;
u64 attr_ver;
int err;
@@ -789,7 +789,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
else
SetPageError(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
if (req->ff)
fuse_file_put(req->ff, false);
@@ -800,7 +800,7 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file)
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
loff_t pos = page_offset(req->pages[0]);
- size_t count = req->num_pages << PAGE_CACHE_SHIFT;
+ size_t count = req->num_pages << PAGE_SHIFT;
req->out.argpages = 1;
req->out.page_zeroing = 1;
@@ -836,7 +836,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
if (req->num_pages &&
(req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
- (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
+ (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
req->pages[req->num_pages - 1]->index + 1 != page->index)) {
int nr_alloc = min_t(unsigned, data->nr_pages,
FUSE_MAX_PAGES_PER_REQ);
@@ -858,7 +858,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
return -EIO;
}
- page_cache_get(page);
+ get_page(page);
req->pages[req->num_pages] = page;
req->page_descs[req->num_pages].length = PAGE_SIZE;
req->num_pages++;
@@ -1003,17 +1003,17 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
for (i = 0; i < req->num_pages; i++) {
struct page *page = req->pages[i];
- if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
+ if (!req->out.h.error && !offset && count >= PAGE_SIZE)
SetPageUptodate(page);
- if (count > PAGE_CACHE_SIZE - offset)
- count -= PAGE_CACHE_SIZE - offset;
+ if (count > PAGE_SIZE - offset)
+ count -= PAGE_SIZE - offset;
else
count = 0;
offset = 0;
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
return res;
@@ -1024,7 +1024,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
struct iov_iter *ii, loff_t pos)
{
struct fuse_conn *fc = get_fuse_conn(mapping->host);
- unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned offset = pos & (PAGE_SIZE - 1);
size_t count = 0;
int err;
@@ -1034,8 +1034,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
do {
size_t tmp;
struct page *page;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
- size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
+ pgoff_t index = pos >> PAGE_SHIFT;
+ size_t bytes = min_t(size_t, PAGE_SIZE - offset,
iov_iter_count(ii));
bytes = min_t(size_t, bytes, fc->max_write - count);
@@ -1059,7 +1059,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
iov_iter_advance(ii, tmp);
if (!tmp) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
bytes = min(bytes, iov_iter_single_seg_count(ii));
goto again;
}
@@ -1072,7 +1072,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
count += tmp;
pos += tmp;
offset += tmp;
- if (offset == PAGE_CACHE_SIZE)
+ if (offset == PAGE_SIZE)
offset = 0;
if (!fc->big_writes)
@@ -1086,8 +1086,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
return min_t(unsigned,
- ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
- (pos >> PAGE_CACHE_SHIFT) + 1,
+ ((pos + len - 1) >> PAGE_SHIFT) -
+ (pos >> PAGE_SHIFT) + 1,
FUSE_MAX_PAGES_PER_REQ);
}
@@ -1205,8 +1205,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
invalidate_mapping_pages(file->f_mapping,
- pos >> PAGE_CACHE_SHIFT,
- endbyte >> PAGE_CACHE_SHIFT);
+ pos >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
written += written_buffered;
iocb->ki_pos = pos + written_buffered;
@@ -1315,8 +1315,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
size_t nmax = write ? fc->max_write : fc->max_read;
loff_t pos = *ppos;
size_t count = iov_iter_count(iter);
- pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT;
- pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+ pgoff_t idx_from = pos >> PAGE_SHIFT;
+ pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
ssize_t res = 0;
struct fuse_req *req;
int err = 0;
@@ -1466,7 +1466,7 @@ __acquires(fc->lock)
{
struct fuse_inode *fi = get_fuse_inode(req->inode);
struct fuse_write_in *inarg = &req->misc.write.in;
- __u64 data_size = req->num_pages * PAGE_CACHE_SIZE;
+ __u64 data_size = req->num_pages * PAGE_SIZE;
if (!fc->connected)
goto out_free;
@@ -1727,7 +1727,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
list_del(&new_req->writepages_entry);
list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
BUG_ON(old_req->inode != new_req->inode);
- curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+ curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
if (curr_index <= page->index &&
page->index < curr_index + old_req->num_pages) {
found = true;
@@ -1742,7 +1742,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
new_req->num_pages = 1;
for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
BUG_ON(tmp->inode != new_req->inode);
- curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+ curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
if (tmp->num_pages == 1 &&
curr_index == page->index) {
old_req = tmp;
@@ -1799,7 +1799,7 @@ static int fuse_writepages_fill(struct page *page,
if (req && req->num_pages &&
(is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
- (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
+ (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
fuse_writepages_send(data);
data->req = NULL;
@@ -1924,7 +1924,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
struct fuse_conn *fc = get_fuse_conn(file_inode(file));
struct page *page;
loff_t fsize;
@@ -1938,15 +1938,15 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
fuse_wait_on_page_writeback(mapping->host, page->index);
- if (PageUptodate(page) || len == PAGE_CACHE_SIZE)
+ if (PageUptodate(page) || len == PAGE_SIZE)
goto success;
/*
* Check if the start of this page comes after the end of file, in which
* case the readpage can be optimized away.
*/
fsize = i_size_read(mapping->host);
- if (fsize <= (pos & PAGE_CACHE_MASK)) {
- size_t off = pos & ~PAGE_CACHE_MASK;
+ if (fsize <= (pos & PAGE_MASK)) {
+ size_t off = pos & ~PAGE_MASK;
if (off)
zero_user_segment(page, 0, off);
goto success;
@@ -1960,7 +1960,7 @@ success:
cleanup:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
error:
return err;
}
@@ -1973,16 +1973,16 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
if (!PageUptodate(page)) {
/* Zero any unwritten bytes at the end of the page */
- size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK;
+ size_t endoff = (pos + copied) & ~PAGE_MASK;
if (endoff)
- zero_user_segment(page, endoff, PAGE_CACHE_SIZE);
+ zero_user_segment(page, endoff, PAGE_SIZE);
SetPageUptodate(page);
}
fuse_write_update_size(inode, pos + copied);
set_page_dirty(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return copied;
}
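fuse_wr_pages() above counts the pages a byte range touches as
last_index - first_index + 1, which the caller then clamps to
FUSE_MAX_PAGES_PER_REQ. A standalone sketch of the span arithmetic (assuming
4K pages):

#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned wr_pages(unsigned long long pos, unsigned long len)
{
	return ((pos + len - 1) >> PAGE_SHIFT) - (pos >> PAGE_SHIFT) + 1;
}

int main(void)
{
	/* 100 bytes straddling a page boundary still cost two pages */
	printf("%u\n", wr_pages(4090, 100));	/* -> 2 */
	/* a fully aligned page costs exactly one */
	printf("%u\n", wr_pages(8192, 4096));	/* -> 1 */
	return 0;
}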
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4d69d5c0bedc..1ce67668a8e1 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -339,11 +339,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
fuse_invalidate_attr(inode);
if (offset >= 0) {
- pg_start = offset >> PAGE_CACHE_SHIFT;
+ pg_start = offset >> PAGE_SHIFT;
if (len <= 0)
pg_end = -1;
else
- pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+ pg_end = (offset + len - 1) >> PAGE_SHIFT;
invalidate_inode_pages2_range(inode->i_mapping,
pg_start, pg_end);
}
@@ -864,7 +864,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
process_init_limits(fc, arg);
if (arg->minor >= 6) {
- ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
+ ra_pages = arg->max_readahead / PAGE_SIZE;
if (arg->flags & FUSE_ASYNC_READ)
fc->async_read = 1;
if (!(arg->flags & FUSE_POSIX_LOCKS))
@@ -901,7 +901,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
if (arg->time_gran && arg->time_gran <= 1000000000)
fc->sb->s_time_gran = arg->time_gran;
} else {
- ra_pages = fc->max_read / PAGE_CACHE_SIZE;
+ ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
fc->no_flock = 1;
}
@@ -922,7 +922,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
arg->major = FUSE_KERNEL_VERSION;
arg->minor = FUSE_KERNEL_MINOR_VERSION;
- arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
+ arg->max_readahead = fc->bdi.ra_pages * PAGE_SIZE;
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
@@ -955,7 +955,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
int err;
fc->bdi.name = "fuse";
- fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+ fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
/* fuse does its own writeback accounting */
fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
@@ -1053,8 +1053,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
goto err;
#endif
} else {
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
}
sb->s_magic = FUSE_SUPER_MAGIC;
sb->s_op = &fuse_super_operations;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index aa016e4b8bec..1bbbee945f46 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -101,7 +101,7 @@ static int gfs2_writepage_common(struct page *page,
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
loff_t i_size = i_size_read(inode);
- pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ pgoff_t end_index = i_size >> PAGE_SHIFT;
unsigned offset;
if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
@@ -109,9 +109,9 @@ static int gfs2_writepage_common(struct page *page,
if (current->journal_info)
goto redirty;
/* Is the page fully outside i_size? (truncate in progress) */
- offset = i_size & (PAGE_CACHE_SIZE-1);
+ offset = i_size & (PAGE_SIZE-1);
if (page->index > end_index || (page->index == end_index && !offset)) {
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
goto out;
}
return 1;
@@ -238,7 +238,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct gfs2_sbd *sdp = GFS2_SB(inode);
- unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
+ unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
int i;
int ret;
@@ -366,8 +366,8 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
cycled = 0;
end = -1;
} else {
- index = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ index = wbc->range_start >> PAGE_SHIFT;
+ end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
@@ -458,7 +458,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
* so we need to supply one here. It doesn't happen often.
*/
if (unlikely(page->index)) {
- zero_user(page, 0, PAGE_CACHE_SIZE);
+ zero_user(page, 0, PAGE_SIZE);
SetPageUptodate(page);
return 0;
}
@@ -471,7 +471,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
- memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
+ memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
kunmap_atomic(kaddr);
flush_dcache_page(page);
brelse(dibh);
@@ -560,8 +560,8 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
unsigned size)
{
struct address_space *mapping = ip->i_inode.i_mapping;
- unsigned long index = *pos / PAGE_CACHE_SIZE;
- unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
+ unsigned long index = *pos / PAGE_SIZE;
+ unsigned offset = *pos & (PAGE_SIZE - 1);
unsigned copied = 0;
unsigned amt;
struct page *page;
@@ -569,15 +569,15 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
do {
amt = size - copied;
- if (offset + size > PAGE_CACHE_SIZE)
- amt = PAGE_CACHE_SIZE - offset;
+ if (offset + size > PAGE_SIZE)
+ amt = PAGE_SIZE - offset;
page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
p = kmap_atomic(page);
memcpy(buf + copied, p + offset, amt);
kunmap_atomic(p);
- page_cache_release(page);
+ put_page(page);
copied += amt;
index++;
offset = 0;
@@ -651,8 +651,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
unsigned requested = 0;
int alloc_required;
int error = 0;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ pgoff_t index = pos >> PAGE_SHIFT;
+ unsigned from = pos & (PAGE_SIZE - 1);
struct page *page;
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
@@ -697,7 +697,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
rblocks += gfs2_rg_blocks(ip, requested);
error = gfs2_trans_begin(sdp, rblocks,
- PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
+ PAGE_SIZE/sdp->sd_sb.sb_bsize);
if (error)
goto out_trans_fail;
@@ -727,7 +727,7 @@ out:
return 0;
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
gfs2_trans_end(sdp);
if (pos + len > ip->i_inode.i_size)
@@ -827,7 +827,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
if (!PageUptodate(page))
SetPageUptodate(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (copied) {
if (inode->i_size < to)
@@ -877,7 +877,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct buffer_head *dibh;
- unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned int from = pos & (PAGE_SIZE - 1);
unsigned int to = from + len;
int ret;
struct gfs2_trans *tr = current->journal_info;
@@ -888,7 +888,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
ret = gfs2_meta_inode_buffer(ip, &dibh);
if (unlikely(ret)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
goto failed;
}
@@ -992,7 +992,7 @@ static void gfs2_invalidatepage(struct page *page, unsigned int offset,
{
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
unsigned int stop = offset + length;
- int partial_page = (offset || length < PAGE_CACHE_SIZE);
+ int partial_page = (offset || length < PAGE_SIZE);
struct buffer_head *bh, *head;
unsigned long pos = 0;
@@ -1082,7 +1082,7 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
* the first place, mapping->nrpages will always be zero.
*/
if (mapping->nrpages) {
- loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1);
+ loff_t lstart = offset & ~(PAGE_SIZE - 1);
loff_t len = iov_iter_count(iter);
loff_t end = PAGE_ALIGN(offset + len) - 1;
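A side note on the arithmetic this hunk renames rather than changes: with PAGE_SIZE a power of two, a file position splits into a page index (shift or divide) and an in-page offset (mask), and gfs2_internal_read() caps each copy at the end of the current page. A minimal userspace sketch of that loop, assuming the common PAGE_SHIFT of 12 (it is architecture-dependent in the kernel):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long pos = 10000, size = 9000, copied = 0;

	while (copied < size) {
		unsigned long index  = (pos + copied) / PAGE_SIZE;
		unsigned long offset = (pos + copied) & (PAGE_SIZE - 1);
		unsigned long amt    = size - copied;

		if (offset + amt > PAGE_SIZE)	/* cap at the page end */
			amt = PAGE_SIZE - offset;
		assert(offset + amt <= PAGE_SIZE);
		printf("page %lu: offset %lu, %lu bytes\n", index, offset, amt);
		copied += amt;
	}
	return 0;
}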
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 0860f0b5b3f1..24ce1cdd434a 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -75,7 +75,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
dsize = dibh->b_size - sizeof(struct gfs2_dinode);
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
- memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
+ memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
kunmap(page);
SetPageUptodate(page);
@@ -98,7 +98,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
if (release) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
return 0;
@@ -932,8 +932,8 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
{
struct inode *inode = mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
- unsigned long index = from >> PAGE_CACHE_SHIFT;
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ unsigned long index = from >> PAGE_SHIFT;
+ unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize, iblock, length, pos;
struct buffer_head *bh;
struct page *page;
@@ -945,7 +945,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
blocksize = inode->i_sb->s_blocksize;
length = blocksize - (offset & (blocksize - 1));
- iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+ iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
@@ -989,7 +989,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
mark_buffer_dirty(bh);
unlock:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return err;
}
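gfs2_block_truncate_page() above also shows the page-to-block conversion that many of these hunks touch: because the block size divides PAGE_SIZE, the first logical block of a page is index << (PAGE_SHIFT - blocksize_bits). A standalone illustration, with an assumed 1 KiB block size:

#include <stdio.h>

#define PAGE_SHIFT 12			/* illustration only */

int main(void)
{
	unsigned int blkbits = 10;	/* assumed 1 KiB filesystem blocks */
	unsigned long index = 7;	/* page number within the file */
	unsigned long per_page = 1UL << (PAGE_SHIFT - blkbits);
	unsigned long iblock = index << (PAGE_SHIFT - blkbits);

	printf("page %lu holds blocks %lu..%lu (%lu per page)\n",
	       index, iblock, iblock + per_page - 1, per_page);
	return 0;
}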
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index c9384f932975..208efc70ad49 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -354,8 +354,8 @@ static int gfs2_allocate_page_backing(struct page *page)
{
struct inode *inode = page->mapping->host;
struct buffer_head bh;
- unsigned long size = PAGE_CACHE_SIZE;
- u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ unsigned long size = PAGE_SIZE;
+ u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);
do {
bh.b_state = 0;
@@ -386,7 +386,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned long last_index;
- u64 pos = page->index << PAGE_CACHE_SHIFT;
+ u64 pos = page->index << PAGE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
struct gfs2_holder gh;
loff_t size;
@@ -401,7 +401,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto out;
- gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
+ gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);
@@ -411,7 +411,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
- if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
+ if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
lock_page(page);
if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
ret = -EAGAIN;
@@ -424,7 +424,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto out_unlock;
- gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
+ gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
ap.target = data_blocks + ind_blocks;
ret = gfs2_quota_lock_check(ip, &ap);
if (ret)
@@ -447,7 +447,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
lock_page(page);
ret = -EINVAL;
size = i_size_read(inode);
- last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+ last_index = (size - 1) >> PAGE_SHIFT;
/* Check page index against inode size */
if (size == 0 || (page->index > last_index))
goto out_trans_end;
@@ -873,7 +873,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
rblocks += data_blocks ? data_blocks : 1;
error = gfs2_trans_begin(sdp, rblocks,
- PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
+ PAGE_SIZE/sdp->sd_sb.sb_bsize);
if (error)
goto out_trans_fail;
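One pattern worth flagging in passing, untouched by this rename: `pos = page->index << PAGE_SHIFT` shifts a pgoff_t (an unsigned long) before widening to u64, which can truncate on 32-bit machines once offsets reach 4 GiB; the pagemap helper page_offset() casts to loff_t before shifting for exactly this reason. A small demonstration of the difference (userspace, PAGE_SHIFT assumed 12):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int index = 0x00200000u;	/* the page 8 GiB into a file */

	uint64_t bad  = index << PAGE_SHIFT;		/* 32-bit shift wraps to 0 */
	uint64_t good = (uint64_t)index << PAGE_SHIFT;	/* widen first */

	printf("bad=%llu good=%llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}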
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index e137d96f1b17..0448524c11bc 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -124,7 +124,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
if (mapping == NULL)
mapping = &sdp->sd_aspace;
- shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
+ shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
index = blkno >> shift; /* convert block to page */
bufnum = blkno - (index << shift); /* block buf index within page */
@@ -154,7 +154,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
map_bh(bh, sdp->sd_vfs, blkno);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return bh;
}
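gfs2_getbuf() above decomposes a disk block number into a page index plus a buffer index within that page, and rebuilds the block number with the same shift in reverse. A sketch (userspace, PAGE_SHIFT assumed 12, 1 KiB blocks assumed):

#include <stdio.h>

#define PAGE_SHIFT 12			/* illustration only */

int main(void)
{
	unsigned int shift = PAGE_SHIFT - 10;	/* 1 KiB filesystem blocks */
	unsigned long long blkno = 12345;

	unsigned long long index  = blkno >> shift;	      /* which page */
	unsigned long long bufnum = blkno - (index << shift); /* buffer within it */

	printf("block %llu -> page %llu, buffer %llu of %u\n",
	       blkno, index, bufnum, 1u << shift);
	return 0;
}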
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a39891344259..ce7d69a2fdc0 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -701,7 +701,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
unsigned to_write = bytes, pg_off = off;
int done = 0;
- blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift);
+ blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
boff = off % bsize;
page = find_or_create_page(mapping, index, GFP_NOFS);
@@ -753,13 +753,13 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
flush_dcache_page(page);
kunmap_atomic(kaddr);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return 0;
unlock_out:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return -EIO;
}
@@ -773,13 +773,13 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
nbytes = sizeof(struct gfs2_quota);
- pg_beg = loc >> PAGE_CACHE_SHIFT;
- pg_off = loc % PAGE_CACHE_SIZE;
+ pg_beg = loc >> PAGE_SHIFT;
+ pg_off = loc % PAGE_SIZE;
/* If the quota straddles a page boundary, split the write in two */
- if ((pg_off + nbytes) > PAGE_CACHE_SIZE) {
+ if ((pg_off + nbytes) > PAGE_SIZE) {
pg_oflow = 1;
- overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE;
+ overflow = (pg_off + nbytes) - PAGE_SIZE;
}
ptr = qp;
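The quota path splits a record that straddles a page boundary into two writes, and the overflow computation above is the whole trick. A runnable illustration with assumed sizes:

#include <stdio.h>

#define PAGE_SHIFT 12			/* illustration only */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long loc = 8100, nbytes = 100;	/* record crosses a boundary */
	unsigned long pg_beg = loc >> PAGE_SHIFT;
	unsigned long pg_off = loc % PAGE_SIZE;
	unsigned long overflow = 0;

	if (pg_off + nbytes > PAGE_SIZE)
		overflow = (pg_off + nbytes) - PAGE_SIZE;

	printf("write %lu bytes at page %lu offset %lu\n",
	       nbytes - overflow, pg_beg, pg_off);
	if (overflow)
		printf("write %lu bytes at page %lu offset 0\n",
		       overflow, pg_beg + 1);
	return 0;
}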
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 07c0265aa195..99a0bdac8796 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -918,9 +918,8 @@ static int read_rindex_entry(struct gfs2_inode *ip)
goto fail;
rgd->rd_gl->gl_object = rgd;
- rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK;
- rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr +
- rgd->rd_length) * bsize) - 1;
+ rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
+ rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
if (rgd->rd_data > sdp->sd_max_rg_data)
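read_rindex_entry() rounds the glock VM range down and up to page boundaries with PAGE_MASK and PAGE_ALIGN. The macros below mirror the kernel definitions, for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12			/* illustration only */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long start = 5000, end = 13000;

	printf("vm.start = %lu\n", start & PAGE_MASK);	 /* 4096, rounded down */
	printf("vm.end   = %lu\n", PAGE_ALIGN(end) - 1); /* 16383, inclusive */
	return 0;
}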
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 221719eac5de..d77d844b668b 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -278,14 +278,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
mapping = tree->inode->i_mapping;
off = (loff_t)cnid * tree->node_size;
- block = off >> PAGE_CACHE_SHIFT;
- node->page_offset = off & ~PAGE_CACHE_MASK;
+ block = off >> PAGE_SHIFT;
+ node->page_offset = off & ~PAGE_MASK;
for (i = 0; i < tree->pages_per_bnode; i++) {
page = read_mapping_page(mapping, block++, NULL);
if (IS_ERR(page))
goto fail;
if (PageError(page)) {
- page_cache_release(page);
+ put_page(page);
goto fail;
}
node->page[i] = page;
@@ -401,7 +401,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
for (i = 0; i < node->tree->pages_per_bnode; i++)
if (node->page[i])
- page_cache_release(node->page[i]);
+ put_page(node->page[i]);
kfree(node);
}
@@ -429,11 +429,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
pagep = node->page;
memset(kmap(*pagep) + node->page_offset, 0,
- min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
+ min((int)PAGE_SIZE, (int)tree->node_size));
set_page_dirty(*pagep);
kunmap(*pagep);
for (i = 1; i < tree->pages_per_bnode; i++) {
- memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+ memset(kmap(*++pagep), 0, PAGE_SIZE);
set_page_dirty(*pagep);
kunmap(*pagep);
}
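The hfs bnode helpers all share one loop shape: a partial chunk up to the end of the first page, then whole pages. A userspace model of hfs_bnode_read() (node_read is a hypothetical stand-in with kmap()/kunmap() omitted; PAGE_SHIFT assumed 12):

#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12			/* illustration only */
#define PAGE_SIZE  (1U << PAGE_SHIFT)

/* Copy len bytes starting at byte off of a node spanning several pages
 * (modelled as fixed-size buffers): one partial chunk to the end of the
 * first page, then whole pages. */
static void node_read(char *pages[], unsigned off, char *buf, unsigned len)
{
	char **pagep = pages + (off >> PAGE_SHIFT);
	unsigned l;

	off &= PAGE_SIZE - 1;
	l = len < PAGE_SIZE - off ? len : PAGE_SIZE - off;
	memcpy(buf, *pagep + off, l);
	while ((len -= l) != 0) {
		buf += l;
		l = len < PAGE_SIZE ? len : PAGE_SIZE;
		memcpy(buf, *++pagep, l);
	}
}

int main(void)
{
	static char p0[PAGE_SIZE], p1[PAGE_SIZE];
	char *pages[] = { p0, p1 };
	char out[16];

	memcpy(p0 + PAGE_SIZE - 6, "hello ", 6);
	memcpy(p1, "world", 6);			/* includes the NUL */
	node_read(pages, PAGE_SIZE - 6, out, 12);
	printf("%s\n", out);			/* spans the page boundary */
	return 0;
}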
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 1ab19e660e69..37cdd955eceb 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -116,14 +116,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
}
tree->node_size_shift = ffs(size) - 1;
- tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
kunmap(page);
- page_cache_release(page);
+ put_page(page);
return tree;
fail_page:
- page_cache_release(page);
+ put_page(page);
free_inode:
tree->inode->i_mapping->a_ops = &hfs_aops;
iput(tree->inode);
@@ -257,9 +257,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
off = off16;
off += node->page_offset;
- pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+ pagep = node->page + (off >> PAGE_SHIFT);
data = kmap(*pagep);
- off &= ~PAGE_CACHE_MASK;
+ off &= ~PAGE_MASK;
idx = 0;
for (;;) {
@@ -279,7 +279,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
}
}
}
- if (++off >= PAGE_CACHE_SIZE) {
+ if (++off >= PAGE_SIZE) {
kunmap(*pagep);
data = kmap(*++pagep);
off = 0;
@@ -302,9 +302,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
len = hfs_brec_lenoff(node, 0, &off16);
off = off16;
off += node->page_offset;
- pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+ pagep = node->page + (off >> PAGE_SHIFT);
data = kmap(*pagep);
- off &= ~PAGE_CACHE_MASK;
+ off &= ~PAGE_MASK;
}
}
@@ -348,9 +348,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
len = hfs_brec_lenoff(node, 0, &off);
}
off += node->page_offset + nidx / 8;
- page = node->page[off >> PAGE_CACHE_SHIFT];
+ page = node->page[off >> PAGE_SHIFT];
data = kmap(page);
- off &= ~PAGE_CACHE_MASK;
+ off &= ~PAGE_MASK;
m = 1 << (~nidx & 7);
byte = data[off];
if (!(byte & m)) {
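(node_size + PAGE_SIZE - 1) >> PAGE_SHIFT above is the usual round-up division, which the kernel elsewhere spells DIV_ROUND_UP(node_size, PAGE_SIZE). For instance:

#include <stdio.h>

#define PAGE_SHIFT 12			/* illustration only */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long node_size;

	for (node_size = 512; node_size <= 16384; node_size <<= 1)
		printf("node_size %5lu -> %lu page(s)\n", node_size,
		       (node_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	/* identical to DIV_ROUND_UP(node_size, PAGE_SIZE) */
	return 0;
}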
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 6686bf39a5b5..cb1e5faa2fb7 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -91,8 +91,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
if (!tree)
return 0;
- if (tree->node_size >= PAGE_CACHE_SIZE) {
- nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
+ if (tree->node_size >= PAGE_SIZE) {
+ nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx);
if (!node)
@@ -105,8 +105,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
}
spin_unlock(&tree->hash_lock);
} else {
- nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
- i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+ nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
+ i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock);
do {
node = hfs_bnode_findhash(tree, nidx++);
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index d2954451519e..c0ae274c0a22 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -13,7 +13,7 @@
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
-#define PAGE_CACHE_BITS (PAGE_CACHE_SIZE * 8)
+#define PAGE_CACHE_BITS (PAGE_SIZE * 8)
int hfsplus_block_allocate(struct super_block *sb, u32 size,
u32 offset, u32 *max)
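Note that PAGE_CACHE_BITS keeps its historical name even though the PAGE_CACHE_* constants it was built from are going away; it counts allocation-bitmap bits carried per page (8 per byte). Locating a bit then works like this (illustrative sizes):

#include <stdio.h>

#define PAGE_SIZE 4096UL		/* illustration only */
#define PAGE_CACHE_BITS (PAGE_SIZE * 8)	/* bitmap bits per page */

int main(void)
{
	unsigned long bit = 100000;	/* allocation-bitmap bit number */

	printf("bit %lu: bitmap page %lu, bit %lu within it\n",
	       bit, bit / PAGE_CACHE_BITS, bit % PAGE_CACHE_BITS);
	return 0;
}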
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 63924662aaf3..ce014ceb89ef 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -24,16 +24,16 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
int l;
off += node->page_offset;
- pagep = node->page + (off >> PAGE_CACHE_SHIFT);
- off &= ~PAGE_CACHE_MASK;
+ pagep = node->page + (off >> PAGE_SHIFT);
+ off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_CACHE_SIZE - off);
+ l = min_t(int, len, PAGE_SIZE - off);
memcpy(buf, kmap(*pagep) + off, l);
kunmap(*pagep);
while ((len -= l) != 0) {
buf += l;
- l = min_t(int, len, PAGE_CACHE_SIZE);
+ l = min_t(int, len, PAGE_SIZE);
memcpy(buf, kmap(*++pagep), l);
kunmap(*pagep);
}
@@ -77,17 +77,17 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
int l;
off += node->page_offset;
- pagep = node->page + (off >> PAGE_CACHE_SHIFT);
- off &= ~PAGE_CACHE_MASK;
+ pagep = node->page + (off >> PAGE_SHIFT);
+ off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_CACHE_SIZE - off);
+ l = min_t(int, len, PAGE_SIZE - off);
memcpy(kmap(*pagep) + off, buf, l);
set_page_dirty(*pagep);
kunmap(*pagep);
while ((len -= l) != 0) {
buf += l;
- l = min_t(int, len, PAGE_CACHE_SIZE);
+ l = min_t(int, len, PAGE_SIZE);
memcpy(kmap(*++pagep), buf, l);
set_page_dirty(*pagep);
kunmap(*pagep);
@@ -107,16 +107,16 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
int l;
off += node->page_offset;
- pagep = node->page + (off >> PAGE_CACHE_SHIFT);
- off &= ~PAGE_CACHE_MASK;
+ pagep = node->page + (off >> PAGE_SHIFT);
+ off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_CACHE_SIZE - off);
+ l = min_t(int, len, PAGE_SIZE - off);
memset(kmap(*pagep) + off, 0, l);
set_page_dirty(*pagep);
kunmap(*pagep);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_CACHE_SIZE);
+ l = min_t(int, len, PAGE_SIZE);
memset(kmap(*++pagep), 0, l);
set_page_dirty(*pagep);
kunmap(*pagep);
@@ -136,20 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
tree = src_node->tree;
src += src_node->page_offset;
dst += dst_node->page_offset;
- src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
- src &= ~PAGE_CACHE_MASK;
- dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
- dst &= ~PAGE_CACHE_MASK;
+ src_page = src_node->page + (src >> PAGE_SHIFT);
+ src &= ~PAGE_MASK;
+ dst_page = dst_node->page + (dst >> PAGE_SHIFT);
+ dst &= ~PAGE_MASK;
if (src == dst) {
- l = min_t(int, len, PAGE_CACHE_SIZE - src);
+ l = min_t(int, len, PAGE_SIZE - src);
memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
kunmap(*src_page);
set_page_dirty(*dst_page);
kunmap(*dst_page);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_CACHE_SIZE);
+ l = min_t(int, len, PAGE_SIZE);
memcpy(kmap(*++dst_page), kmap(*++src_page), l);
kunmap(*src_page);
set_page_dirty(*dst_page);
@@ -161,12 +161,12 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
do {
src_ptr = kmap(*src_page) + src;
dst_ptr = kmap(*dst_page) + dst;
- if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
- l = PAGE_CACHE_SIZE - src;
+ if (PAGE_SIZE - src < PAGE_SIZE - dst) {
+ l = PAGE_SIZE - src;
src = 0;
dst += l;
} else {
- l = PAGE_CACHE_SIZE - dst;
+ l = PAGE_SIZE - dst;
src += l;
dst = 0;
}
@@ -195,11 +195,11 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
dst += node->page_offset;
if (dst > src) {
src += len - 1;
- src_page = node->page + (src >> PAGE_CACHE_SHIFT);
- src = (src & ~PAGE_CACHE_MASK) + 1;
+ src_page = node->page + (src >> PAGE_SHIFT);
+ src = (src & ~PAGE_MASK) + 1;
dst += len - 1;
- dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
- dst = (dst & ~PAGE_CACHE_MASK) + 1;
+ dst_page = node->page + (dst >> PAGE_SHIFT);
+ dst = (dst & ~PAGE_MASK) + 1;
if (src == dst) {
while (src < len) {
@@ -208,7 +208,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
set_page_dirty(*dst_page);
kunmap(*dst_page);
len -= src;
- src = PAGE_CACHE_SIZE;
+ src = PAGE_SIZE;
src_page--;
dst_page--;
}
@@ -226,32 +226,32 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
dst_ptr = kmap(*dst_page) + dst;
if (src < dst) {
l = src;
- src = PAGE_CACHE_SIZE;
+ src = PAGE_SIZE;
dst -= l;
} else {
l = dst;
src -= l;
- dst = PAGE_CACHE_SIZE;
+ dst = PAGE_SIZE;
}
l = min(len, l);
memmove(dst_ptr - l, src_ptr - l, l);
kunmap(*src_page);
set_page_dirty(*dst_page);
kunmap(*dst_page);
- if (dst == PAGE_CACHE_SIZE)
+ if (dst == PAGE_SIZE)
dst_page--;
else
src_page--;
} while ((len -= l));
}
} else {
- src_page = node->page + (src >> PAGE_CACHE_SHIFT);
- src &= ~PAGE_CACHE_MASK;
- dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
- dst &= ~PAGE_CACHE_MASK;
+ src_page = node->page + (src >> PAGE_SHIFT);
+ src &= ~PAGE_MASK;
+ dst_page = node->page + (dst >> PAGE_SHIFT);
+ dst &= ~PAGE_MASK;
if (src == dst) {
- l = min_t(int, len, PAGE_CACHE_SIZE - src);
+ l = min_t(int, len, PAGE_SIZE - src);
memmove(kmap(*dst_page) + src,
kmap(*src_page) + src, l);
kunmap(*src_page);
@@ -259,7 +259,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
kunmap(*dst_page);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_CACHE_SIZE);
+ l = min_t(int, len, PAGE_SIZE);
memmove(kmap(*++dst_page),
kmap(*++src_page), l);
kunmap(*src_page);
@@ -272,13 +272,13 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
do {
src_ptr = kmap(*src_page) + src;
dst_ptr = kmap(*dst_page) + dst;
- if (PAGE_CACHE_SIZE - src <
- PAGE_CACHE_SIZE - dst) {
- l = PAGE_CACHE_SIZE - src;
+ if (PAGE_SIZE - src <
+ PAGE_SIZE - dst) {
+ l = PAGE_SIZE - src;
src = 0;
dst += l;
} else {
- l = PAGE_CACHE_SIZE - dst;
+ l = PAGE_SIZE - dst;
src += l;
dst = 0;
}
@@ -444,14 +444,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
mapping = tree->inode->i_mapping;
off = (loff_t)cnid << tree->node_size_shift;
- block = off >> PAGE_CACHE_SHIFT;
- node->page_offset = off & ~PAGE_CACHE_MASK;
+ block = off >> PAGE_SHIFT;
+ node->page_offset = off & ~PAGE_MASK;
for (i = 0; i < tree->pages_per_bnode; block++, i++) {
page = read_mapping_page(mapping, block, NULL);
if (IS_ERR(page))
goto fail;
if (PageError(page)) {
- page_cache_release(page);
+ put_page(page);
goto fail;
}
node->page[i] = page;
@@ -569,7 +569,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
for (i = 0; i < node->tree->pages_per_bnode; i++)
if (node->page[i])
- page_cache_release(node->page[i]);
+ put_page(node->page[i]);
kfree(node);
}
@@ -597,11 +597,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
pagep = node->page;
memset(kmap(*pagep) + node->page_offset, 0,
- min_t(int, PAGE_CACHE_SIZE, tree->node_size));
+ min_t(int, PAGE_SIZE, tree->node_size));
set_page_dirty(*pagep);
kunmap(*pagep);
for (i = 1; i < tree->pages_per_bnode; i++) {
- memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+ memset(kmap(*++pagep), 0, PAGE_SIZE);
set_page_dirty(*pagep);
kunmap(*pagep);
}
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 3345c7553edc..d9d1a36ba826 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -236,15 +236,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
tree->node_size_shift = ffs(size) - 1;
tree->pages_per_bnode =
- (tree->node_size + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ (tree->node_size + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
kunmap(page);
- page_cache_release(page);
+ put_page(page);
return tree;
fail_page:
- page_cache_release(page);
+ put_page(page);
free_inode:
tree->inode->i_mapping->a_ops = &hfsplus_aops;
iput(tree->inode);
@@ -380,9 +380,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
off = off16;
off += node->page_offset;
- pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+ pagep = node->page + (off >> PAGE_SHIFT);
data = kmap(*pagep);
- off &= ~PAGE_CACHE_MASK;
+ off &= ~PAGE_MASK;
idx = 0;
for (;;) {
@@ -403,7 +403,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
}
}
}
- if (++off >= PAGE_CACHE_SIZE) {
+ if (++off >= PAGE_SIZE) {
kunmap(*pagep);
data = kmap(*++pagep);
off = 0;
@@ -426,9 +426,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
len = hfs_brec_lenoff(node, 0, &off16);
off = off16;
off += node->page_offset;
- pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+ pagep = node->page + (off >> PAGE_SHIFT);
data = kmap(*pagep);
- off &= ~PAGE_CACHE_MASK;
+ off &= ~PAGE_MASK;
}
}
@@ -475,9 +475,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
len = hfs_brec_lenoff(node, 0, &off);
}
off += node->page_offset + nidx / 8;
- page = node->page[off >> PAGE_CACHE_SHIFT];
+ page = node->page[off >> PAGE_SHIFT];
data = kmap(page);
- off &= ~PAGE_CACHE_MASK;
+ off &= ~PAGE_MASK;
m = 1 << (~nidx & 7);
byte = data[off];
if (!(byte & m)) {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 1a6394cdb54e..b28f39865c3a 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -87,9 +87,9 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
}
if (!tree)
return 0;
- if (tree->node_size >= PAGE_CACHE_SIZE) {
+ if (tree->node_size >= PAGE_SIZE) {
nidx = page->index >>
- (tree->node_size_shift - PAGE_CACHE_SHIFT);
+ (tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx);
if (!node)
@@ -103,8 +103,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
spin_unlock(&tree->hash_lock);
} else {
nidx = page->index <<
- (PAGE_CACHE_SHIFT - tree->node_size_shift);
- i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+ (PAGE_SHIFT - tree->node_size_shift);
+ i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock);
do {
node = hfs_bnode_findhash(tree, nidx++);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 5d54490a136d..c35911362ff9 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -438,7 +438,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
err = -EFBIG;
last_fs_block = sbi->total_blocks - 1;
last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
(last_fs_page > (pgoff_t)(~0ULL))) {
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 45dc4ae3791a..4f118d282a7a 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -220,7 +220,7 @@ check_attr_tree_state_again:
index = 0;
written = 0;
- for (; written < node_size; index++, written += PAGE_CACHE_SIZE) {
+ for (; written < node_size; index++, written += PAGE_SIZE) {
void *kaddr;
page = read_mapping_page(mapping, index, NULL);
@@ -231,11 +231,11 @@ check_attr_tree_state_again:
kaddr = kmap_atomic(page);
memcpy(kaddr, buf + written,
- min_t(size_t, PAGE_CACHE_SIZE, node_size - written));
+ min_t(size_t, PAGE_SIZE, node_size - written));
kunmap_atomic(kaddr);
set_page_dirty(page);
- page_cache_release(page);
+ put_page(page);
}
hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index d1abbee281d1..7016653f3e41 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -410,12 +410,12 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
struct inode *inode = mapping->host;
char *buffer;
loff_t base = page_offset(page);
- int count = PAGE_CACHE_SIZE;
- int end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+ int count = PAGE_SIZE;
+ int end_index = inode->i_size >> PAGE_SHIFT;
int err;
if (page->index >= end_index)
- count = inode->i_size & (PAGE_CACHE_SIZE-1);
+ count = inode->i_size & (PAGE_SIZE-1);
buffer = kmap(page);
@@ -447,7 +447,7 @@ static int hostfs_readpage(struct file *file, struct page *page)
buffer = kmap(page);
bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (bytes_read < 0) {
ClearPageUptodate(page);
SetPageError(page);
@@ -455,7 +455,7 @@ static int hostfs_readpage(struct file *file, struct page *page)
goto out;
}
- memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read);
+ memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read);
ClearPageError(page);
SetPageUptodate(page);
@@ -471,7 +471,7 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
*pagep = grab_cache_page_write_begin(mapping, index, flags);
if (!*pagep)
@@ -485,14 +485,14 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
{
struct inode *inode = mapping->host;
void *buffer;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
int err;
buffer = kmap(page);
err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied);
kunmap(page);
- if (!PageUptodate(page) && err == PAGE_CACHE_SIZE)
+ if (!PageUptodate(page) && err == PAGE_SIZE)
SetPageUptodate(page);
/*
@@ -502,7 +502,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
if (err > 0 && (pos > inode->i_size))
inode->i_size = pos;
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return err;
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e1f465a389d5..4ea71eba40a5 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -213,12 +213,12 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
int i, chunksize;
/* Find which 4k chunk and offset within that chunk */
- i = offset >> PAGE_CACHE_SHIFT;
- offset = offset & ~PAGE_CACHE_MASK;
+ i = offset >> PAGE_SHIFT;
+ offset = offset & ~PAGE_MASK;
while (size) {
size_t n;
- chunksize = PAGE_CACHE_SIZE;
+ chunksize = PAGE_SIZE;
if (offset)
chunksize -= offset;
if (chunksize > size)
@@ -237,7 +237,7 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
/*
* Support for read() - Find the page attached to f_mapping and copy out the
* data. It's *very* similar to do_generic_mapping_read(); we can't use that
- * since it has PAGE_CACHE_SIZE assumptions.
+ * since it has PAGE_SIZE assumptions.
*/
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
@@ -285,7 +285,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
* We have the page, copy it to user space buffer.
*/
copied = hugetlbfs_read_actor(page, offset, to, nr);
- page_cache_release(page);
+ put_page(page);
}
offset += copied;
retval += copied;
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index f311bf084015..2e4e834d1a98 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -26,7 +26,7 @@
#include "zisofs.h"
/* This should probably be global. */
-static char zisofs_sink_page[PAGE_CACHE_SIZE];
+static char zisofs_sink_page[PAGE_SIZE];
/*
* This contains the zlib memory allocation and the mutex for the
@@ -70,11 +70,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
for ( i = 0 ; i < pcount ; i++ ) {
if (!pages[i])
continue;
- memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE);
+ memset(page_address(pages[i]), 0, PAGE_SIZE);
flush_dcache_page(pages[i]);
SetPageUptodate(pages[i]);
}
- return ((loff_t)pcount) << PAGE_CACHE_SHIFT;
+ return ((loff_t)pcount) << PAGE_SHIFT;
}
/* Because zlib is not thread-safe, do all the I/O at the top. */
@@ -121,11 +121,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
if (pages[curpage]) {
stream.next_out = page_address(pages[curpage])
+ poffset;
- stream.avail_out = PAGE_CACHE_SIZE - poffset;
+ stream.avail_out = PAGE_SIZE - poffset;
poffset = 0;
} else {
stream.next_out = (void *)&zisofs_sink_page;
- stream.avail_out = PAGE_CACHE_SIZE;
+ stream.avail_out = PAGE_SIZE;
}
}
if (!stream.avail_in) {
@@ -220,14 +220,14 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
* pages with the data we have anyway...
*/
start_off = page_offset(pages[full_page]);
- end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size);
+ end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size);
cstart_block = start_off >> zisofs_block_shift;
cend_block = (end_off + (1 << zisofs_block_shift) - 1)
>> zisofs_block_shift;
- WARN_ON(start_off - (full_page << PAGE_CACHE_SHIFT) !=
- ((cstart_block << zisofs_block_shift) & PAGE_CACHE_MASK));
+ WARN_ON(start_off - (full_page << PAGE_SHIFT) !=
+ ((cstart_block << zisofs_block_shift) & PAGE_MASK));
/* Find the pointer to this specific chunk */
/* Note: we're not using isonum_731() here because the data is known aligned */
@@ -260,10 +260,10 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
ret = zisofs_uncompress_block(inode, block_start, block_end,
pcount, pages, poffset, &err);
poffset += ret;
- pages += poffset >> PAGE_CACHE_SHIFT;
- pcount -= poffset >> PAGE_CACHE_SHIFT;
- full_page -= poffset >> PAGE_CACHE_SHIFT;
- poffset &= ~PAGE_CACHE_MASK;
+ pages += poffset >> PAGE_SHIFT;
+ pcount -= poffset >> PAGE_SHIFT;
+ full_page -= poffset >> PAGE_SHIFT;
+ poffset &= ~PAGE_MASK;
if (err) {
brelse(bh);
@@ -282,7 +282,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
if (poffset && *pages) {
memset(page_address(*pages) + poffset, 0,
- PAGE_CACHE_SIZE - poffset);
+ PAGE_SIZE - poffset);
flush_dcache_page(*pages);
SetPageUptodate(*pages);
}
@@ -302,12 +302,12 @@ static int zisofs_readpage(struct file *file, struct page *page)
int i, pcount, full_page;
unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
unsigned int zisofs_pages_per_cblock =
- PAGE_CACHE_SHIFT <= zisofs_block_shift ?
- (1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0;
+ PAGE_SHIFT <= zisofs_block_shift ?
+ (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
pgoff_t index = page->index, end_index;
- end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
/*
* If this page is wholly outside i_size we just return zero;
* do_generic_file_read() will handle this for us
@@ -318,7 +318,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
return 0;
}
- if (PAGE_CACHE_SHIFT <= zisofs_block_shift) {
+ if (PAGE_SHIFT <= zisofs_block_shift) {
/* We have already been given one page, this is the one
we must do. */
full_page = index & (zisofs_pages_per_cblock - 1);
@@ -351,7 +351,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
kunmap(pages[i]);
unlock_page(pages[i]);
if (i != full_page)
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index bcd2d41b318a..131dedc920d8 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1021,7 +1021,7 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock,
* the page with useless information without generating any
* I/O errors.
*/
- if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
+ if (b_off > ((inode->i_size + PAGE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n",
__func__, b_off,
(unsigned long long)inode->i_size);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 517f2de784cf..2ad98d6e19f4 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -81,11 +81,11 @@ static void release_buffer_page(struct buffer_head *bh)
if (!trylock_page(page))
goto nope;
- page_cache_get(page);
+ get_page(page);
__brelse(bh);
try_to_free_buffers(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return;
nope:
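release_buffer_page() is a good example of the pairing behind the page_cache_get()/page_cache_release() renames: the page must stay pinned across try_to_free_buffers() because __brelse() may drop the reference that was keeping it alive. A toy userspace model of that pairing (get_page/put_page here are stand-in re-implementations, not the kernel functions):

#include <stdio.h>

struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }

static void put_page(struct page *p)
{
	if (--p->refcount == 0)
		printf("page freed\n");
}

int main(void)
{
	struct page pg = { .refcount = 1 };	/* ref currently held via the bh */

	get_page(&pg);		/* pin before dropping the bh reference */
	put_page(&pg);		/* models __brelse(): the old ref goes away */
	printf("page still safe to touch (refcount=%d)\n", pg.refcount);
	put_page(&pg);		/* balances the explicit get_page() */
	return 0;
}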
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index de73a9516a54..435f0b26ac20 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2221,7 +2221,7 @@ void jbd2_journal_ack_err(journal_t *journal)
int jbd2_journal_blocks_per_page(struct inode *inode)
{
- return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+ return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
}
/*
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 01e4652d88f6..67c103867bf8 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -2263,7 +2263,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
struct buffer_head *head, *bh, *next;
unsigned int stop = offset + length;
unsigned int curr_off = 0;
- int partial_page = (offset || length < PAGE_CACHE_SIZE);
+ int partial_page = (offset || length < PAGE_SIZE);
int may_free = 1;
int ret = 0;
@@ -2272,7 +2272,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
if (!page_has_buffers(page))
return 0;
- BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+ BUG_ON(stop > PAGE_SIZE || stop < length);
/* We will potentially be playing with lists other than just the
* data lists (especially for journaled data mode), so be
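jbd2 and gfs2 rewrite the same partial-page test in these hunks: an invalidate is partial unless it starts at offset 0 and covers the full page. As a truth table:

#include <stdio.h>

#define PAGE_SIZE 4096U		/* illustration only */

static int partial_page(unsigned int offset, unsigned int length)
{
	return offset || length < PAGE_SIZE;	/* the test from the hunks */
}

int main(void)
{
	printf("full page: partial=%d\n", partial_page(0, PAGE_SIZE));
	printf("tail only: partial=%d\n", partial_page(512, PAGE_SIZE - 512));
	printf("head only: partial=%d\n", partial_page(0, 1024));
	return 0;
}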
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index 1090eb64b90d..9d26b1b9fc01 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -95,15 +95,15 @@ __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f)
rather than mucking around with actually reading the node
and checking the compression type, which is the real way
to tell a hole node. */
- if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag)
- && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
+ if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag)
+ && frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) {
JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n",
ref_offset(fn->raw));
bitched = 1;
}
- if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag)
- && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
+ if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag)
+ && frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) {
JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n",
ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
bitched = 1;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index cad86bac3453..0e62dec3effc 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -87,14 +87,15 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
int ret;
jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
- __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT);
+ __func__, inode->i_ino, pg->index << PAGE_SHIFT);
BUG_ON(!PageLocked(pg));
pg_buf = kmap(pg);
/* FIXME: Can kmap fail? */
- ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+ ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
+ PAGE_SIZE);
if (ret) {
ClearPageUptodate(pg);
@@ -137,8 +138,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
struct page *pg;
struct inode *inode = mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
- uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
+ uint32_t pageofs = index << PAGE_SHIFT;
int ret = 0;
pg = grab_cache_page_write_begin(mapping, index, flags);
@@ -230,7 +231,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
out_page:
unlock_page(pg);
- page_cache_release(pg);
+ put_page(pg);
return ret;
}
@@ -245,14 +246,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_raw_inode *ri;
- unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned start = pos & (PAGE_SIZE - 1);
unsigned end = start + copied;
unsigned aligned_start = start & ~3;
int ret = 0;
uint32_t writtenlen = 0;
jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
- __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT,
+ __func__, inode->i_ino, pg->index << PAGE_SHIFT,
start, end, pg->flags);
/* We need to avoid deadlock with page_cache_read() in
@@ -261,7 +262,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
to re-lock it. */
BUG_ON(!PageUptodate(pg));
- if (end == PAGE_CACHE_SIZE) {
+ if (end == PAGE_SIZE) {
/* When writing out the end of a page, write out the
_whole_ page. This helps to reduce the number of
nodes in files which have many short writes, like
@@ -275,7 +276,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
__func__);
unlock_page(pg);
- page_cache_release(pg);
+ put_page(pg);
return -ENOMEM;
}
@@ -292,7 +293,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
kmap(pg);
ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
- (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
+ (pg->index << PAGE_SHIFT) + aligned_start,
end - aligned_start, &writtenlen);
kunmap(pg);
@@ -329,6 +330,6 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
jffs2_dbg(1, "%s() returning %d\n",
__func__, writtenlen > 0 ? writtenlen : ret);
unlock_page(pg);
- page_cache_release(pg);
+ put_page(pg);
return writtenlen > 0 ? writtenlen : ret;
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index bead25ae8fe4..ae2ebb26b446 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -586,8 +586,8 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
goto out_root;
sb->s_maxbytes = 0xFFFFFFFF;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = JFFS2_SUPER_MAGIC;
if (!(sb->s_flags & MS_RDONLY))
jffs2_start_garbage_collect_thread(c);
@@ -685,7 +685,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
struct inode *inode = OFNI_EDONI_2SFFJ(f);
struct page *pg;
- pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+ pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
(void *)jffs2_do_readpage_unlock, inode);
if (IS_ERR(pg))
return (void *)pg;
@@ -701,7 +701,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c,
struct page *pg = (void *)*priv;
kunmap(pg);
- page_cache_release(pg);
+ put_page(pg);
}
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 7e553f286775..9ed0f26cf023 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -552,7 +552,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
goto upnout;
}
/* We found a datanode. Do the GC */
- if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
+ if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) {
/* It crosses a page boundary. Therefore, it must be a hole. */
ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
} else {
@@ -1192,8 +1192,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
struct jffs2_node_frag *frag;
uint32_t min, max;
- min = start & ~(PAGE_CACHE_SIZE-1);
- max = min + PAGE_CACHE_SIZE;
+ min = start & ~(PAGE_SIZE-1);
+ max = min + PAGE_SIZE;
frag = jffs2_lookup_node_frag(&f->fragtree, start);
@@ -1351,7 +1351,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
datalen = end - offset;
- writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
+ writebuf = pg_ptr + (offset & (PAGE_SIZE -1));
comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 9a5449bc3afb..b86c78d178c6 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -90,7 +90,7 @@ uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list,
/* If the last fragment starts at the RAM page boundary, it is
* REF_PRISTINE irrespective of its size. */
- if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
+ if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
frag->ofs, frag->ofs + frag->size);
frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
@@ -237,7 +237,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r
If so, both 'this' and the new node get marked REF_NORMAL so
the GC can take a look.
*/
- if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
+ if (lastend && (lastend-1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
if (this->node)
mark_ref_normal(this->node->raw);
mark_ref_normal(newfrag->node->raw);
@@ -382,7 +382,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in
/* If we now share a page with other nodes, mark either previous
or next node REF_NORMAL, as appropriate. */
- if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
+ if (newfrag->ofs & (PAGE_SIZE-1)) {
struct jffs2_node_frag *prev = frag_prev(newfrag);
mark_ref_normal(fn->raw);
@@ -391,7 +391,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in
mark_ref_normal(prev->node->raw);
}
- if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
+ if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) {
struct jffs2_node_frag *next = frag_next(newfrag);
if (next) {
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index b634de4c8101..7fb187ab2682 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -172,8 +172,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
beginning of a page and runs to the end of the file, or if
it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
*/
- if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
- ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
+ if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) ||
+ ( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) &&
(je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) {
flash_ofs |= REF_PRISTINE;
} else {
@@ -366,7 +366,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
break;
}
mutex_lock(&f->sem);
- datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
+ datalen = min_t(uint32_t, writelen,
+ PAGE_SIZE - (offset & (PAGE_SIZE-1)));
cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index a3eb316b1ac3..b60e015cc757 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -80,7 +80,7 @@ static inline void lock_metapage(struct metapage *mp)
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;
-#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
+#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
#if MPS_PER_PAGE > 1
@@ -316,7 +316,7 @@ static void last_write_complete(struct page *page)
struct metapage *mp;
unsigned int offset;
- for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+ for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
mp = page_to_mp(page, offset);
if (mp && test_bit(META_io, &mp->flag)) {
if (mp->lsn)
@@ -366,12 +366,12 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
int bad_blocks = 0;
page_start = (sector_t)page->index <<
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ (PAGE_SHIFT - inode->i_blkbits);
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
set_page_writeback(page);
- for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+ for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
mp = page_to_mp(page, offset);
if (!mp || !test_bit(META_dirty, &mp->flag))
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
bio = NULL;
} else
inc_io(page);
- xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
+ xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
pblock = metapage_get_blocks(inode, lblock, &xlen);
if (!pblock) {
printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
@@ -485,7 +485,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
struct inode *inode = page->mapping->host;
struct bio *bio = NULL;
int block_offset;
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
sector_t page_start; /* address of page in fs blocks */
sector_t pblock;
int xlen;
@@ -494,7 +494,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
BUG_ON(!PageLocked(page));
page_start = (sector_t)page->index <<
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ (PAGE_SHIFT - inode->i_blkbits);
block_offset = 0;
while (block_offset < blocks_per_page) {
@@ -542,7 +542,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
int ret = 1;
int offset;
- for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+ for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
mp = page_to_mp(page, offset);
if (!mp)
@@ -568,7 +568,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
static void metapage_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
- BUG_ON(offset || length < PAGE_CACHE_SIZE);
+ BUG_ON(offset || length < PAGE_SIZE);
BUG_ON(PageWriteback(page));
@@ -599,10 +599,10 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
inode->i_ino, lblock, absolute);
l2bsize = inode->i_blkbits;
- l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
+ l2BlocksPerPage = PAGE_SHIFT - l2bsize;
page_index = lblock >> l2BlocksPerPage;
page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
- if ((page_offset + size) > PAGE_CACHE_SIZE) {
+ if ((page_offset + size) > PAGE_SIZE) {
jfs_err("MetaData crosses page boundary!!");
jfs_err("lblock = %lx, size = %d", lblock, size);
dump_stack();
@@ -621,7 +621,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
mapping = inode->i_mapping;
}
- if (new && (PSIZE == PAGE_CACHE_SIZE)) {
+ if (new && (PSIZE == PAGE_SIZE)) {
page = grab_cache_page(mapping, page_index);
if (!page) {
jfs_err("grab_cache_page failed!");
@@ -693,7 +693,7 @@ unlock:
void grab_metapage(struct metapage * mp)
{
jfs_info("grab_metapage: mp = 0x%p", mp);
- page_cache_get(mp->page);
+ get_page(mp->page);
lock_page(mp->page);
mp->count++;
lock_metapage(mp);
@@ -706,12 +706,12 @@ void force_metapage(struct metapage *mp)
jfs_info("force_metapage: mp = 0x%p", mp);
set_bit(META_forcewrite, &mp->flag);
clear_bit(META_sync, &mp->flag);
- page_cache_get(page);
+ get_page(page);
lock_page(page);
set_page_dirty(page);
write_one_page(page, 1);
clear_bit(META_forcewrite, &mp->flag);
- page_cache_release(page);
+ put_page(page);
}
void hold_metapage(struct metapage *mp)
@@ -726,7 +726,7 @@ void put_metapage(struct metapage *mp)
unlock_page(mp->page);
return;
}
- page_cache_get(mp->page);
+ get_page(mp->page);
mp->count++;
lock_metapage(mp);
unlock_page(mp->page);
@@ -746,7 +746,7 @@ void release_metapage(struct metapage * mp)
assert(mp->count);
if (--mp->count || mp->nohomeok) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return;
}
@@ -764,13 +764,13 @@ void release_metapage(struct metapage * mp)
drop_metapage(page, mp);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
sector_t lblock;
- int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
+ int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
int BlocksPerPage = 1 << l2BlocksPerPage;
/* All callers are interested in block device's mapping */
struct address_space *mapping =
@@ -788,7 +788,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
if (!page)
continue;
- for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+ for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
mp = page_to_mp(page, offset);
if (!mp)
continue;
@@ -803,7 +803,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
remove_from_logsync(mp);
}
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
}
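jfs packs several PSIZE metapages into each page, which is why all these loops advance in PSIZE strides; MPS_PER_PAGE in the kernel is PAGE_SIZE >> L2PSIZE, written as a division below for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096U			/* illustration only */
#define PSIZE     1024U			/* jfs metapage size (L2PSIZE = 10) */
#define MPS_PER_PAGE (PAGE_SIZE / PSIZE)

int main(void)
{
	unsigned int offset;

	printf("%u metapages per page:\n", MPS_PER_PAGE);
	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE)
		printf("  metapage at page offset %u\n", offset);
	return 0;
}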
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index 337e9e51ac06..a869fb4a20d6 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -106,7 +106,7 @@ static inline void metapage_nohomeok(struct metapage *mp)
lock_page(page);
if (!mp->nohomeok++) {
mark_metapage_dirty(mp);
- page_cache_get(page);
+ get_page(page);
wait_on_page_writeback(page);
}
unlock_page(page);
@@ -128,7 +128,7 @@ static inline void metapage_wait_for_io(struct metapage *mp)
static inline void _metapage_homeok(struct metapage *mp)
{
if (!--mp->nohomeok)
- page_cache_release(mp->page);
+ put_page(mp->page);
}
static inline void metapage_homeok(struct metapage *mp)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4f5d85ba8e23..78d599198bf5 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -596,7 +596,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
* Page cache is indexed by long.
* I would use MAX_LFS_FILESIZE, but it's only half as big
*/
- sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1,
+ sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1,
(u64)sb->s_maxbytes);
#endif
sb->s_time_gran = 1;
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index b67dbccdaf88..f73541fbe7af 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -138,8 +138,8 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic)
struct dentry *root;
info->sb = sb;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = magic;
sb->s_op = &kernfs_sops;
sb->s_time_gran = 1;
diff --git a/fs/libfs.c b/fs/libfs.c
index 03332f4bdedf..43dd28031a1a 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -25,7 +25,7 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
{
struct inode *inode = d_inode(dentry);
generic_fillattr(inode, stat);
- stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
+ stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
return 0;
}
EXPORT_SYMBOL(simple_getattr);
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(simple_getattr);
int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
buf->f_type = dentry->d_sb->s_magic;
- buf->f_bsize = PAGE_CACHE_SIZE;
+ buf->f_bsize = PAGE_SIZE;
buf->f_namelen = NAME_MAX;
return 0;
}
@@ -395,7 +395,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
struct page *page;
pgoff_t index;
- index = pos >> PAGE_CACHE_SHIFT;
+ index = pos >> PAGE_SHIFT;
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
@@ -403,10 +403,10 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
*pagep = page;
- if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ if (!PageUptodate(page) && (len != PAGE_SIZE)) {
+ unsigned from = pos & (PAGE_SIZE - 1);
- zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
+ zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
}
return 0;
}
@@ -442,7 +442,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
/* zero the stale part of the page if we did a short copy */
if (copied < len) {
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
zero_user(page, from + copied, len - copied);
}
@@ -458,7 +458,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
set_page_dirty(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return copied;
}
@@ -477,8 +477,8 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
struct dentry *dentry;
int i;
- s->s_blocksize = PAGE_CACHE_SIZE;
- s->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ s->s_blocksize = PAGE_SIZE;
+ s->s_blocksize_bits = PAGE_SHIFT;
s->s_magic = magic;
s->s_op = &simple_super_operations;
s->s_time_gran = 1;
@@ -994,12 +994,12 @@ int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
{
u64 last_fs_block = num_blocks - 1;
u64 last_fs_page =
- last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits);
+ last_fs_block >> (PAGE_SHIFT - blocksize_bits);
if (unlikely(num_blocks == 0))
return 0;
- if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT))
+ if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
return -EINVAL;
if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
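simple_write_begin() above zeroes everything the caller is not about to overwrite on a page that is not yet uptodate, in two segments around [from, from + len). A userspace model (zero_segments is a hypothetical stand-in for zero_user_segments()):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096U		/* illustration only */

static void zero_segments(char *page, unsigned s1, unsigned e1,
			  unsigned s2, unsigned e2)
{
	memset(page + s1, 0, e1 - s1);
	memset(page + s2, 0, e2 - s2);
}

int main(void)
{
	static char page[PAGE_SIZE];
	unsigned pos = 5000, len = 100;
	unsigned from = pos & (PAGE_SIZE - 1);	/* 904 */

	memset(page, 'X', sizeof(page));	/* stale data */
	zero_segments(page, 0, from, from + len, PAGE_SIZE);
	printf("page[%u]=%d page[%u]=%c page[%u]=%d\n",
	       from - 1, page[from - 1], from, page[from],
	       from + len, page[from + len]);
	return 0;
}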
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index a709d80c8ebc..cc26f8f215f5 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -64,7 +64,7 @@ static void writeseg_end_io(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
end_page_writeback(bvec->bv_page);
- page_cache_release(bvec->bv_page);
+ put_page(bvec->bv_page);
}
bio_put(bio);
if (atomic_dec_and_test(&super->s_pending_writes))
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index 9c501449450d..b76a62b1978f 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -46,9 +46,9 @@ static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
- BUG_ON(len > PAGE_CACHE_SIZE);
- page_start = ofs & PAGE_CACHE_MASK;
- page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
+ BUG_ON(len > PAGE_SIZE);
+ page_start = ofs & PAGE_MASK;
+ page_end = PAGE_ALIGN(ofs + len) - 1;
ret = mtd_write(mtd, ofs, len, &retlen, buf);
if (ret || (retlen != len))
return -EIO;
@@ -82,7 +82,7 @@ static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
if (!page)
continue;
memset(page_address(page), 0xFF, PAGE_SIZE);
- page_cache_release(page);
+ put_page(page);
}
return 0;
}
@@ -195,7 +195,7 @@ static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
page_address(page));
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (err)
return err;
}
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 542468e9bfb4..ddbed2be5366 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -183,7 +183,7 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
if (name->len != be16_to_cpu(dd->namelen) ||
memcmp(name->name, dd->name, name->len)) {
kunmap_atomic(dd);
- page_cache_release(page);
+ put_page(page);
continue;
}
@@ -238,7 +238,7 @@ static int logfs_unlink(struct inode *dir, struct dentry *dentry)
return PTR_ERR(page);
}
index = page->index;
- page_cache_release(page);
+ put_page(page);
mutex_lock(&super->s_dirop_mutex);
logfs_add_transaction(dir, ta);
@@ -316,7 +316,7 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx)
be16_to_cpu(dd->namelen),
be64_to_cpu(dd->ino), dd->type);
kunmap(page);
- page_cache_release(page);
+ put_page(page);
if (full)
break;
}
@@ -349,7 +349,7 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
dd = kmap_atomic(page);
ino = be64_to_cpu(dd->ino);
kunmap_atomic(dd);
- page_cache_release(page);
+ put_page(page);
inode = logfs_iget(dir->i_sb, ino);
if (IS_ERR(inode))
@@ -392,7 +392,7 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
err = logfs_write_buf(dir, page, WF_LOCK);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (!err)
grow_dir(dir, index);
return err;
@@ -561,7 +561,7 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
map = kmap_atomic(page);
memcpy(dd, map, sizeof(*dd));
kunmap_atomic(map);
- page_cache_release(page);
+ put_page(page);
return 0;
}
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 61eaeb1b6cac..f01ddfb1a03b 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -15,21 +15,21 @@ static int logfs_write_begin(struct file *file, struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct page *page;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
*pagep = page;
- if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
+ if ((len == PAGE_SIZE) || PageUptodate(page))
return 0;
- if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
- unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+ if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+ unsigned start = pos & (PAGE_SIZE - 1);
unsigned end = start + len;
/* Reading beyond i_size is simple: memset to zero */
- zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+ zero_user_segments(page, 0, start, end, PAGE_SIZE);
return 0;
}
return logfs_readpage_nolock(page);
@@ -41,11 +41,11 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
{
struct inode *inode = mapping->host;
pgoff_t index = page->index;
- unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned start = pos & (PAGE_SIZE - 1);
unsigned end = start + copied;
int ret = 0;
- BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize);
+ BUG_ON(PAGE_SIZE != inode->i_sb->s_blocksize);
BUG_ON(page->index > I3_BLOCKS);
if (copied < len) {
@@ -61,8 +61,8 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
if (copied == 0)
goto out; /* FIXME: do we need to update inode? */
- if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) {
- i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end);
+ if (i_size_read(inode) < (index << PAGE_SHIFT) + end) {
+ i_size_write(inode, (index << PAGE_SHIFT) + end);
mark_inode_dirty_sync(inode);
}
@@ -75,7 +75,7 @@ static int logfs_write_end(struct file *file, struct address_space *mapping,
}
out:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return ret ? ret : copied;
}
@@ -118,7 +118,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
loff_t i_size = i_size_read(inode);
- pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ pgoff_t end_index = i_size >> PAGE_SHIFT;
unsigned offset;
u64 bix;
level_t level;
@@ -142,7 +142,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
return __logfs_writepage(page);
/* Is the page fully outside i_size? (truncate in progress) */
- offset = i_size & (PAGE_CACHE_SIZE-1);
+ offset = i_size & (PAGE_SIZE-1);
if (bix > end_index || offset == 0) {
unlock_page(page);
return 0; /* don't care */
@@ -155,7 +155,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ zero_user_segment(page, offset, PAGE_SIZE);
return __logfs_writepage(page);
}
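
The logfs_writepage() hunks above keep the standard straddling-page rule: when i_size ends inside the page, the bytes from that offset to the end of the page are zeroed before writeback so stale data past EOF never reaches the medium. A sketch of the index math, assuming 4 KiB pages and an illustrative 10000-byte file:

#include <stdio.h>

#define PAGE_SIZE  4096UL              /* assumed page size */
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long i_size = 10000;               /* example file size */
	unsigned long end_index = i_size >> PAGE_SHIFT;  /* page 2 */
	unsigned offset = i_size & (PAGE_SIZE - 1);      /* 1808 valid bytes */

	/* mirrors zero_user_segment(page, offset, PAGE_SIZE) */
	printf("page %lu: zero bytes [%u, %lu)\n", end_index, offset, PAGE_SIZE);
	return 0;
}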
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 20973c9e52f8..3fb8c6d67303 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -281,7 +281,7 @@ static struct page *logfs_get_read_page(struct inode *inode, u64 bix,
static void logfs_put_read_page(struct page *page)
{
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
static void logfs_lock_write_page(struct page *page)
@@ -323,7 +323,7 @@ repeat:
return NULL;
err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS);
if (unlikely(err)) {
- page_cache_release(page);
+ put_page(page);
if (err == -EEXIST)
goto repeat;
return NULL;
@@ -342,7 +342,7 @@ static void logfs_unlock_write_page(struct page *page)
static void logfs_put_write_page(struct page *page)
{
logfs_unlock_write_page(page);
- page_cache_release(page);
+ put_page(page);
}
static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level,
@@ -562,7 +562,7 @@ static void indirect_free_block(struct super_block *sb,
if (PagePrivate(page)) {
ClearPagePrivate(page);
- page_cache_release(page);
+ put_page(page);
set_page_private(page, 0);
}
__free_block(sb, block);
@@ -655,7 +655,7 @@ static void alloc_data_block(struct inode *inode, struct page *page)
block->page = page;
SetPagePrivate(page);
- page_cache_get(page);
+ get_page(page);
set_page_private(page, (unsigned long) block);
block->ops = &indirect_block_ops;
@@ -709,7 +709,7 @@ static u64 block_get_pointer(struct page *page, int index)
static int logfs_read_empty(struct page *page)
{
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
return 0;
}
@@ -1660,7 +1660,7 @@ static int truncate_data_block(struct inode *inode, struct page *page,
if (err)
return err;
- zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE);
+ zero_user_segment(page, size - pageofs, PAGE_SIZE);
return logfs_segment_write(inode, page, shadow);
}
@@ -1919,7 +1919,7 @@ static void move_page_to_inode(struct inode *inode, struct page *page)
block->page = NULL;
if (PagePrivate(page)) {
ClearPagePrivate(page);
- page_cache_release(page);
+ put_page(page);
set_page_private(page, 0);
}
}
@@ -1940,7 +1940,7 @@ static void move_inode_to_page(struct page *page, struct inode *inode)
if (!PagePrivate(page)) {
SetPagePrivate(page);
- page_cache_get(page);
+ get_page(page);
set_page_private(page, (unsigned long) block);
}
@@ -1971,7 +1971,7 @@ int logfs_read_inode(struct inode *inode)
logfs_disk_to_inode(di, inode);
kunmap_atomic(di);
move_page_to_inode(inode, page);
- page_cache_release(page);
+ put_page(page);
return 0;
}
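
Several hunks in this file pair SetPagePrivate() with get_page() and ClearPagePrivate() with put_page(): attaching logfs block state pins the page with one extra reference, and detaching drops it. A userspace model of that pairing; the struct and helper definitions here are stand-ins, not the kernel API:

#include <assert.h>
#include <stdio.h>

struct page { int refcount; void *private; };

static void get_page(struct page *p) { p->refcount++; }
static void put_page(struct page *p) { assert(p->refcount > 0); p->refcount--; }

static void attach_block(struct page *p, void *block)
{
	if (!p->private) {       /* mirrors !PagePrivate(page) */
		get_page(p);     /* pin the page while it carries state */
		p->private = block;
	}
}

static void detach_block(struct page *p)
{
	if (p->private) {        /* mirrors PagePrivate(page) */
		p->private = NULL;
		put_page(p);     /* drop the pin taken in attach_block() */
	}
}

int main(void)
{
	struct page pg = { .refcount = 1, .private = NULL };
	int block;

	attach_block(&pg, &block);
	assert(pg.refcount == 2);
	detach_block(&pg);
	assert(pg.refcount == 1);
	printf("reference count balanced at %d\n", pg.refcount);
	return 0;
}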
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index d270e4b2ab6b..1efd6055f4b0 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -90,9 +90,9 @@ int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
if (!PagePrivate(page)) {
SetPagePrivate(page);
- page_cache_get(page);
+ get_page(page);
}
- page_cache_release(page);
+ put_page(page);
buf += copylen;
len -= copylen;
@@ -117,9 +117,9 @@ static void pad_partial_page(struct logfs_area *area)
memset(page_address(page) + offset, 0xff, len);
if (!PagePrivate(page)) {
SetPagePrivate(page);
- page_cache_get(page);
+ get_page(page);
}
- page_cache_release(page);
+ put_page(page);
}
}
@@ -129,20 +129,20 @@ static void pad_full_pages(struct logfs_area *area)
struct logfs_super *super = logfs_super(sb);
u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
u32 len = super->s_segsize - area->a_used_bytes;
- pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
- pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+ pgoff_t index = PAGE_ALIGN(ofs) >> PAGE_SHIFT;
+ pgoff_t no_indizes = len >> PAGE_SHIFT;
struct page *page;
while (no_indizes) {
page = get_mapping_page(sb, index, 0);
BUG_ON(!page); /* FIXME: reserve a pool */
SetPageUptodate(page);
- memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
+ memset(page_address(page), 0xff, PAGE_SIZE);
if (!PagePrivate(page)) {
SetPagePrivate(page);
- page_cache_get(page);
+ get_page(page);
}
- page_cache_release(page);
+ put_page(page);
index++;
no_indizes--;
}
@@ -411,7 +411,7 @@ int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf)
if (IS_ERR(page))
return PTR_ERR(page);
memcpy(buf, page_address(page) + offset, copylen);
- page_cache_release(page);
+ put_page(page);
buf += copylen;
len -= copylen;
@@ -499,7 +499,7 @@ static void move_btree_to_page(struct inode *inode, struct page *page,
if (!PagePrivate(page)) {
SetPagePrivate(page);
- page_cache_get(page);
+ get_page(page);
set_page_private(page, (unsigned long) block);
}
block->ops = &indirect_block_ops;
@@ -554,7 +554,7 @@ void move_page_to_btree(struct page *page)
if (PagePrivate(page)) {
ClearPagePrivate(page);
- page_cache_release(page);
+ put_page(page);
set_page_private(page, 0);
}
block->ops = &btree_block_ops;
@@ -723,9 +723,9 @@ void freeseg(struct super_block *sb, u32 segno)
continue;
if (PagePrivate(page)) {
ClearPagePrivate(page);
- page_cache_release(page);
+ put_page(page);
}
- page_cache_release(page);
+ put_page(page);
}
}
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 54360293bcb5..5751082dba52 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -48,7 +48,7 @@ void emergency_read_end(struct page *page)
if (page == emergency_page)
mutex_unlock(&emergency_mutex);
else
- page_cache_release(page);
+ put_page(page);
}
static void dump_segfile(struct super_block *sb)
@@ -206,7 +206,7 @@ static int write_one_sb(struct super_block *sb,
logfs_set_segment_erased(sb, segno, ec, 0);
logfs_write_ds(sb, ds, segno, ec);
err = super->s_devops->write_sb(sb, page);
- page_cache_release(page);
+ put_page(page);
return err;
}
@@ -366,24 +366,24 @@ static struct page *find_super_block(struct super_block *sb)
return NULL;
last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
if (!last || IS_ERR(last)) {
- page_cache_release(first);
+ put_page(first);
return NULL;
}
if (!logfs_check_ds(page_address(first))) {
- page_cache_release(last);
+ put_page(last);
return first;
}
/* First one didn't work, try the second superblock */
if (!logfs_check_ds(page_address(last))) {
- page_cache_release(first);
+ put_page(first);
return last;
}
/* Neither worked, sorry folks */
- page_cache_release(first);
- page_cache_release(last);
+ put_page(first);
+ put_page(last);
return NULL;
}
@@ -425,7 +425,7 @@ static int __logfs_read_sb(struct super_block *sb)
super->s_data_levels = ds->ds_data_levels;
super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels
+ super->s_data_levels;
- page_cache_release(page);
+ put_page(page);
return 0;
}
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index d19ac258105a..33957c07cd11 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -28,7 +28,7 @@ const struct file_operations minix_dir_operations = {
static inline void dir_put_page(struct page *page)
{
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
/*
@@ -38,10 +38,10 @@ static inline void dir_put_page(struct page *page)
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
- unsigned last_byte = PAGE_CACHE_SIZE;
+ unsigned last_byte = PAGE_SIZE;
- if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
- last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
+ if (page_nr == (inode->i_size >> PAGE_SHIFT))
+ last_byte = inode->i_size & (PAGE_SIZE - 1);
return last_byte;
}
@@ -92,8 +92,8 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
if (pos >= inode->i_size)
return 0;
- offset = pos & ~PAGE_CACHE_MASK;
- n = pos >> PAGE_CACHE_SHIFT;
+ offset = pos & ~PAGE_MASK;
+ n = pos >> PAGE_SHIFT;
for ( ; n < npages; n++, offset = 0) {
char *p, *kaddr, *limit;
@@ -229,7 +229,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
lock_page(page);
kaddr = (char*)page_address(page);
dir_end = kaddr + minix_last_byte(dir, n);
- limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
+ limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
de = (minix_dirent *)p;
de3 = (minix3_dirent *)p;
@@ -327,7 +327,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
}
kaddr = kmap_atomic(page);
- memset(kaddr, 0, PAGE_CACHE_SIZE);
+ memset(kaddr, 0, PAGE_SIZE);
if (sbi->s_version == MINIX_V3) {
minix3_dirent *de3 = (minix3_dirent *)kaddr;
@@ -350,7 +350,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
- page_cache_release(page);
+ put_page(page);
return err;
}
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index a795a11e50c7..2887d1d95ce2 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -243,11 +243,11 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
out_dir:
if (dir_de) {
kunmap(dir_page);
- page_cache_release(dir_page);
+ put_page(dir_page);
}
out_old:
kunmap(old_page);
- page_cache_release(old_page);
+ put_page(old_page);
out:
return err;
}
diff --git a/fs/mpage.c b/fs/mpage.c
index 6bd9fd90964e..eedc644b78d7 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -107,7 +107,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
* don't make any buffers if there is only one buffer on
* the page and the page just needs to be set up to date
*/
- if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
+ if (inode->i_blkbits == PAGE_SHIFT &&
buffer_uptodate(bh)) {
SetPageUptodate(page);
return;
@@ -145,7 +145,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
{
struct inode *inode = page->mapping->host;
const unsigned blkbits = inode->i_blkbits;
- const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+ const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
const unsigned blocksize = 1 << blkbits;
sector_t block_in_file;
sector_t last_block;
@@ -162,7 +162,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
if (page_has_buffers(page))
goto confused;
- block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+ block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
last_block = block_in_file + nr_pages * blocks_per_page;
last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
if (last_block > last_block_in_file)
@@ -249,7 +249,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
}
if (first_hole != blocks_per_page) {
- zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
+ zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
if (first_hole == 0) {
SetPageUptodate(page);
unlock_page(page);
@@ -331,7 +331,7 @@ confused:
*
* then this code just gives up and calls the buffer_head-based read function.
* It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
*
* BH_Boundary explanation:
*
@@ -380,7 +380,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
&first_logical_block,
get_block, gfp);
}
- page_cache_release(page);
+ put_page(page);
}
BUG_ON(!list_empty(pages));
if (bio)
@@ -472,7 +472,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
struct inode *inode = page->mapping->host;
const unsigned blkbits = inode->i_blkbits;
unsigned long end_index;
- const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+ const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
sector_t last_block;
sector_t block_in_file;
sector_t blocks[MAX_BUF_PER_PAGE];
@@ -542,7 +542,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
* The page has no buffers: map it to disk
*/
BUG_ON(!PageUptodate(page));
- block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+ block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
last_block = (i_size - 1) >> blkbits;
map_bh.b_page = page;
for (page_block = 0; page_block < blocks_per_page; ) {
@@ -574,7 +574,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
first_unmapped = page_block;
page_is_mapped:
- end_index = i_size >> PAGE_CACHE_SHIFT;
+ end_index = i_size >> PAGE_SHIFT;
if (page->index >= end_index) {
/*
* The page straddles i_size. It must be zeroed out on each
@@ -584,11 +584,11 @@ page_is_mapped:
* is zeroed when mapped, and writes to that region are not
* written out to the file."
*/
- unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
+ unsigned offset = i_size & (PAGE_SIZE - 1);
if (page->index > end_index || !offset)
goto confused;
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ zero_user_segment(page, offset, PAGE_SIZE);
}
/*
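
do_mpage_readpage() and __mpage_writepage() above both derive the first block of a page with the same shift. A sketch, assuming 4 KiB pages and 1 KiB blocks (both illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                  /* assumed: 4 KiB pages */

int main(void)
{
	unsigned blkbits = 10;             /* 1 KiB blocks */
	unsigned blocks_per_page = 1u << (PAGE_SHIFT - blkbits); /* 4 */
	uint64_t page_index = 3;

	/* page 3 holds blocks 12..15 of the file */
	uint64_t block_in_file = page_index << (PAGE_SHIFT - blkbits);

	printf("page %llu starts at block %llu (%u blocks per page)\n",
	       (unsigned long long)page_index,
	       (unsigned long long)block_in_file, blocks_per_page);
	return 0;
}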
diff --git a/fs/namei.c b/fs/namei.c
index c0d551fc43a0..bdcea8a018b8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1740,15 +1740,17 @@ static int walk_component(struct nameidata *nd, int flags)
nd->flags);
if (IS_ERR(path.dentry))
return PTR_ERR(path.dentry);
- if (unlikely(d_is_negative(path.dentry))) {
- dput(path.dentry);
- return -ENOENT;
- }
+
path.mnt = nd->path.mnt;
err = follow_managed(&path, nd);
if (unlikely(err < 0))
return err;
+ if (unlikely(d_is_negative(path.dentry))) {
+ path_to_nameidata(&path, nd);
+ return -ENOENT;
+ }
+
seq = 0; /* we are already out of RCU mode */
inode = d_backing_inode(path.dentry);
}
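
Unlike the mechanical renames that dominate this diff, the walk_component() hunk above changes behaviour: the d_is_negative() test moves to after follow_managed(), so it applies to the dentry actually reached once any mounts have been crossed, and the failure path releases the result with path_to_nameidata() rather than a bare dput(). After follow_managed() the path may hold a vfsmount reference in addition to the dentry, and dropping only the dentry would leak it.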
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index b7f8eaeea5d8..bfdad003ee56 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -510,7 +510,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
kunmap(ctl.page);
SetPageUptodate(ctl.page);
unlock_page(ctl.page);
- page_cache_release(ctl.page);
+ put_page(ctl.page);
ctl.page = NULL;
}
ctl.idx = 0;
@@ -520,7 +520,7 @@ invalid_cache:
if (ctl.page) {
kunmap(ctl.page);
unlock_page(ctl.page);
- page_cache_release(ctl.page);
+ put_page(ctl.page);
ctl.page = NULL;
}
ctl.cache = cache;
@@ -554,14 +554,14 @@ finished:
kunmap(ctl.page);
SetPageUptodate(ctl.page);
unlock_page(ctl.page);
- page_cache_release(ctl.page);
+ put_page(ctl.page);
}
if (page) {
cache->head = ctl.head;
kunmap(page);
SetPageUptodate(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
out:
return result;
@@ -649,7 +649,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
kunmap(ctl.page);
SetPageUptodate(ctl.page);
unlock_page(ctl.page);
- page_cache_release(ctl.page);
+ put_page(ctl.page);
}
ctl.cache = NULL;
ctl.idx -= NCP_DIRCACHE_SIZE;
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 5233fbc1747a..17cfb743b5bf 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -191,7 +191,7 @@ struct ncp_cache_head {
int eof;
};
-#define NCP_DIRCACHE_SIZE ((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *)))
+#define NCP_DIRCACHE_SIZE ((int)(PAGE_SIZE/sizeof(struct dentry *)))
union ncp_dir_cache {
struct ncp_cache_head head;
struct dentry *dentry[NCP_DIRCACHE_SIZE];
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 02e4d87d2ed3..17a42e4eb872 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -231,7 +231,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
size_t bytes_left = header->args.count;
unsigned int pg_offset = header->args.pgbase, pg_len;
struct page **pages = header->args.pages;
- int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+ int pg_index = header->args.pgbase >> PAGE_SHIFT;
const bool is_dio = (header->dreq != NULL);
struct blk_plug plug;
int i;
@@ -263,13 +263,13 @@ bl_read_pagelist(struct nfs_pgio_header *header)
}
if (is_dio) {
- if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
- pg_len = PAGE_CACHE_SIZE - pg_offset;
+ if (pg_offset + bytes_left > PAGE_SIZE)
+ pg_len = PAGE_SIZE - pg_offset;
else
pg_len = bytes_left;
} else {
BUG_ON(pg_offset != 0);
- pg_len = PAGE_CACHE_SIZE;
+ pg_len = PAGE_SIZE;
}
if (is_hole(&be)) {
@@ -339,9 +339,9 @@ static void bl_write_cleanup(struct work_struct *work)
if (likely(!hdr->pnfs_error)) {
struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
- u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
+ u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
u64 end = (hdr->args.offset + hdr->args.count +
- PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;
+ PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
(end - start) >> SECTOR_SHIFT);
@@ -373,7 +373,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
loff_t offset = header->args.offset;
size_t count = header->args.count;
struct page **pages = header->args.pages;
- int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+ int pg_index = header->args.pgbase >> PAGE_SHIFT;
unsigned int pg_len;
struct blk_plug plug;
int i;
@@ -392,7 +392,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
blk_start_plug(&plug);
/* we always write out the whole page */
- offset = offset & (loff_t)PAGE_CACHE_MASK;
+ offset = offset & (loff_t)PAGE_MASK;
isect = offset >> SECTOR_SHIFT;
for (i = pg_index; i < header->page_array.npages; i++) {
@@ -408,7 +408,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
extent_length = be.be_length - (isect - be.be_f_offset);
}
- pg_len = PAGE_CACHE_SIZE;
+ pg_len = PAGE_SIZE;
bio = do_add_page_to_bio(bio, header->page_array.npages - i,
WRITE, isect, pages[i], &map, &be,
bl_end_io_write, par,
@@ -820,7 +820,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
pgoff_t end;
/* Optimize common case that writes from 0 to end of file */
- end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+ end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (end != inode->i_mapping->nrpages) {
rcu_read_lock();
end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
@@ -828,9 +828,9 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
}
if (!end)
- return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
+ return i_size_read(inode) - (idx << PAGE_SHIFT);
else
- return (end - idx) << PAGE_CACHE_SHIFT;
+ return (end - idx) << PAGE_SHIFT;
}
static void
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index bc21205309e0..18e6fd0b9506 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -40,8 +40,8 @@
#include "../pnfs.h"
#include "../netns.h"
-#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
-#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
+#define PAGE_CACHE_SECTORS (PAGE_SIZE >> SECTOR_SHIFT)
+#define PAGE_CACHE_SECTOR_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
struct pnfs_block_dev;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index d6d5d2a48e83..0c96528db94a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -736,7 +736,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
server->rsize = max_rpc_payload;
if (server->rsize > NFS_MAX_FILE_IO_SIZE)
server->rsize = NFS_MAX_FILE_IO_SIZE;
- server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ server->rpages = (server->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
server->backing_dev_info.name = "nfs";
server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
@@ -745,13 +745,13 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
server->wsize = max_rpc_payload;
if (server->wsize > NFS_MAX_FILE_IO_SIZE)
server->wsize = NFS_MAX_FILE_IO_SIZE;
- server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
server->dtsize = nfs_block_size(fsinfo->dtpref, NULL);
- if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES)
- server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES;
+ if (server->dtsize > PAGE_SIZE * NFS_MAX_READDIR_PAGES)
+ server->dtsize = PAGE_SIZE * NFS_MAX_READDIR_PAGES;
if (server->dtsize > server->rsize)
server->dtsize = server->rsize;
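
The rsize-to-rpages and wsize-to-wpages conversions above are round-up divisions by the page size written with shifts. A sketch, assuming 4 KiB pages and an illustrative rsize:

#include <stdio.h>

#define PAGE_SIZE  4096u               /* assumed page size */
#define PAGE_SHIFT 12

int main(void)
{
	unsigned rsize = 32769;            /* one byte past 8 full pages */
	unsigned rpages = (rsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("rsize=%u needs rpages=%u\n", rsize, rpages); /* 9 */
	return 0;
}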
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4bfa7d8bcade..33eb81738d03 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
again:
timestamp = jiffies;
gencount = nfs_inc_attr_generation_counter();
- error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
+ error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
NFS_SERVER(inode)->dtsize, desc->plus);
if (error < 0) {
/* We requested READDIRPLUS, but the server doesn't grok it */
@@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
count++;
if (desc->plus != 0)
- nfs_prime_dcache(desc->file->f_path.dentry, entry);
+ nfs_prime_dcache(file_dentry(desc->file), entry);
status = nfs_readdir_add_to_array(entry, page);
if (status != 0)
@@ -707,7 +707,7 @@ void cache_page_release(nfs_readdir_descriptor_t *desc)
{
if (!desc->page->mapping)
nfs_readdir_clear_array(desc->page);
- page_cache_release(desc->page);
+ put_page(desc->page);
desc->page = NULL;
}
@@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
*/
static int nfs_readdir(struct file *file, struct dir_context *ctx)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;
@@ -1923,7 +1923,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
* add_to_page_cache_lru() grabs an extra page refcount.
* Drop it here to avoid leaking this page later.
*/
- page_cache_release(page);
+ put_page(page);
} else
__free_page(page);
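
The f_path.dentry to file_dentry() substitutions in this file (and in fs/nfs/inode.c and fs/nfs/nfs4file.c below) are also not part of the PAGE_CACHE_* rename: when a file is opened through a stacking filesystem such as overlayfs, file->f_path.dentry can be the overlay's dentry, while file_dentry() returns a dentry belonging to the filesystem the file was actually opened on, which is what the NFS readdir and open-context paths need.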
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e5daa932b823..438d4e70742f 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -269,7 +269,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
unsigned int i;
for (i = 0; i < npages; i++)
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
@@ -1003,7 +1003,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
iov_iter_count(iter));
pos = iocb->ki_pos;
- end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT;
+ end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
inode_lock(inode);
@@ -1013,7 +1013,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
if (mapping->nrpages) {
result = invalidate_inode_pages2_range(mapping,
- pos >> PAGE_CACHE_SHIFT, end);
+ pos >> PAGE_SHIFT, end);
if (result)
goto out_unlock;
}
@@ -1042,7 +1042,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
if (mapping->nrpages) {
invalidate_inode_pages2_range(mapping,
- pos >> PAGE_CACHE_SHIFT, end);
+ pos >> PAGE_SHIFT, end);
}
inode_unlock(inode);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 89bf093d342a..be01095b97ae 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -320,7 +320,7 @@ static int nfs_want_read_modify_write(struct file *file, struct page *page,
loff_t pos, unsigned len)
{
unsigned int pglen = nfs_page_length(page);
- unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned int offset = pos & (PAGE_SIZE - 1);
unsigned int end = offset + len;
if (pnfs_ld_read_whole_page(file->f_mapping->host)) {
@@ -351,7 +351,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata)
{
int ret;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
int once_thru = 0;
@@ -380,12 +380,12 @@ start:
ret = nfs_flush_incompatible(file, page);
if (ret) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
} else if (!once_thru &&
nfs_want_read_modify_write(file, page, pos, len)) {
once_thru = 1;
ret = nfs_readpage(file, page);
- page_cache_release(page);
+ put_page(page);
if (!ret)
goto start;
}
@@ -396,7 +396,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned offset = pos & (PAGE_SIZE - 1);
struct nfs_open_context *ctx = nfs_file_open_context(file);
int status;
@@ -413,20 +413,20 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
if (pglen == 0) {
zero_user_segments(page, 0, offset,
- end, PAGE_CACHE_SIZE);
+ end, PAGE_SIZE);
SetPageUptodate(page);
} else if (end >= pglen) {
- zero_user_segment(page, end, PAGE_CACHE_SIZE);
+ zero_user_segment(page, end, PAGE_SIZE);
if (offset == 0)
SetPageUptodate(page);
} else
- zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
+ zero_user_segment(page, pglen, PAGE_SIZE);
}
status = nfs_updatepage(file, page, offset, copied);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (status < 0)
return status;
@@ -454,7 +454,7 @@ static void nfs_invalidate_page(struct page *page, unsigned int offset,
dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
page, offset, length);
- if (offset != 0 || length < PAGE_CACHE_SIZE)
+ if (offset != 0 || length < PAGE_SIZE)
return;
/* Cancel any unstarted writes on this page */
nfs_wb_page_cancel(page_file_mapping(page)->host, page);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 33d18c411905..738c84a42eb0 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -940,7 +940,7 @@ int nfs_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
- ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+ ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
nfs_file_set_open_context(filp, ctx);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 565f8135ae1f..f1d1d2c472e9 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -638,11 +638,11 @@ unsigned int nfs_page_length(struct page *page)
if (i_size > 0) {
pgoff_t page_index = page_file_index(page);
- pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+ pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
if (page_index < end_index)
- return PAGE_CACHE_SIZE;
+ return PAGE_SIZE;
if (page_index == end_index)
- return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
+ return ((i_size - 1) & ~PAGE_MASK) + 1;
}
return 0;
}
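
nfs_page_length() above computes how many bytes of a page lie inside i_size: a full PAGE_SIZE for interior pages, a partial tail for the page containing EOF, and zero beyond it. A userspace sketch of the same arithmetic, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096UL              /* assumed page size */
#define PAGE_SHIFT 12
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned page_length(uint64_t i_size, uint64_t page_index)
{
	if (i_size == 0)
		return 0;
	uint64_t end_index = (i_size - 1) >> PAGE_SHIFT;

	if (page_index < end_index)
		return PAGE_SIZE;                       /* fully inside the file */
	if (page_index == end_index)
		return ((i_size - 1) & ~PAGE_MASK) + 1; /* partial tail */
	return 0;                                       /* wholly past EOF */
}

int main(void)
{
	/* 10000-byte file: pages 0..1 full, page 2 holds 1808 bytes */
	printf("%u %u %u\n", page_length(10000, 0),
	       page_length(10000, 2), page_length(10000, 3));
	return 0;
}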
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 22c35abbee9d..d0390516467c 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -26,7 +26,7 @@ static int
nfs4_file_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
- struct dentry *dentry = filp->f_path.dentry;
+ struct dentry *dentry = file_dentry(filp);
struct dentry *parent = NULL;
struct inode *dir;
unsigned openflags = filp->f_flags;
@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
parent = dget_parent(dentry);
dir = d_inode(parent);
- ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+ ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
goto out;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 4e4441216804..88474a4fc669 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5001,7 +5001,7 @@ static int decode_space_limit(struct xdr_stream *xdr,
blocksize = be32_to_cpup(p);
maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
}
- maxsize >>= PAGE_CACHE_SHIFT;
+ maxsize >>= PAGE_SHIFT;
*pagemod_limit = min_t(u64, maxsize, ULONG_MAX);
return 0;
out_overflow:
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9aebffb40505..049c1b1f2932 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -486,7 +486,7 @@ static void __r4w_put_page(void *priv, struct page *page)
dprintk("%s: index=0x%lx\n", __func__,
(page == ZERO_PAGE(0)) ? -1UL : page->index);
if (ZERO_PAGE(0) != page)
- page_cache_release(page);
+ put_page(page);
return;
}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 8ce4f61cbaa5..1f6db4231057 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -342,7 +342,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
* update_nfs_request below if the region is not locked. */
req->wb_page = page;
req->wb_index = page_file_index(page);
- page_cache_get(page);
+ get_page(page);
req->wb_offset = offset;
req->wb_pgbase = offset;
req->wb_bytes = count;
@@ -392,7 +392,7 @@ static void nfs_clear_request(struct nfs_page *req)
struct nfs_lock_context *l_ctx = req->wb_lock_context;
if (page != NULL) {
- page_cache_release(page);
+ put_page(page);
req->wb_page = NULL;
}
if (l_ctx != NULL) {
@@ -904,7 +904,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
return false;
} else {
if (req->wb_pgbase != 0 ||
- prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+ prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
return false;
}
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 2fa483e6dbe2..89a5ef4df08a 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -841,7 +841,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
i_size = i_size_read(ino);
- lgp->args.minlength = PAGE_CACHE_SIZE;
+ lgp->args.minlength = PAGE_SIZE;
if (lgp->args.minlength > range->length)
lgp->args.minlength = range->length;
if (range->iomode == IOMODE_READ) {
@@ -1618,13 +1618,13 @@ lookup_again:
spin_unlock(&clp->cl_lock);
}
- pg_offset = arg.offset & ~PAGE_CACHE_MASK;
+ pg_offset = arg.offset & ~PAGE_MASK;
if (pg_offset) {
arg.offset -= pg_offset;
arg.length += pg_offset;
}
if (arg.length != NFS4_MAX_UINT64)
- arg.length = PAGE_CACHE_ALIGN(arg.length);
+ arg.length = PAGE_ALIGN(arg.length);
lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
atomic_dec(&lo->plh_outstanding);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index eb31e23e7def..6776d7a7839e 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -46,7 +46,7 @@ static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
static
int nfs_return_empty_page(struct page *page)
{
- zero_user(page, 0, PAGE_CACHE_SIZE);
+ zero_user(page, 0, PAGE_SIZE);
SetPageUptodate(page);
unlock_page(page);
return 0;
@@ -118,8 +118,8 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
unlock_page(page);
return PTR_ERR(new);
}
- if (len < PAGE_CACHE_SIZE)
- zero_user_segment(page, len, PAGE_CACHE_SIZE);
+ if (len < PAGE_SIZE)
+ zero_user_segment(page, len, PAGE_SIZE);
nfs_pageio_init_read(&pgio, inode, false,
&nfs_async_read_completion_ops);
@@ -295,7 +295,7 @@ int nfs_readpage(struct file *file, struct page *page)
int error;
dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
- page, PAGE_CACHE_SIZE, page_file_index(page));
+ page, PAGE_SIZE, page_file_index(page));
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
nfs_add_stats(inode, NFSIOS_READPAGES, 1);
@@ -361,8 +361,8 @@ readpage_async_filler(void *data, struct page *page)
if (IS_ERR(new))
goto out_error;
- if (len < PAGE_CACHE_SIZE)
- zero_user_segment(page, len, PAGE_CACHE_SIZE);
+ if (len < PAGE_SIZE)
+ zero_user_segment(page, len, PAGE_SIZE);
if (!nfs_pageio_add_request(desc->pgio, new)) {
nfs_list_remove_request(new);
nfs_readpage_release(new);
@@ -424,8 +424,8 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
pgm = &pgio.pg_mirrors[0];
NFS_I(inode)->read_io += pgm->pg_bytes_written;
- npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
put_nfs_open_context(desc.ctx);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5754835a2886..5f4fd53e5764 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -150,7 +150,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
spin_lock(&inode->i_lock);
i_size = i_size_read(inode);
- end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+ end_index = (i_size - 1) >> PAGE_SHIFT;
if (i_size > 0 && page_file_index(page) < end_index)
goto out;
end = page_file_offset(page) + ((loff_t)offset+count);
@@ -1942,7 +1942,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
{
loff_t range_start = page_file_offset(page);
- loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+ loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 0,
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 27f75bcbeb30..a9fb3636c142 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -458,7 +458,7 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
struct buffer_head *pbh;
__u64 key;
- key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT -
+ key = page_index(bh->b_page) << (PAGE_SHIFT -
bmap->b_inode->i_blkbits);
for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
key++;
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index a35ae35e6932..e0c9daf9aa22 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -62,7 +62,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
set_buffer_uptodate(bh);
unlock_page(bh->b_page);
- page_cache_release(bh->b_page);
+ put_page(bh->b_page);
return bh;
}
@@ -128,7 +128,7 @@ found:
out_locked:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return err;
}
@@ -146,7 +146,7 @@ void nilfs_btnode_delete(struct buffer_head *bh)
pgoff_t index = page_index(page);
int still_dirty;
- page_cache_get(page);
+ get_page(page);
lock_page(page);
wait_on_page_writeback(page);
@@ -154,7 +154,7 @@ void nilfs_btnode_delete(struct buffer_head *bh)
still_dirty = PageDirty(page);
mapping = page->mapping;
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (!still_dirty && mapping)
invalidate_inode_pages2_range(mapping, index, index);
@@ -181,7 +181,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
obh = ctxt->bh;
ctxt->newbh = NULL;
- if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
+ if (inode->i_blkbits == PAGE_SHIFT) {
lock_page(obh->b_page);
/*
* We cannot call radix_tree_preload for the kernels older
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 6b8b92b19cec..e08f064e4bd7 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -58,7 +58,7 @@ static inline unsigned nilfs_chunk_size(struct inode *inode)
static inline void nilfs_put_page(struct page *page)
{
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
/*
@@ -69,9 +69,9 @@ static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr)
{
unsigned last_byte = inode->i_size;
- last_byte -= page_nr << PAGE_CACHE_SHIFT;
- if (last_byte > PAGE_CACHE_SIZE)
- last_byte = PAGE_CACHE_SIZE;
+ last_byte -= page_nr << PAGE_SHIFT;
+ if (last_byte > PAGE_SIZE)
+ last_byte = PAGE_SIZE;
return last_byte;
}
@@ -109,12 +109,12 @@ static void nilfs_check_page(struct page *page)
unsigned chunk_size = nilfs_chunk_size(dir);
char *kaddr = page_address(page);
unsigned offs, rec_len;
- unsigned limit = PAGE_CACHE_SIZE;
+ unsigned limit = PAGE_SIZE;
struct nilfs_dir_entry *p;
char *error;
- if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
- limit = dir->i_size & ~PAGE_CACHE_MASK;
+ if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+ limit = dir->i_size & ~PAGE_MASK;
if (limit & (chunk_size - 1))
goto Ebadsize;
if (!limit)
@@ -161,7 +161,7 @@ Espan:
bad_entry:
nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
- dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
(unsigned long) le64_to_cpu(p->inode),
rec_len, p->name_len);
goto fail;
@@ -170,7 +170,7 @@ Eend:
nilfs_error(sb, "nilfs_check_page",
"entry in directory #%lu spans the page boundary"
"offset=%lu, inode=%lu",
- dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
(unsigned long) le64_to_cpu(p->inode));
fail:
SetPageChecked(page);
@@ -256,8 +256,8 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
loff_t pos = ctx->pos;
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
- unsigned int offset = pos & ~PAGE_CACHE_MASK;
- unsigned long n = pos >> PAGE_CACHE_SHIFT;
+ unsigned int offset = pos & ~PAGE_MASK;
+ unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
/* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
@@ -272,7 +272,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
if (IS_ERR(page)) {
nilfs_error(sb, __func__, "bad page in #%lu",
inode->i_ino);
- ctx->pos += PAGE_CACHE_SIZE - offset;
+ ctx->pos += PAGE_SIZE - offset;
return -EIO;
}
kaddr = page_address(page);
@@ -361,7 +361,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
if (++n >= npages)
n = 0;
/* next page is past the blocks we've got */
- if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
+ if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
nilfs_error(dir->i_sb, __func__,
"dir %lu size %lld exceeds block count %llu",
dir->i_ino, dir->i_size,
@@ -401,7 +401,7 @@ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
if (de) {
res = le64_to_cpu(de->inode);
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
return res;
}
@@ -460,7 +460,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
kaddr = page_address(page);
dir_end = kaddr + nilfs_last_byte(dir, n);
de = (struct nilfs_dir_entry *)kaddr;
- kaddr += PAGE_CACHE_SIZE - reclen;
+ kaddr += PAGE_SIZE - reclen;
while ((char *)de <= kaddr) {
if ((char *)de == dir_end) {
/* We hit i_size */
@@ -603,7 +603,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
kunmap_atomic(kaddr);
nilfs_commit_chunk(page, mapping, 0, chunk_size);
fail:
- page_cache_release(page);
+ put_page(page);
return err;
}
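
nilfs_last_byte() above clamps how many bytes of a directory page are valid, the same pattern minix_last_byte() uses earlier in this diff. A sketch with assumed 4 KiB pages and an illustrative 10000-byte directory:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096UL              /* assumed page size */
#define PAGE_SHIFT 12

static unsigned last_byte(uint64_t i_size, unsigned long page_nr)
{
	uint64_t last = i_size - ((uint64_t)page_nr << PAGE_SHIFT);

	return last > PAGE_SIZE ? PAGE_SIZE : (unsigned)last;
}

int main(void)
{
	/* pages 0 and 1 are full; page 2 holds the 1808-byte tail */
	printf("%u %u\n", last_byte(10000, 0), last_byte(10000, 2));
	return 0;
}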
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 748ca238915a..0224b7826ace 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -115,7 +115,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
failed:
unlock_page(bh->b_page);
- page_cache_release(bh->b_page);
+ put_page(bh->b_page);
return err;
}
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 21a1e2e0d92f..534631358b13 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -249,7 +249,7 @@ static int nilfs_set_page_dirty(struct page *page)
if (nr_dirty)
nilfs_set_file_dirty(inode, nr_dirty);
} else if (ret) {
- unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
nilfs_set_file_dirty(inode, nr_dirty);
}
@@ -291,7 +291,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
- unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned start = pos & (PAGE_SIZE - 1);
unsigned nr_dirty;
int err;
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 1125f40233ff..f6982b9153d5 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -110,7 +110,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
failed_bh:
unlock_page(bh->b_page);
- page_cache_release(bh->b_page);
+ put_page(bh->b_page);
brelse(bh);
failed_unlock:
@@ -170,7 +170,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
failed_bh:
unlock_page(bh->b_page);
- page_cache_release(bh->b_page);
+ put_page(bh->b_page);
brelse(bh);
failed:
return ret;
@@ -363,7 +363,7 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
pgoff_t index = (pgoff_t)block >>
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ (PAGE_SHIFT - inode->i_blkbits);
struct page *page;
unsigned long first_block;
int ret = 0;
@@ -376,7 +376,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
wait_on_page_writeback(page);
first_block = (unsigned long)index <<
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ (PAGE_SHIFT - inode->i_blkbits);
if (page_has_buffers(page)) {
struct buffer_head *bh;
@@ -385,7 +385,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
}
still_dirty = PageDirty(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (still_dirty ||
invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
@@ -578,7 +578,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
}
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return 0;
}
@@ -597,7 +597,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
bh_frozen = nilfs_page_get_nth_block(page, n);
}
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
return bh_frozen;
}
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 38d67f3e25bc..3b2af05f9fb4 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -431,11 +431,11 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
out_dir:
if (dir_de) {
kunmap(dir_page);
- page_cache_release(dir_page);
+ put_page(dir_page);
}
out_old:
kunmap(old_page);
- page_cache_release(old_page);
+ put_page(old_page);
out:
nilfs_transaction_abort(old_dir->i_sb);
return err;
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index c20df77eff99..489391561cda 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -50,7 +50,7 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
if (!page_has_buffers(page))
create_empty_buffers(page, 1 << blkbits, b_state);
- first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits);
+ first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
bh = nilfs_page_get_nth_block(page, block - first_block);
touch_buffer(bh);
@@ -64,7 +64,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
unsigned long b_state)
{
int blkbits = inode->i_blkbits;
- pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
+ pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
struct page *page;
struct buffer_head *bh;
@@ -75,7 +75,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
if (unlikely(!bh)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return NULL;
}
return bh;
@@ -288,7 +288,7 @@ repeat:
__set_page_dirty_nobuffers(dpage);
unlock_page(dpage);
- page_cache_release(dpage);
+ put_page(dpage);
unlock_page(page);
}
pagevec_release(&pvec);
@@ -333,7 +333,7 @@ repeat:
WARN_ON(PageDirty(dpage));
nilfs_copy_page(dpage, page, 0);
unlock_page(dpage);
- page_cache_release(dpage);
+ put_page(dpage);
} else {
struct page *page2;
@@ -350,7 +350,7 @@ repeat:
if (unlikely(err < 0)) {
WARN_ON(err == -EEXIST);
page->mapping = NULL;
- page_cache_release(page); /* for cache */
+ put_page(page); /* for cache */
} else {
page->mapping = dmap;
dmap->nrpages++;
@@ -523,8 +523,8 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
if (inode->i_mapping->nrpages == 0)
return 0;
- index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
- nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
+ nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);
pagevec_init(&pvec, 0);
@@ -537,7 +537,7 @@ repeat:
if (length > 0 && pvec.pages[0]->index > index)
goto out;
- b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
i = 0;
do {
page = pvec.pages[i];
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 9b4f205d1173..5afa77fadc11 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -544,14 +544,14 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
blocksize, page, NULL);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
(*nr_salvaged_blocks)++;
goto next;
failed_page:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
failed_inode:
printk(KERN_WARNING
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 3b65adaae7e4..4317f72568e6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2070,7 +2070,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
goto failed_to_write;
if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
- nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
+ nilfs->ns_blocksize_bits != PAGE_SHIFT) {
/*
* At this point, we avoid double buffering
* for blocksize < pagesize because page dirty
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 7521e11db728..97768a1379f2 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -74,7 +74,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
set_buffer_uptodate(bh);
- file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
+ file_ofs = ((s64)page->index << PAGE_SHIFT) +
bh_offset(bh);
read_lock_irqsave(&ni->size_lock, flags);
init_size = ni->initialized_size;
@@ -142,7 +142,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
u32 rec_size;
rec_size = ni->itype.index.block_size;
- recs = PAGE_CACHE_SIZE / rec_size;
+ recs = PAGE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
local_irq_save(flags);
@@ -229,7 +229,7 @@ static int ntfs_read_block(struct page *page)
* fully truncated, truncate will throw it away as soon as we unlock
* it so no need to worry what we do with it.
*/
- iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+ iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
read_lock_irqsave(&ni->size_lock, flags);
lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
init_size = ni->initialized_size;
@@ -412,9 +412,9 @@ retry_readpage:
vi = page->mapping->host;
i_size = i_size_read(vi);
/* Is the page fully outside i_size? (truncate in progress) */
- if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT)) {
- zero_user(page, 0, PAGE_CACHE_SIZE);
+ if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
+ PAGE_SHIFT)) {
+ zero_user(page, 0, PAGE_SIZE);
ntfs_debug("Read outside i_size - truncated?");
goto done;
}
@@ -463,7 +463,7 @@ retry_readpage:
* ok to ignore the compressed flag here.
*/
if (unlikely(page->index > 0)) {
- zero_user(page, 0, PAGE_CACHE_SIZE);
+ zero_user(page, 0, PAGE_SIZE);
goto done;
}
if (!NInoAttr(ni))
@@ -509,7 +509,7 @@ retry_readpage:
le16_to_cpu(ctx->attr->data.resident.value_offset),
attr_len);
/* Zero the remainder of the page. */
- memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+ memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
flush_dcache_page(page);
kunmap_atomic(addr);
put_unm_err_out:
@@ -599,7 +599,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
/* NOTE: Different naming scheme to ntfs_read_block()! */
/* The first block in the page. */
- block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+ block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
read_lock_irqsave(&ni->size_lock, flags);
i_size = i_size_read(vi);
@@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
// in the inode.
// Again, for each page do:
// __set_page_dirty_buffers();
- // page_cache_release()
+ // put_page()
// We don't need to wait on the writes.
// Update iblock.
}
@@ -925,7 +925,7 @@ static int ntfs_write_mst_block(struct page *page,
ntfs_volume *vol = ni->vol;
u8 *kaddr;
unsigned int rec_size = ni->itype.index.block_size;
- ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
+ ntfs_inode *locked_nis[PAGE_SIZE / rec_size];
struct buffer_head *bh, *head, *tbh, *rec_start_bh;
struct buffer_head *bhs[MAX_BUF_PER_PAGE];
runlist_element *rl;
@@ -949,7 +949,7 @@ static int ntfs_write_mst_block(struct page *page,
(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
bh_size = vol->sb->s_blocksize;
bh_size_bits = vol->sb->s_blocksize_bits;
- max_bhs = PAGE_CACHE_SIZE / bh_size;
+ max_bhs = PAGE_SIZE / bh_size;
BUG_ON(!max_bhs);
BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
@@ -961,13 +961,13 @@ static int ntfs_write_mst_block(struct page *page,
BUG_ON(!bh);
rec_size_bits = ni->itype.index.block_size_bits;
- BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
+ BUG_ON(!(PAGE_SIZE >> rec_size_bits));
bhs_per_rec = rec_size >> bh_size_bits;
BUG_ON(!bhs_per_rec);
/* The first block in the page. */
rec_block = block = (sector_t)page->index <<
- (PAGE_CACHE_SHIFT - bh_size_bits);
+ (PAGE_SHIFT - bh_size_bits);
/* The first out of bounds block for the data size. */
dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
@@ -1133,7 +1133,7 @@ lock_retry_remap:
unsigned long mft_no;
/* Get the mft record number. */
- mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+ mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
>> rec_size_bits;
/* Check whether to write this mft record. */
tni = NULL;
@@ -1249,7 +1249,7 @@ do_mirror:
continue;
ofs = bh_offset(tbh);
/* Get the mft record number. */
- mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+ mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
>> rec_size_bits;
if (mft_no < vol->mftmirr_size)
ntfs_sync_mft_mirror(vol, mft_no,
@@ -1300,7 +1300,7 @@ done:
* Set page error if there is only one ntfs record in the page.
* Otherwise we would lose per-record granularity.
*/
- if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
+ if (ni->itype.index.block_size == PAGE_SIZE)
SetPageError(page);
NVolSetErrors(vol);
}
@@ -1308,7 +1308,7 @@ done:
ntfs_debug("Page still contains one or more dirty ntfs "
"records. Redirtying the page starting at "
"record 0x%lx.", page->index <<
- (PAGE_CACHE_SHIFT - rec_size_bits));
+ (PAGE_SHIFT - rec_size_bits));
redirty_page_for_writepage(wbc, page);
unlock_page(page);
} else {
@@ -1365,13 +1365,13 @@ retry_writepage:
BUG_ON(!PageLocked(page));
i_size = i_size_read(vi);
/* Is the page fully outside i_size? (truncate in progress) */
- if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT)) {
+ if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
+ PAGE_SHIFT)) {
/*
* The page may have dirty, unmapped buffers. Make them
* freeable here, so the page does not leak.
*/
- block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ block_invalidatepage(page, 0, PAGE_SIZE);
unlock_page(page);
ntfs_debug("Write outside i_size - truncated?");
return 0;
@@ -1414,10 +1414,10 @@ retry_writepage:
/* NInoNonResident() == NInoIndexAllocPresent() */
if (NInoNonResident(ni)) {
/* We have to zero every time due to mmap-at-end-of-file. */
- if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
+ if (page->index >= (i_size >> PAGE_SHIFT)) {
/* The page straddles i_size. */
- unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
- zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
+ unsigned int ofs = i_size & ~PAGE_MASK;
+ zero_user_segment(page, ofs, PAGE_SIZE);
}
/* Handle mst protected attributes. */
if (NInoMstProtected(ni))
@@ -1500,7 +1500,7 @@ retry_writepage:
le16_to_cpu(ctx->attr->data.resident.value_offset),
addr, attr_len);
/* Zero out of bounds area in the page cache page. */
- memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+ memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
kunmap_atomic(addr);
flush_dcache_page(page);
flush_dcache_mft_record_page(ctx->ntfs_ino);
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index caecc58f529c..820d6eabf60f 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -40,7 +40,7 @@
static inline void ntfs_unmap_page(struct page *page)
{
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
/**
@@ -49,7 +49,7 @@ static inline void ntfs_unmap_page(struct page *page)
* @index: index into the page cache for @mapping of the page to map
*
* Read a page from the page cache of the address space @mapping at position
- * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes.
+ * @index, where @index is in units of PAGE_SIZE, and not in bytes.
*
* If the page is not in memory it is loaded from disk first using the readpage
* method defined in the address space operations of @mapping and the page is
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 250ed5b20c8f..44a39a099b54 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -152,7 +152,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
old_ctx.base_ntfs_ino) {
put_this_page = old_ctx.ntfs_ino->page;
- page_cache_get(put_this_page);
+ get_page(put_this_page);
}
/*
* Reinitialize the search context so we can lookup the
@@ -275,7 +275,7 @@ retry_map:
* the pieces anyway.
*/
if (put_this_page)
- page_cache_release(put_this_page);
+ put_page(put_this_page);
}
return err;
}
@@ -1660,7 +1660,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
memcpy(kaddr, (u8*)a +
le16_to_cpu(a->data.resident.value_offset),
attr_size);
- memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
+ memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size);
kunmap_atomic(kaddr);
flush_dcache_page(page);
SetPageUptodate(page);
@@ -1748,7 +1748,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
if (page) {
set_page_dirty(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
ntfs_debug("Done.");
return 0;
@@ -1835,7 +1835,7 @@ rl_err_out:
ntfs_free(rl);
page_err_out:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
if (err == -EINVAL)
err = -EIO;
@@ -2513,17 +2513,17 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
BUG_ON(NInoEncrypted(ni));
mapping = VFS_I(ni)->i_mapping;
/* Work out the starting index and page offset. */
- idx = ofs >> PAGE_CACHE_SHIFT;
- start_ofs = ofs & ~PAGE_CACHE_MASK;
+ idx = ofs >> PAGE_SHIFT;
+ start_ofs = ofs & ~PAGE_MASK;
/* Work out the ending index and page offset. */
end = ofs + cnt;
- end_ofs = end & ~PAGE_CACHE_MASK;
+ end_ofs = end & ~PAGE_MASK;
/* If the end is outside the inode size return -ESPIPE. */
if (unlikely(end > i_size_read(VFS_I(ni)))) {
ntfs_error(vol->sb, "Request exceeds end of attribute.");
return -ESPIPE;
}
- end >>= PAGE_CACHE_SHIFT;
+ end >>= PAGE_SHIFT;
/* If there is a first partial page, need to do it the slow way. */
if (start_ofs) {
page = read_mapping_page(mapping, idx, NULL);
@@ -2536,7 +2536,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
* If the last page is the same as the first page, need to
* limit the write to the end offset.
*/
- size = PAGE_CACHE_SIZE;
+ size = PAGE_SIZE;
if (idx == end)
size = end_ofs;
kaddr = kmap_atomic(page);
@@ -2544,7 +2544,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
flush_dcache_page(page);
kunmap_atomic(kaddr);
set_page_dirty(page);
- page_cache_release(page);
+ put_page(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
if (idx == end)
@@ -2561,7 +2561,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
return -ENOMEM;
}
kaddr = kmap_atomic(page);
- memset(kaddr, val, PAGE_CACHE_SIZE);
+ memset(kaddr, val, PAGE_SIZE);
flush_dcache_page(page);
kunmap_atomic(kaddr);
/*
@@ -2585,7 +2585,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
set_page_dirty(page);
/* Finally unlock and release the page. */
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
}
@@ -2602,7 +2602,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
flush_dcache_page(page);
kunmap_atomic(kaddr);
set_page_dirty(page);
- page_cache_release(page);
+ put_page(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
}
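ntfs_attr_set() splits the byte range [ofs, ofs + cnt) into a partial head page, whole middle pages, and a partial tail page. The converted index arithmetic, isolated as a sketch with assumed values:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1L << PAGE_SHIFT)   /* assumed 4 KiB page */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	long long ofs = 5000, cnt = 20000;  /* hypothetical byte range */

	long idx       = ofs >> PAGE_SHIFT;        /* first page: 1    */
	long start_ofs = ofs & ~PAGE_MASK;         /* head offset: 904 */
	long long end  = ofs + cnt;                /* 25000            */
	long end_ofs   = end & ~PAGE_MASK;         /* tail length: 424 */
	long end_idx   = end >> PAGE_SHIFT;        /* last page: 6     */

	printf("pages %ld..%ld, head ofs %ld, tail len %ld\n",
	       idx, end_idx, start_ofs, end_ofs);
	return 0;
}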
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c
index 0809cf876098..ec130c588d2b 100644
--- a/fs/ntfs/bitmap.c
+++ b/fs/ntfs/bitmap.c
@@ -67,8 +67,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
* Calculate the indices for the pages containing the first and last
* bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
*/
- index = start_bit >> (3 + PAGE_CACHE_SHIFT);
- end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT);
+ index = start_bit >> (3 + PAGE_SHIFT);
+ end_index = (start_bit + cnt - 1) >> (3 + PAGE_SHIFT);
/* Get the page containing the first bit (@start_bit). */
mapping = vi->i_mapping;
@@ -82,7 +82,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
kaddr = page_address(page);
/* Set @pos to the position of the byte containing @start_bit. */
- pos = (start_bit >> 3) & ~PAGE_CACHE_MASK;
+ pos = (start_bit >> 3) & ~PAGE_MASK;
/* Calculate the position of @start_bit in the first byte. */
bit = start_bit & 7;
@@ -108,7 +108,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
* Depending on @value, modify all remaining whole bytes in the page up
* to @cnt.
*/
- len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos);
+ len = min_t(s64, cnt >> 3, PAGE_SIZE - pos);
memset(kaddr + pos, value ? 0xff : 0, len);
cnt -= len << 3;
@@ -132,7 +132,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
* Depending on @value, modify all remaining whole bytes in the
* page up to @cnt.
*/
- len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE);
+ len = min_t(s64, cnt >> 3, PAGE_SIZE);
memset(kaddr, value ? 0xff : 0, len);
cnt -= len << 3;
}
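__ntfs_bitmap_set_bits_in_run() locates a bit in the page cache by composing two shifts: >> 3 turns a bit number into a byte number, and >> PAGE_SHIFT turns that into a page index, hence the single >> (3 + PAGE_SHIFT). A worked sketch, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1L << PAGE_SHIFT)   /* assumed 4 KiB page */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	long long start_bit = 40000;        /* hypothetical bit number */

	long index = start_bit >> (3 + PAGE_SHIFT);   /* page 1         */
	long pos   = (start_bit >> 3) & ~PAGE_MASK;   /* byte 904 in it */
	int  bit   = start_bit & 7;                   /* bit 0 of byte  */

	/* One page holds PAGE_SIZE * 8 = 32768 bits. */
	printf("page %ld, byte %ld, bit %d\n", index, pos, bit);
	return 0;
}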
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index f82498c35e78..f2b5e746f49b 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -104,16 +104,12 @@ static void zero_partial_compressed_page(struct page *page,
unsigned int kp_ofs;
ntfs_debug("Zeroing page region outside initialized size.");
- if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
- /*
- * FIXME: Using clear_page() will become wrong when we get
- * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
- */
+ if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
clear_page(kp);
return;
}
- kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
- memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
+ kp_ofs = initialized_size & ~PAGE_MASK;
+ memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
return;
}
@@ -123,7 +119,7 @@ static void zero_partial_compressed_page(struct page *page,
static inline void handle_bounds_compressed_page(struct page *page,
const loff_t i_size, const s64 initialized_size)
{
- if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
+ if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
(initialized_size < i_size))
zero_partial_compressed_page(page, initialized_size);
return;
@@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page,
* @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
* completed during the decompression of the compression block (@cb_start).
*
- * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
+ * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
 * unpredictably! You have been warned!
*
* Note to hackers: This function may not sleep until it has finished accessing
@@ -241,7 +237,7 @@ return_error:
if (di == xpage)
*xpage_done = 1;
else
- page_cache_release(dp);
+ put_page(dp);
dest_pages[di] = NULL;
}
}
@@ -274,7 +270,7 @@ return_error:
cb = cb_sb_end;
/* Advance destination position to next sub-block. */
- *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
+ *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
if (!*dest_ofs && (++*dest_index > dest_max_index))
goto return_overflow;
goto do_next_sb;
@@ -301,7 +297,7 @@ return_error:
/* Advance destination position to next sub-block. */
*dest_ofs += NTFS_SB_SIZE;
- if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
+ if (!(*dest_ofs &= ~PAGE_MASK)) {
finalize_page:
/*
* First stage: add current page index to array of
@@ -335,7 +331,7 @@ do_next_tag:
*dest_ofs += nr_bytes;
}
/* We have finished the current sub-block. */
- if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
+ if (!(*dest_ofs &= ~PAGE_MASK))
goto finalize_page;
goto do_next_sb;
}
@@ -462,7 +458,7 @@ return_overflow:
* have been written to so that we would lose data if we were to just overwrite
* them with the out-of-date uncompressed data.
*
- * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
+ * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
* the end of the file I think. We need to detect this case and zero the out
* of bounds remainder of the page in question and mark it as handled. At the
* moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@ return_overflow:
* clusters so is probably not going to be seen by anyone. Still this should
* be fixed. (AIA)
*
- * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
+ * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
* handling sparse and compressed cbs. (AIA)
*
* FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,14 +493,14 @@ int ntfs_read_compressed_block(struct page *page)
u64 cb_size_mask = cb_size - 1UL;
VCN vcn;
LCN lcn;
- /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
- VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
+ /* The first wanted vcn (minimum alignment is PAGE_SIZE). */
+ VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
vol->cluster_size_bits;
/*
* The first vcn after the last wanted vcn (minimum alignment is again
- * PAGE_CACHE_SIZE.
+ * PAGE_SIZE).
*/
- VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
+ VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
& ~cb_size_mask) >> vol->cluster_size_bits;
/* Number of compression blocks (cbs) in the wanted vcn range. */
unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
@@ -515,7 +511,7 @@ int ntfs_read_compressed_block(struct page *page)
* guarantees of start_vcn and end_vcn, no need to round up here.
*/
unsigned int nr_pages = (end_vcn - start_vcn) <<
- vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+ vol->cluster_size_bits >> PAGE_SHIFT;
unsigned int xpage, max_page, cur_page, cur_ofs, i;
unsigned int cb_clusters, cb_max_ofs;
int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
@@ -549,7 +545,7 @@ int ntfs_read_compressed_block(struct page *page)
* We have already been given one page, this is the one we must do.
* Once again, the alignment guarantees keep it simple.
*/
- offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+ offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
xpage = index - offset;
pages[xpage] = page;
/*
@@ -560,13 +556,13 @@ int ntfs_read_compressed_block(struct page *page)
i_size = i_size_read(VFS_I(ni));
initialized_size = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
- max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+ max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
offset;
/* Is the page fully outside i_size? (truncate in progress) */
if (xpage >= max_page) {
kfree(bhs);
kfree(pages);
- zero_user(page, 0, PAGE_CACHE_SIZE);
+ zero_user(page, 0, PAGE_SIZE);
ntfs_debug("Compressed read outside i_size - truncated?");
SetPageUptodate(page);
unlock_page(page);
@@ -591,7 +587,7 @@ int ntfs_read_compressed_block(struct page *page)
continue;
}
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
pages[i] = NULL;
}
}
@@ -735,9 +731,9 @@ lock_retry_remap:
ntfs_debug("Successfully read the compression block.");
/* The last page and maximum offset within it for the current cb. */
- cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
- cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
- cb_max_page >>= PAGE_CACHE_SHIFT;
+ cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
+ cb_max_ofs = cb_max_page & ~PAGE_MASK;
+ cb_max_page >>= PAGE_SHIFT;
/* Catch end of file inside a compression block. */
if (cb_max_page > max_page)
@@ -753,16 +749,11 @@ lock_retry_remap:
for (; cur_page < cb_max_page; cur_page++) {
page = pages[cur_page];
if (page) {
- /*
- * FIXME: Using clear_page() will become wrong
- * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
- * for now there is no problem.
- */
if (likely(!cur_ofs))
clear_page(page_address(page));
else
memset(page_address(page) + cur_ofs, 0,
- PAGE_CACHE_SIZE -
+ PAGE_SIZE -
cur_ofs);
flush_dcache_page(page);
kunmap(page);
@@ -771,10 +762,10 @@ lock_retry_remap:
if (cur_page == xpage)
xpage_done = 1;
else
- page_cache_release(page);
+ put_page(page);
pages[cur_page] = NULL;
}
- cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+ cb_pos += PAGE_SIZE - cur_ofs;
cur_ofs = 0;
if (cb_pos >= cb_end)
break;
@@ -807,7 +798,7 @@ lock_retry_remap:
* synchronous io for the majority of pages.
* Or if we choose not to do the read-ahead/-behind stuff, we
* could just return block_read_full_page(pages[xpage]) as long
- * as PAGE_CACHE_SIZE <= cb_size.
+ * as PAGE_SIZE <= cb_size.
*/
if (cb_max_ofs)
cb_max_page--;
@@ -816,8 +807,8 @@ lock_retry_remap:
page = pages[cur_page];
if (page)
memcpy(page_address(page) + cur_ofs, cb_pos,
- PAGE_CACHE_SIZE - cur_ofs);
- cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+ PAGE_SIZE - cur_ofs);
+ cb_pos += PAGE_SIZE - cur_ofs;
cur_ofs = 0;
if (cb_pos >= cb_end)
break;
@@ -850,10 +841,10 @@ lock_retry_remap:
if (cur2_page == xpage)
xpage_done = 1;
else
- page_cache_release(page);
+ put_page(page);
pages[cur2_page] = NULL;
}
- cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
+ cb_pos2 += PAGE_SIZE - cur_ofs2;
cur_ofs2 = 0;
if (cb_pos2 >= cb_end)
break;
@@ -884,7 +875,7 @@ lock_retry_remap:
kunmap(page);
unlock_page(page);
if (prev_cur_page != xpage)
- page_cache_release(page);
+ put_page(page);
pages[prev_cur_page] = NULL;
}
}
@@ -914,7 +905,7 @@ lock_retry_remap:
kunmap(page);
unlock_page(page);
if (cur_page != xpage)
- page_cache_release(page);
+ put_page(page);
pages[cur_page] = NULL;
}
}
@@ -961,7 +952,7 @@ err_out:
kunmap(page);
unlock_page(page);
if (i != xpage)
- page_cache_release(page);
+ put_page(page);
}
}
kfree(pages);
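ntfs_read_compressed_block() widens the request from one page to the enclosing compression block (cb): start_vcn rounds the page's byte offset down to a cb boundary, end_vcn rounds the next page's start up. A sketch, assuming a 4 KiB page, 4 KiB clusters, and the usual 16-cluster (64 KiB) cb (all assumed values):

#include <stdio.h>

#define PAGE_SHIFT 12                        /* assumed 4 KiB page     */
#define CLUSTER_SIZE_BITS 12                 /* assumed 4 KiB clusters */
#define CB_SIZE (64 * 1024UL)                /* typical 16-cluster cb  */

int main(void)
{
	unsigned long index = 5;                 /* page being read */
	unsigned long long cb_size_mask = CB_SIZE - 1;

	long long start_vcn = (((long long)index << PAGE_SHIFT)
			& ~cb_size_mask) >> CLUSTER_SIZE_BITS;
	long long end_vcn = ((((long long)(index + 1) << PAGE_SHIFT)
			+ CB_SIZE - 1) & ~cb_size_mask) >> CLUSTER_SIZE_BITS;

	/* Page 5 lives in the first 64 KiB cb: clusters [0, 16). */
	printf("vcn range [%lld, %lld)\n", start_vcn, end_vcn);
	return 0;
}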
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index b2eff5816adc..a18613579001 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -315,11 +315,11 @@ found_it:
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
- * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+ * of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
- dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+ dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(sb, "Failed to map directory index page, error %ld.",
-PTR_ERR(page));
@@ -331,9 +331,9 @@ descend_into_child_node:
fast_descend_into_child_node:
/* Get to the index allocation block. */
ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
- dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+ dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
/* Bounds checks. */
- if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+ if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
"inode 0x%lx or driver bug.", dir_ni->mft_no);
goto unm_err_out;
@@ -366,7 +366,7 @@ fast_descend_into_child_node:
goto unm_err_out;
}
index_end = (u8*)ia + dir_ni->itype.index.block_size;
- if (index_end > kaddr + PAGE_CACHE_SIZE) {
+ if (index_end > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx crosses page boundary. Impossible! "
"Cannot access! This is probably a bug in the "
@@ -559,9 +559,9 @@ found_it2:
/* If vcn is in the same page cache page as old_vcn we
* recycle the mapped page. */
if (old_vcn << vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT == vcn <<
+ PAGE_SHIFT == vcn <<
vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT)
+ PAGE_SHIFT)
goto fast_descend_into_child_node;
unlock_page(page);
ntfs_unmap_page(page);
@@ -793,11 +793,11 @@ found_it:
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
- * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+ * of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
- dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+ dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(sb, "Failed to map directory index page, error %ld.",
-PTR_ERR(page));
@@ -809,9 +809,9 @@ descend_into_child_node:
fast_descend_into_child_node:
/* Get to the index allocation block. */
ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
- dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+ dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
/* Bounds checks. */
- if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+ if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
"inode 0x%lx or driver bug.", dir_ni->mft_no);
goto unm_err_out;
@@ -844,7 +844,7 @@ fast_descend_into_child_node:
goto unm_err_out;
}
index_end = (u8*)ia + dir_ni->itype.index.block_size;
- if (index_end > kaddr + PAGE_CACHE_SIZE) {
+ if (index_end > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx crosses page boundary. Impossible! "
"Cannot access! This is probably a bug in the "
@@ -968,9 +968,9 @@ found_it2:
/* If vcn is in the same page cache page as old_vcn we
* recycle the mapped page. */
if (old_vcn << vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT == vcn <<
+ PAGE_SHIFT == vcn <<
vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT)
+ PAGE_SHIFT)
goto fast_descend_into_child_node;
unlock_page(page);
ntfs_unmap_page(page);
@@ -1246,15 +1246,15 @@ skip_index_root:
goto iput_err_out;
}
/* Get the starting bit position in the current bitmap page. */
- cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1);
- bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1);
+ cur_bmp_pos = bmp_pos & ((PAGE_SIZE * 8) - 1);
+ bmp_pos &= ~(u64)((PAGE_SIZE * 8) - 1);
get_next_bmp_page:
ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx",
- (unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT),
+ (unsigned long long)bmp_pos >> (3 + PAGE_SHIFT),
(unsigned long long)bmp_pos &
- (unsigned long long)((PAGE_CACHE_SIZE * 8) - 1));
+ (unsigned long long)((PAGE_SIZE * 8) - 1));
bmp_page = ntfs_map_page(bmp_mapping,
- bmp_pos >> (3 + PAGE_CACHE_SHIFT));
+ bmp_pos >> (3 + PAGE_SHIFT));
if (IS_ERR(bmp_page)) {
ntfs_error(sb, "Reading index bitmap failed.");
err = PTR_ERR(bmp_page);
@@ -1270,9 +1270,9 @@ find_next_index_buffer:
* If we have reached the end of the bitmap page, get the next
* page, and put away the old one.
*/
- if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) {
+ if (unlikely((cur_bmp_pos >> 3) >= PAGE_SIZE)) {
ntfs_unmap_page(bmp_page);
- bmp_pos += PAGE_CACHE_SIZE * 8;
+ bmp_pos += PAGE_SIZE * 8;
cur_bmp_pos = 0;
goto get_next_bmp_page;
}
@@ -1285,8 +1285,8 @@ find_next_index_buffer:
ntfs_debug("Handling index buffer 0x%llx.",
(unsigned long long)bmp_pos + cur_bmp_pos);
/* If the current index buffer is in the same page we reuse the page. */
- if ((prev_ia_pos & (s64)PAGE_CACHE_MASK) !=
- (ia_pos & (s64)PAGE_CACHE_MASK)) {
+ if ((prev_ia_pos & (s64)PAGE_MASK) !=
+ (ia_pos & (s64)PAGE_MASK)) {
prev_ia_pos = ia_pos;
if (likely(ia_page != NULL)) {
unlock_page(ia_page);
@@ -1296,7 +1296,7 @@ find_next_index_buffer:
* Map the page cache page containing the current ia_pos,
* reading it from disk if necessary.
*/
- ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT);
+ ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_SHIFT);
if (IS_ERR(ia_page)) {
ntfs_error(sb, "Reading index allocation data failed.");
err = PTR_ERR(ia_page);
@@ -1307,10 +1307,10 @@ find_next_index_buffer:
kaddr = (u8*)page_address(ia_page);
}
/* Get the current index buffer. */
- ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
- ~(s64)(ndir->itype.index.block_size - 1)));
+ ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_MASK &
+ ~(s64)(ndir->itype.index.block_size - 1)));
/* Bounds checks. */
- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+ if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) {
ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
"inode 0x%lx or driver bug.", vdir->i_ino);
goto err_out;
@@ -1348,7 +1348,7 @@ find_next_index_buffer:
goto err_out;
}
index_end = (u8*)ia + ndir->itype.index.block_size;
- if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) {
+ if (unlikely(index_end > kaddr + PAGE_SIZE)) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx crosses page boundary. Impossible! "
"Cannot access! This is probably a bug in the "
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index bed4d427dfae..91117ada8528 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -220,8 +220,8 @@ do_non_resident_extend:
m = NULL;
}
mapping = vi->i_mapping;
- index = old_init_size >> PAGE_CACHE_SHIFT;
- end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ index = old_init_size >> PAGE_SHIFT;
+ end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
do {
/*
* Read the page. If the page is not present, this will zero
@@ -233,7 +233,7 @@ do_non_resident_extend:
goto init_err_out;
}
if (unlikely(PageError(page))) {
- page_cache_release(page);
+ put_page(page);
err = -EIO;
goto init_err_out;
}
@@ -242,13 +242,13 @@ do_non_resident_extend:
* enough to make ntfs_writepage() work.
*/
write_lock_irqsave(&ni->size_lock, flags);
- ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
+ ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT;
if (ni->initialized_size > new_init_size)
ni->initialized_size = new_init_size;
write_unlock_irqrestore(&ni->size_lock, flags);
/* Set the page dirty so it gets written out. */
set_page_dirty(page);
- page_cache_release(page);
+ put_page(page);
/*
* Play nice with the vm and the rest of the system. This is
* very much needed as we can potentially be modifying the
@@ -543,7 +543,7 @@ out:
err_out:
while (nr > 0) {
unlock_page(pages[--nr]);
- page_cache_release(pages[nr]);
+ put_page(pages[nr]);
}
goto out;
}
@@ -573,7 +573,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
* only partially being written to.
*
* If @nr_pages is greater than one, we are guaranteed that the cluster size is
- * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
+ * greater than PAGE_SIZE, that all pages in @pages are entirely inside
* the same cluster and that they are the entirety of that cluster, and that
* the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
*
@@ -653,7 +653,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
u = 0;
do_next_page:
page = pages[u];
- bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
+ bh_pos = (s64)page->index << PAGE_SHIFT;
bh = head = page_buffers(page);
do {
VCN cdelta;
@@ -810,11 +810,11 @@ map_buffer_cached:
kaddr = kmap_atomic(page);
if (bh_pos < pos) {
- pofs = bh_pos & ~PAGE_CACHE_MASK;
+ pofs = bh_pos & ~PAGE_MASK;
memset(kaddr + pofs, 0, pos - bh_pos);
}
if (bh_end > end) {
- pofs = end & ~PAGE_CACHE_MASK;
+ pofs = end & ~PAGE_MASK;
memset(kaddr + pofs, 0, bh_end - end);
}
kunmap_atomic(kaddr);
@@ -942,7 +942,7 @@ rl_not_mapped_enoent:
* unmapped. This can only happen when the cluster size is
* less than the page cache size.
*/
- if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
+ if (unlikely(vol->cluster_size < PAGE_SIZE)) {
bh_cend = (bh_end + vol->cluster_size - 1) >>
vol->cluster_size_bits;
if ((bh_cend <= cpos || bh_cpos >= cend)) {
@@ -1208,7 +1208,7 @@ rl_not_mapped_enoent:
wait_on_buffer(bh);
if (likely(buffer_uptodate(bh))) {
page = bh->b_page;
- bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
+ bh_pos = ((s64)page->index << PAGE_SHIFT) +
bh_offset(bh);
/*
* If the buffer overflows the initialized size, need
@@ -1350,7 +1350,7 @@ rl_not_mapped_enoent:
bh = head = page_buffers(page);
do {
if (u == nr_pages &&
- ((s64)page->index << PAGE_CACHE_SHIFT) +
+ ((s64)page->index << PAGE_SHIFT) +
bh_offset(bh) >= end)
break;
if (!buffer_new(bh))
@@ -1422,7 +1422,7 @@ static inline int ntfs_commit_pages_after_non_resident_write(
bool partial;
page = pages[u];
- bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
+ bh_pos = (s64)page->index << PAGE_SHIFT;
bh = head = page_buffers(page);
partial = false;
do {
@@ -1639,7 +1639,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
if (end < attr_len)
memcpy(kaddr + end, kattr + end, attr_len - end);
/* Zero the region outside the end of the attribute value. */
- memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+ memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len);
flush_dcache_page(page);
SetPageUptodate(page);
}
@@ -1706,7 +1706,7 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
unsigned len, copied;
do {
- len = PAGE_CACHE_SIZE - ofs;
+ len = PAGE_SIZE - ofs;
if (len > bytes)
len = bytes;
copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
@@ -1724,14 +1724,14 @@ out:
return total;
err:
/* Zero the rest of the target like __copy_from_user(). */
- len = PAGE_CACHE_SIZE - copied;
+ len = PAGE_SIZE - copied;
do {
if (len > bytes)
len = bytes;
zero_user(*pages, copied, len);
bytes -= len;
copied = 0;
- len = PAGE_CACHE_SIZE;
+ len = PAGE_SIZE;
} while (++pages < last_page);
goto out;
}
@@ -1787,8 +1787,8 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
* attributes.
*/
nr_pages = 1;
- if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
- nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
+ if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni))
+ nr_pages = vol->cluster_size >> PAGE_SHIFT;
last_vcn = -1;
do {
VCN vcn;
@@ -1796,9 +1796,9 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
unsigned ofs, do_pages, u;
size_t copied;
- start_idx = idx = pos >> PAGE_CACHE_SHIFT;
- ofs = pos & ~PAGE_CACHE_MASK;
- bytes = PAGE_CACHE_SIZE - ofs;
+ start_idx = idx = pos >> PAGE_SHIFT;
+ ofs = pos & ~PAGE_MASK;
+ bytes = PAGE_SIZE - ofs;
do_pages = 1;
if (nr_pages > 1) {
vcn = pos >> vol->cluster_size_bits;
@@ -1832,7 +1832,7 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
if (lcn == LCN_HOLE) {
start_idx = (pos & ~(s64)
vol->cluster_size_mask)
- >> PAGE_CACHE_SHIFT;
+ >> PAGE_SHIFT;
bytes = vol->cluster_size - (pos &
vol->cluster_size_mask);
do_pages = nr_pages;
@@ -1871,12 +1871,12 @@ again:
if (unlikely(status)) {
do {
unlock_page(pages[--do_pages]);
- page_cache_release(pages[do_pages]);
+ put_page(pages[do_pages]);
} while (do_pages);
break;
}
}
- u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
+ u = (pos >> PAGE_SHIFT) - pages[0]->index;
copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
i, bytes);
ntfs_flush_dcache_pages(pages + u, do_pages - u);
@@ -1889,7 +1889,7 @@ again:
}
do {
unlock_page(pages[--do_pages]);
- page_cache_release(pages[do_pages]);
+ put_page(pages[do_pages]);
} while (do_pages);
if (unlikely(status < 0))
break;
@@ -1921,7 +1921,7 @@ again:
}
} while (iov_iter_count(i));
if (cached_page)
- page_cache_release(cached_page);
+ put_page(cached_page);
ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
written ? "written" : "status", (unsigned long)written,
(long)status);
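ntfs_perform_write() caps each iteration at the end of the current page: ofs is the in-page starting offset and bytes shrinks from PAGE_SIZE - ofs to whatever remains of the request. A sketch of that loop shape, assuming 4 KiB pages and a hypothetical write:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1L << PAGE_SHIFT)   /* assumed 4 KiB page */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	long long pos = 7000, count = 10000;     /* hypothetical write */

	while (count > 0) {
		long idx   = pos >> PAGE_SHIFT;
		long ofs   = pos & ~PAGE_MASK;
		long bytes = PAGE_SIZE - ofs;    /* stay inside this page */

		if (bytes > count)
			bytes = count;
		printf("page %ld: copy %ld bytes at offset %ld\n",
		       idx, bytes, ofs);
		pos += bytes;
		count -= bytes;
	}
	/* 7000..17000 touches page 1 (1192 B), then 2..3 (4096 B each),
	 * then page 4 (616 B). */
	return 0;
}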
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index 096c135691ae..0d645f357930 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -272,11 +272,11 @@ done:
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
- * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+ * of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
- idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+ idx_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(sb, "Failed to map index page, error %ld.",
-PTR_ERR(page));
@@ -288,9 +288,9 @@ descend_into_child_node:
fast_descend_into_child_node:
/* Get to the index allocation block. */
ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
- idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+ idx_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
/* Bounds checks. */
- if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+ if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Out of bounds check failed. Corrupt inode "
"0x%lx or driver bug.", idx_ni->mft_no);
goto unm_err_out;
@@ -323,7 +323,7 @@ fast_descend_into_child_node:
goto unm_err_out;
}
index_end = (u8*)ia + idx_ni->itype.index.block_size;
- if (index_end > kaddr + PAGE_CACHE_SIZE) {
+ if (index_end > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
"crosses page boundary. Impossible! Cannot "
"access! This is probably a bug in the "
@@ -427,9 +427,9 @@ ia_done:
* the mapped page.
*/
if (old_vcn << vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT == vcn <<
+ PAGE_SHIFT == vcn <<
vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT)
+ PAGE_SHIFT)
goto fast_descend_into_child_node;
unlock_page(page);
ntfs_unmap_page(page);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index d284f07eda77..f40972d6df90 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -868,12 +868,12 @@ skip_attr_list_load:
ni->itype.index.block_size);
goto unm_err_out;
}
- if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
+ if (ni->itype.index.block_size > PAGE_SIZE) {
ntfs_error(vi->i_sb, "Index block size (%u) > "
- "PAGE_CACHE_SIZE (%ld) is not "
+ "PAGE_SIZE (%ld) is not "
"supported. Sorry.",
ni->itype.index.block_size,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
err = -EOPNOTSUPP;
goto unm_err_out;
}
@@ -1585,10 +1585,10 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
"two.", ni->itype.index.block_size);
goto unm_err_out;
}
- if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
- ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE "
+ if (ni->itype.index.block_size > PAGE_SIZE) {
+ ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE "
"(%ld) is not supported. Sorry.",
- ni->itype.index.block_size, PAGE_CACHE_SIZE);
+ ni->itype.index.block_size, PAGE_SIZE);
err = -EOPNOTSUPP;
goto unm_err_out;
}
diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c
index 1711b710b641..27a24a42f712 100644
--- a/fs/ntfs/lcnalloc.c
+++ b/fs/ntfs/lcnalloc.c
@@ -283,15 +283,15 @@ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
ntfs_unmap_page(page);
}
page = ntfs_map_page(mapping, last_read_pos >>
- PAGE_CACHE_SHIFT);
+ PAGE_SHIFT);
if (IS_ERR(page)) {
err = PTR_ERR(page);
ntfs_error(vol->sb, "Failed to map page.");
goto out;
}
- buf_size = last_read_pos & ~PAGE_CACHE_MASK;
+ buf_size = last_read_pos & ~PAGE_MASK;
buf = page_address(page) + buf_size;
- buf_size = PAGE_CACHE_SIZE - buf_size;
+ buf_size = PAGE_SIZE - buf_size;
if (unlikely(last_read_pos + buf_size > i_size))
buf_size = i_size - last_read_pos;
buf_size <<= 3;
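The cluster allocator reads $Bitmap a page at a time; the two assignments to buf_size first compute the starting offset inside the page, then the number of bytes left from there to the page end, and the final << 3 converts bytes to bits. Isolated (assumed 4 KiB page, hypothetical position):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1L << PAGE_SHIFT)   /* assumed 4 KiB page */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	long long last_read_pos = 6000;     /* hypothetical byte pos */

	long buf_size = last_read_pos & ~PAGE_MASK;  /* 1904 into page 1 */
	buf_size = PAGE_SIZE - buf_size;             /* 2192 bytes left  */
	buf_size <<= 3;                              /* 17536 bits       */

	printf("scan %ld bits from here\n", buf_size);
	return 0;
}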
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index c71de292c5ad..9d71213ca81e 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -381,7 +381,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
* completely inside @rp, just copy it from there. Otherwise map all
* the required pages and copy the data from them.
*/
- size = PAGE_CACHE_SIZE - (pos & ~PAGE_CACHE_MASK);
+ size = PAGE_SIZE - (pos & ~PAGE_MASK);
if (size >= le32_to_cpu(rp->system_page_size)) {
memcpy(trp, rp, le32_to_cpu(rp->system_page_size));
} else {
@@ -394,8 +394,8 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
/* Copy the remaining data one page at a time. */
have_read = size;
to_read = le32_to_cpu(rp->system_page_size) - size;
- idx = (pos + size) >> PAGE_CACHE_SHIFT;
- BUG_ON((pos + size) & ~PAGE_CACHE_MASK);
+ idx = (pos + size) >> PAGE_SHIFT;
+ BUG_ON((pos + size) & ~PAGE_MASK);
do {
page = ntfs_map_page(vi->i_mapping, idx);
if (IS_ERR(page)) {
@@ -406,7 +406,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi,
err = -EIO;
goto err_out;
}
- size = min_t(int, to_read, PAGE_CACHE_SIZE);
+ size = min_t(int, to_read, PAGE_SIZE);
memcpy((u8*)trp + have_read, page_address(page), size);
ntfs_unmap_page(page);
have_read += size;
@@ -509,11 +509,11 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
* log page size if the page cache size is between the default log page
* size and twice that.
*/
- if (PAGE_CACHE_SIZE >= DefaultLogPageSize && PAGE_CACHE_SIZE <=
+ if (PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <=
DefaultLogPageSize * 2)
log_page_size = DefaultLogPageSize;
else
- log_page_size = PAGE_CACHE_SIZE;
+ log_page_size = PAGE_SIZE;
log_page_mask = log_page_size - 1;
/*
* Use ntfs_ffs() instead of ffs() to enable the compiler to
@@ -539,7 +539,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
* to be empty.
*/
for (pos = 0; pos < size; pos <<= 1) {
- pgoff_t idx = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t idx = pos >> PAGE_SHIFT;
if (!page || page->index != idx) {
if (page)
ntfs_unmap_page(page);
@@ -550,7 +550,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
goto err_out;
}
}
- kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK);
+ kaddr = (u8*)page_address(page) + (pos & ~PAGE_MASK);
/*
* A non-empty block means the logfile is not empty while an
* empty block after a non-empty block has been encountered
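ntfs_check_and_load_restart_page() copies a system page that may straddle page-cache pages: the first chunk runs from pos to the end of its page, the rest is copied PAGE_SIZE at a time. A sketch of that loop shape (assumed 4 KiB pages, hypothetical sizes):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1L << PAGE_SHIFT)   /* assumed 4 KiB page */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	long long pos = 2048;               /* hypothetical start    */
	long system_page_size = 8192;       /* hypothetical log page */

	long size = PAGE_SIZE - (pos & ~PAGE_MASK);   /* 2048 first */
	long have_read = size;
	long to_read = system_page_size - size;

	while (to_read > 0) {
		size = to_read < PAGE_SIZE ? to_read : PAGE_SIZE;
		printf("copy %ld bytes at %ld\n", size, have_read);
		have_read += size;
		to_read -= size;
	}
	/* Copies 2048, then 4096, then 2048 bytes. */
	return 0;
}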
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 3014a36a255b..37b2501caaa4 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -61,16 +61,16 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
* here if the volume was that big...
*/
index = (u64)ni->mft_no << vol->mft_record_size_bits >>
- PAGE_CACHE_SHIFT;
- ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+ PAGE_SHIFT;
+ ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
i_size = i_size_read(mft_vi);
/* The maximum valid index into the page cache for $MFT's data. */
- end_index = i_size >> PAGE_CACHE_SHIFT;
+ end_index = i_size >> PAGE_SHIFT;
/* If the wanted index is out of bounds the mft record doesn't exist. */
if (unlikely(index >= end_index)) {
- if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs +
+ if (index > end_index || (i_size & ~PAGE_MASK) < ofs +
vol->mft_record_size) {
page = ERR_PTR(-ENOENT);
ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
@@ -487,7 +487,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
}
/* Get the page containing the mirror copy of the mft record @m. */
page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
- (PAGE_CACHE_SHIFT - vol->mft_record_size_bits));
+ (PAGE_SHIFT - vol->mft_record_size_bits));
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to map mft mirror page.");
err = PTR_ERR(page);
@@ -497,7 +497,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
BUG_ON(!PageUptodate(page));
ClearPageUptodate(page);
/* Offset of the mft mirror record inside the page. */
- page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+ page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
/* The address in the page of the mirror copy of the mft record @m. */
kmirr = page_address(page) + page_ofs;
/* Copy the mst protected mft record to the mirror. */
@@ -1178,8 +1178,8 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
for (; pass <= 2;) {
/* Cap size to pass_end. */
ofs = data_pos >> 3;
- page_ofs = ofs & ~PAGE_CACHE_MASK;
- size = PAGE_CACHE_SIZE - page_ofs;
+ page_ofs = ofs & ~PAGE_MASK;
+ size = PAGE_SIZE - page_ofs;
ll = ((pass_end + 7) >> 3) - ofs;
if (size > ll)
size = ll;
@@ -1190,7 +1190,7 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
*/
if (size) {
page = ntfs_map_page(mftbmp_mapping,
- ofs >> PAGE_CACHE_SHIFT);
+ ofs >> PAGE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read mft "
"bitmap, aborting.");
@@ -1328,13 +1328,13 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
*/
ll = lcn >> 3;
page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
- ll >> PAGE_CACHE_SHIFT);
+ ll >> PAGE_SHIFT);
if (IS_ERR(page)) {
up_write(&mftbmp_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to read from lcn bitmap.");
return PTR_ERR(page);
}
- b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK);
+ b = (u8*)page_address(page) + (ll & ~PAGE_MASK);
tb = 1 << (lcn & 7ull);
down_write(&vol->lcnbmp_lock);
if (*b != 0xff && !(*b & tb)) {
@@ -2103,14 +2103,14 @@ static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no)
* The index into the page cache and the offset within the page cache
* page of the wanted mft record.
*/
- index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
- ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+ index = mft_no << vol->mft_record_size_bits >> PAGE_SHIFT;
+ ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
/* The maximum valid index into the page cache for $MFT's data. */
i_size = i_size_read(mft_vi);
- end_index = i_size >> PAGE_CACHE_SHIFT;
+ end_index = i_size >> PAGE_SHIFT;
if (unlikely(index >= end_index)) {
if (unlikely(index > end_index || ofs + vol->mft_record_size >=
- (i_size & ~PAGE_CACHE_MASK))) {
+ (i_size & ~PAGE_MASK))) {
ntfs_error(vol->sb, "Tried to format non-existing mft "
"record 0x%llx.", (long long)mft_no);
return -ENOENT;
@@ -2515,8 +2515,8 @@ mft_rec_already_initialized:
* We now have allocated and initialized the mft record. Calculate the
* index of and the offset within the page cache page the record is in.
*/
- index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
- ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+ index = bit << vol->mft_record_size_bits >> PAGE_SHIFT;
+ ofs = (bit << vol->mft_record_size_bits) & ~PAGE_MASK;
/* Read, map, and pin the page containing the mft record. */
page = ntfs_map_page(vol->mft_ino->i_mapping, index);
if (IS_ERR(page)) {
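map_mft_record_page() turns an mft record number into a page index and offset by shifting through the record size; with the usual 1 KiB records, four fit per 4 KiB page. A sketch with those assumed sizes:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1L << PAGE_SHIFT)      /* assumed 4 KiB page   */
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define MFT_RECORD_SIZE_BITS 10            /* assumed 1 KiB record */

int main(void)
{
	unsigned long mft_no = 9;

	unsigned long index = (unsigned long long)mft_no
			<< MFT_RECORD_SIZE_BITS >> PAGE_SHIFT;
	unsigned long ofs = (mft_no << MFT_RECORD_SIZE_BITS) & ~PAGE_MASK;

	/* Record 9 is the second record (offset 1024) in page 2. */
	printf("page %lu, offset %lu\n", index, ofs);
	return 0;
}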
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index c581e26a350d..12de47b96ca9 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -43,7 +43,7 @@ typedef enum {
NTFS_MAX_NAME_LEN = 255,
NTFS_MAX_ATTR_NAME_LEN = 255,
NTFS_MAX_CLUSTER_SIZE = 64 * 1024, /* 64kiB */
- NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_CACHE_SIZE,
+ NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_SIZE,
} NTFS_CONSTANTS;
/* Global variables. */
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1b38abdaa3ed..ecb49870a680 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -823,14 +823,14 @@ static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
vol->mft_record_size_bits, vol->mft_record_size_bits);
/*
- * We cannot support mft record sizes above the PAGE_CACHE_SIZE since
+ * We cannot support mft record sizes above the PAGE_SIZE since
* we store $MFT/$DATA, the table of mft records in the page cache.
*/
- if (vol->mft_record_size > PAGE_CACHE_SIZE) {
+ if (vol->mft_record_size > PAGE_SIZE) {
ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
- "PAGE_CACHE_SIZE on your system (%lu). "
+ "PAGE_SIZE on your system (%lu). "
"This is not supported. Sorry.",
- vol->mft_record_size, PAGE_CACHE_SIZE);
+ vol->mft_record_size, PAGE_SIZE);
return false;
}
/* We cannot support mft record sizes below the sector size. */
@@ -1096,7 +1096,7 @@ static bool check_mft_mirror(ntfs_volume *vol)
ntfs_debug("Entering.");
/* Compare contents of $MFT and $MFTMirr. */
- mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size;
+ mrecs_per_page = PAGE_SIZE / vol->mft_record_size;
BUG_ON(!mrecs_per_page);
BUG_ON(!vol->mftmirr_size);
mft_page = mirr_page = NULL;
@@ -1615,20 +1615,20 @@ static bool load_and_init_attrdef(ntfs_volume *vol)
if (!vol->attrdef)
goto iput_failed;
index = 0;
- max_index = i_size >> PAGE_CACHE_SHIFT;
- size = PAGE_CACHE_SIZE;
+ max_index = i_size >> PAGE_SHIFT;
+ size = PAGE_SIZE;
while (index < max_index) {
/* Read the attrdef table and copy it into the linear buffer. */
read_partial_attrdef_page:
page = ntfs_map_page(ino->i_mapping, index);
if (IS_ERR(page))
goto free_iput_failed;
- memcpy((u8*)vol->attrdef + (index++ << PAGE_CACHE_SHIFT),
+ memcpy((u8*)vol->attrdef + (index++ << PAGE_SHIFT),
page_address(page), size);
ntfs_unmap_page(page);
};
- if (size == PAGE_CACHE_SIZE) {
- size = i_size & ~PAGE_CACHE_MASK;
+ if (size == PAGE_SIZE) {
+ size = i_size & ~PAGE_MASK;
if (size)
goto read_partial_attrdef_page;
}
@@ -1684,20 +1684,20 @@ static bool load_and_init_upcase(ntfs_volume *vol)
if (!vol->upcase)
goto iput_upcase_failed;
index = 0;
- max_index = i_size >> PAGE_CACHE_SHIFT;
- size = PAGE_CACHE_SIZE;
+ max_index = i_size >> PAGE_SHIFT;
+ size = PAGE_SIZE;
while (index < max_index) {
/* Read the upcase table and copy it into the linear buffer. */
read_partial_upcase_page:
page = ntfs_map_page(ino->i_mapping, index);
if (IS_ERR(page))
goto iput_upcase_failed;
- memcpy((char*)vol->upcase + (index++ << PAGE_CACHE_SHIFT),
+ memcpy((char*)vol->upcase + (index++ << PAGE_SHIFT),
page_address(page), size);
ntfs_unmap_page(page);
};
- if (size == PAGE_CACHE_SIZE) {
- size = i_size & ~PAGE_CACHE_MASK;
+ if (size == PAGE_SIZE) {
+ size = i_size & ~PAGE_MASK;
if (size)
goto read_partial_upcase_page;
}
@@ -2471,14 +2471,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
down_read(&vol->lcnbmp_lock);
/*
* Convert the number of bits into bytes rounded up, then convert into
- * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+ * multiples of PAGE_SIZE, rounding up so that if we have one
* full and one partial page max_index = 2.
*/
- max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
- /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+ max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
+ /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
- max_index, PAGE_CACHE_SIZE / 4);
+ max_index, PAGE_SIZE / 4);
for (index = 0; index < max_index; index++) {
unsigned long *kaddr;
@@ -2491,7 +2491,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
if (IS_ERR(page)) {
ntfs_debug("read_mapping_page() error. Skipping "
"page (index 0x%lx).", index);
- nr_free -= PAGE_CACHE_SIZE * 8;
+ nr_free -= PAGE_SIZE * 8;
continue;
}
kaddr = kmap_atomic(page);
@@ -2503,9 +2503,9 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
* ntfs_readpage().
*/
nr_free -= bitmap_weight(kaddr,
- PAGE_CACHE_SIZE * BITS_PER_BYTE);
+ PAGE_SIZE * BITS_PER_BYTE);
kunmap_atomic(kaddr);
- page_cache_release(page);
+ put_page(page);
}
ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
/*
@@ -2547,9 +2547,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
pgoff_t index;
ntfs_debug("Entering.");
- /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+ /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
- "0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
+ "0x%lx.", max_index, PAGE_SIZE / 4);
for (index = 0; index < max_index; index++) {
unsigned long *kaddr;
@@ -2562,7 +2562,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
if (IS_ERR(page)) {
ntfs_debug("read_mapping_page() error. Skipping "
"page (index 0x%lx).", index);
- nr_free -= PAGE_CACHE_SIZE * 8;
+ nr_free -= PAGE_SIZE * 8;
continue;
}
kaddr = kmap_atomic(page);
@@ -2574,9 +2574,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
* ntfs_readpage().
*/
nr_free -= bitmap_weight(kaddr,
- PAGE_CACHE_SIZE * BITS_PER_BYTE);
+ PAGE_SIZE * BITS_PER_BYTE);
kunmap_atomic(kaddr);
- page_cache_release(page);
+ put_page(page);
}
ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
index - 1);
@@ -2618,17 +2618,17 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
/* Type of filesystem. */
sfs->f_type = NTFS_SB_MAGIC;
/* Optimal transfer block size. */
- sfs->f_bsize = PAGE_CACHE_SIZE;
+ sfs->f_bsize = PAGE_SIZE;
/*
* Total data blocks in filesystem in units of f_bsize and since
* inodes are also stored in data blocks ($MFT is a file) this is just
* the total clusters.
*/
sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
/* Free data blocks in filesystem in units of f_bsize. */
size = get_nr_free_clusters(vol) << vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
if (size < 0LL)
size = 0LL;
/* Free blocks avail to non-superuser, same as above on NTFS. */
@@ -2639,11 +2639,11 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
/*
* Convert the maximum number of set bits into bytes rounded up, then
- * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we
+ * convert into multiples of PAGE_SIZE, rounding up so that if we
* have one full and one partial page max_index = 2.
*/
max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
- + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ + 7) >> 3) + PAGE_SIZE - 1) >> PAGE_SHIFT;
read_unlock_irqrestore(&mft_ni->size_lock, flags);
/* Number of inodes in filesystem (at this point in time). */
sfs->f_files = size;
@@ -2765,15 +2765,15 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
if (!parse_options(vol, (char*)opt))
goto err_out_now;
- /* We support sector sizes up to the PAGE_CACHE_SIZE. */
- if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
+ /* We support sector sizes up to the PAGE_SIZE. */
+ if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) {
if (!silent)
ntfs_error(sb, "Device has unsupported sector size "
"(%i). The maximum supported sector "
"size on this architecture is %lu "
"bytes.",
bdev_logical_block_size(sb->s_bdev),
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
goto err_out_now;
}
/*
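get_nr_free_clusters() sizes its scan by rounding the bitmap's byte length up to pages, then subtracts set bits one page at a time with bitmap_weight() over PAGE_SIZE * BITS_PER_BYTE bits. The rounding, isolated (assumed 4 KiB page, hypothetical volume):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1L << PAGE_SHIFT)   /* assumed 4 KiB page */

int main(void)
{
	long long nr_clusters = 100000;     /* hypothetical volume */

	/* bits -> bytes (round up) -> pages (round up) */
	long max_index = (((nr_clusters + 7) >> 3) + PAGE_SIZE - 1)
			>> PAGE_SHIFT;

	/* 100000 bits = 12500 bytes = 3 full pages + 1 partial, so 4. */
	printf("max_index = %ld\n", max_index);
	return 0;
}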
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 70907d638b60..e361d1a0ca09 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6671,7 +6671,7 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
{
int i;
struct page *page;
- unsigned int from, to = PAGE_CACHE_SIZE;
+ unsigned int from, to = PAGE_SIZE;
struct super_block *sb = inode->i_sb;
BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
@@ -6679,21 +6679,21 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
if (numpages == 0)
goto out;
- to = PAGE_CACHE_SIZE;
+ to = PAGE_SIZE;
for(i = 0; i < numpages; i++) {
page = pages[i];
- from = start & (PAGE_CACHE_SIZE - 1);
- if ((end >> PAGE_CACHE_SHIFT) == page->index)
- to = end & (PAGE_CACHE_SIZE - 1);
+ from = start & (PAGE_SIZE - 1);
+ if ((end >> PAGE_SHIFT) == page->index)
+ to = end & (PAGE_SIZE - 1);
- BUG_ON(from > PAGE_CACHE_SIZE);
- BUG_ON(to > PAGE_CACHE_SIZE);
+ BUG_ON(from > PAGE_SIZE);
+ BUG_ON(to > PAGE_SIZE);
ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
&phys);
- start = (page->index + 1) << PAGE_CACHE_SHIFT;
+ start = (page->index + 1) << PAGE_SHIFT;
}
out:
if (pages)
@@ -6712,7 +6712,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
numpages = 0;
last_page_bytes = PAGE_ALIGN(end);
- index = start >> PAGE_CACHE_SHIFT;
+ index = start >> PAGE_SHIFT;
do {
pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
if (!pages[numpages]) {
@@ -6723,7 +6723,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
numpages++;
index++;
- } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT));
+ } while (index < (last_page_bytes >> PAGE_SHIFT));
out:
if (ret != 0) {
@@ -6950,8 +6950,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
* to do that now.
*/
if (!ocfs2_sparse_alloc(osb) &&
- PAGE_CACHE_SIZE < osb->s_clustersize)
- end = PAGE_CACHE_SIZE;
+ PAGE_SIZE < osb->s_clustersize)
+ end = PAGE_SIZE;
ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
if (ret) {
@@ -6971,8 +6971,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
goto out_unlock;
}
- page_end = PAGE_CACHE_SIZE;
- if (PAGE_CACHE_SIZE > osb->s_clustersize)
+ page_end = PAGE_SIZE;
+ if (PAGE_SIZE > osb->s_clustersize)
page_end = osb->s_clustersize;
for (i = 0; i < num_pages; i++)
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1581240a7ca0..ad1577348a92 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -234,7 +234,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
size = i_size_read(inode);
- if (size > PAGE_CACHE_SIZE ||
+ if (size > PAGE_SIZE ||
size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
ocfs2_error(inode->i_sb,
"Inode %llu has with inline data has bad size: %Lu\n",
@@ -247,7 +247,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
if (size)
memcpy(kaddr, di->id2.i_data.id_data, size);
/* Clear the remaining part of the page */
- memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
+ memset(kaddr + size, 0, PAGE_SIZE - size);
flush_dcache_page(page);
kunmap_atomic(kaddr);
@@ -282,7 +282,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
+ loff_t start = (loff_t)page->index << PAGE_SHIFT;
int ret, unlock = 1;
trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
@@ -385,7 +385,7 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
* drop out in that case as it's not worth handling here.
*/
last = list_entry(pages->prev, struct page, lru);
- start = (loff_t)last->index << PAGE_CACHE_SHIFT;
+ start = (loff_t)last->index << PAGE_SHIFT;
if (start >= i_size_read(inode))
goto out_unlock;
@@ -511,12 +511,12 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
unsigned int *start,
unsigned int *end)
{
- unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;
+ unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;
- if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
+ if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
unsigned int cpp;
- cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);
+ cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);
cluster_start = cpos % cpp;
cluster_start = cluster_start << osb->s_clustersize_bits;
@@ -684,13 +684,13 @@ next_bh:
return ret;
}
-#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
+#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES 1
#else
-#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
+#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
#endif
-#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)
+#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)
struct ocfs2_unwritten_extent {
struct list_head ue_node;
@@ -785,7 +785,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
if (pages[i]) {
unlock_page(pages[i]);
mark_page_accessed(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
}
}
@@ -808,7 +808,7 @@ static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
}
}
mark_page_accessed(wc->w_target_page);
- page_cache_release(wc->w_target_page);
+ put_page(wc->w_target_page);
}
ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
}
@@ -857,7 +857,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
wc->w_di_bh = di_bh;
wc->w_type = type;
- if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
+ if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
wc->w_large_pages = 1;
else
wc->w_large_pages = 0;
@@ -920,7 +920,7 @@ static void ocfs2_write_failure(struct inode *inode,
loff_t user_pos, unsigned user_len)
{
int i;
- unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
+ unsigned from = user_pos & (PAGE_SIZE - 1),
to = user_pos + user_len;
struct page *tmppage;
@@ -960,7 +960,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
(page_offset(page) <= user_pos));
if (page == wc->w_target_page) {
- map_from = user_pos & (PAGE_CACHE_SIZE - 1);
+ map_from = user_pos & (PAGE_SIZE - 1);
map_to = map_from + user_len;
if (new)
@@ -1034,7 +1034,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
struct inode *inode = mapping->host;
loff_t last_byte;
- target_index = user_pos >> PAGE_CACHE_SHIFT;
+ target_index = user_pos >> PAGE_SHIFT;
/*
* Figure out how many pages we'll be manipulating here. For
@@ -1053,14 +1053,14 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
*/
last_byte = max(user_pos + user_len, i_size_read(inode));
BUG_ON(last_byte < 1);
- end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
+ end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
if ((start + wc->w_num_pages) > end_index)
wc->w_num_pages = end_index - start;
} else {
wc->w_num_pages = 1;
start = target_index;
}
- end_index = (user_pos + user_len - 1) >> PAGE_CACHE_SHIFT;
+ end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
for(i = 0; i < wc->w_num_pages; i++) {
index = start + i;
@@ -1082,7 +1082,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
goto out;
}
- page_cache_get(mmap_page);
+ get_page(mmap_page);
wc->w_pages[i] = mmap_page;
wc->w_target_locked = true;
} else if (index >= target_index && index <= end_index &&
@@ -1272,7 +1272,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
{
struct ocfs2_write_cluster_desc *desc;
- wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
+ wc->w_target_from = pos & (PAGE_SIZE - 1);
wc->w_target_to = wc->w_target_from + len;
if (alloc == 0)
@@ -1309,7 +1309,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
&wc->w_target_to);
} else {
wc->w_target_from = 0;
- wc->w_target_to = PAGE_CACHE_SIZE;
+ wc->w_target_to = PAGE_SIZE;
}
}
@@ -1981,7 +1981,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
struct page *page, void *fsdata)
{
int i, ret;
- unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from, to, start = pos & (PAGE_SIZE - 1);
struct inode *inode = mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_write_ctxt *wc = fsdata;
@@ -2027,8 +2027,8 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
from = wc->w_target_from;
to = wc->w_target_to;
- BUG_ON(from > PAGE_CACHE_SIZE ||
- to > PAGE_CACHE_SIZE ||
+ BUG_ON(from > PAGE_SIZE ||
+ to > PAGE_SIZE ||
to < from);
} else {
/*
@@ -2037,7 +2037,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
* to flush their entire range.
*/
from = 0;
- to = PAGE_CACHE_SIZE;
+ to = PAGE_SIZE;
}
if (page_has_buffers(tmppage)) {
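ocfs2 must handle both orderings of page size and cluster size, hence the paired shifts and the OCFS2_MAX_CTXT_PAGES #if above. A sketch of the boundary math for the small-cluster case (PAGE_SHIFT > clustersize_bits, assumed values):

#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumed 4 KiB page     */
#define CLUSTERSIZE_BITS 10              /* assumed 1 KiB clusters */

int main(void)
{
	unsigned int cpos = 7;               /* hypothetical cluster */

	/* Clusters per page, and where cpos starts within its page. */
	unsigned int cpp = 1u << (PAGE_SHIFT - CLUSTERSIZE_BITS); /* 4 */
	unsigned int cluster_start = (cpos % cpp) << CLUSTERSIZE_BITS;
	unsigned int cluster_end = cluster_start + (1u << CLUSTERSIZE_BITS);

	/* Cluster 7 is the last 1 KiB of its page: [3072, 4096). */
	printf("[%u, %u)\n", cluster_start, cluster_end);
	return 0;
}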
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index bd15929b5f92..1934abb6b680 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -417,13 +417,13 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
bio->bi_private = wc;
bio->bi_end_io = o2hb_bio_end_io;
- vec_start = (cs << bits) % PAGE_CACHE_SIZE;
+ vec_start = (cs << bits) % PAGE_SIZE;
while(cs < max_slots) {
current_page = cs / spp;
page = reg->hr_slot_data[current_page];
- vec_len = min(PAGE_CACHE_SIZE - vec_start,
- (max_slots-cs) * (PAGE_CACHE_SIZE/spp) );
+ vec_len = min(PAGE_SIZE - vec_start,
+ (max_slots-cs) * (PAGE_SIZE/spp) );
mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
current_page, vec_len, vec_start);
@@ -431,7 +431,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
len = bio_add_page(bio, page, vec_len, vec_start);
if (len != vec_len) break;
- cs += vec_len / (PAGE_CACHE_SIZE/spp);
+ cs += vec_len / (PAGE_SIZE/spp);
vec_start = 0;
}
@@ -1576,7 +1576,7 @@ static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
static void o2hb_init_region_params(struct o2hb_region *reg)
{
- reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits;
+ reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits;
reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;
mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
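The o2hb heartbeat region packs several disk blocks (slots) per page: spp = PAGE_SIZE >> block_bits, and each bio vector starts at the slot's byte offset modulo PAGE_SIZE. A sketch with assumed 512-byte heartbeat blocks:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)   /* assumed 4 KiB page */

int main(void)
{
	unsigned int bits = 9;               /* assumed 512 B blocks */
	unsigned int spp = PAGE_SIZE >> bits;              /* 8 slots */
	unsigned int cs = 12;                /* hypothetical slot    */

	unsigned int page = cs / spp;                      /* page 1  */
	unsigned int vec_start = (cs << bits) % PAGE_SIZE; /* 2048    */

	printf("slot %u: page %u, offset %u\n", cs, page, vec_start);
	return 0;
}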
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 03768bb3aab1..47b3b2d4e775 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -571,8 +571,8 @@ static int dlmfs_fill_super(struct super_block * sb,
int silent)
{
sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = DLMFS_MAGIC;
sb->s_op = &dlmfs_ops;
sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index c6fdcbd46bba..2bf23fd333ed 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -770,14 +770,14 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
- unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
+ unsigned long index = abs_from >> PAGE_SHIFT;
handle_t *handle;
int ret = 0;
unsigned zero_from, zero_to, block_start, block_end;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
BUG_ON(abs_from >= abs_to);
- BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
+ BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
BUG_ON(abs_from & (inode->i_blkbits - 1));
handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
@@ -794,10 +794,10 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
}
/* Get the offsets within the page that we want to zero */
- zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
- zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
+ zero_from = abs_from & (PAGE_SIZE - 1);
+ zero_to = abs_to & (PAGE_SIZE - 1);
if (!zero_to)
- zero_to = PAGE_CACHE_SIZE;
+ zero_to = PAGE_SIZE;
trace_ocfs2_write_zero_page(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -851,7 +851,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
out_unlock:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
out_commit_trans:
if (handle)
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
@@ -959,7 +959,7 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
BUG_ON(range_start >= range_end);
while (zero_pos < range_end) {
- next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
+ next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
if (next_pos > range_end)
next_pos = range_end;
rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 9ea081f4e6e4..71545ad4628c 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -65,13 +65,13 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
loff_t pos = page_offset(page);
- unsigned int len = PAGE_CACHE_SIZE;
+ unsigned int len = PAGE_SIZE;
pgoff_t last_index;
struct page *locked_page = NULL;
void *fsdata;
loff_t size = i_size_read(inode);
- last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+ last_index = (size - 1) >> PAGE_SHIFT;
/*
* There are cases that lead to the page no longer belonging to the
@@ -102,7 +102,7 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
* because the "write" would invalidate their data.
*/
if (page->index == last_index)
- len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
+ len = ((size - 1) & ~PAGE_MASK) + 1;
ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
&locked_page, &fsdata, di_bh, page);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 6cf6538a0651..e63af7ddfe68 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -822,10 +822,10 @@ static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb,
u32 clusters = pg_index;
unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
- if (unlikely(PAGE_CACHE_SHIFT > cbits))
- clusters = pg_index << (PAGE_CACHE_SHIFT - cbits);
- else if (PAGE_CACHE_SHIFT < cbits)
- clusters = pg_index >> (cbits - PAGE_CACHE_SHIFT);
+ if (unlikely(PAGE_SHIFT > cbits))
+ clusters = pg_index << (PAGE_SHIFT - cbits);
+ else if (PAGE_SHIFT < cbits)
+ clusters = pg_index >> (cbits - PAGE_SHIFT);
return clusters;
}
@@ -839,10 +839,10 @@ static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb,
unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
pgoff_t index = clusters;
- if (PAGE_CACHE_SHIFT > cbits) {
- index = (pgoff_t)clusters >> (PAGE_CACHE_SHIFT - cbits);
- } else if (PAGE_CACHE_SHIFT < cbits) {
- index = (pgoff_t)clusters << (cbits - PAGE_CACHE_SHIFT);
+ if (PAGE_SHIFT > cbits) {
+ index = (pgoff_t)clusters >> (PAGE_SHIFT - cbits);
+ } else if (PAGE_SHIFT < cbits) {
+ index = (pgoff_t)clusters << (cbits - PAGE_SHIFT);
}
return index;
@@ -853,8 +853,8 @@ static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb)
unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
unsigned int pages_per_cluster = 1;
- if (PAGE_CACHE_SHIFT < cbits)
- pages_per_cluster = 1 << (cbits - PAGE_CACHE_SHIFT);
+ if (PAGE_SHIFT < cbits)
+ pages_per_cluster = 1 << (cbits - PAGE_SHIFT);
return pages_per_cluster;
}
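The three ocfs2.h helpers above all hinge on the same shift arithmetic between page index and cluster index. A standalone sketch with assumed sizes (4 KiB pages, 8 KiB clusters; the demo_* names are illustrative, not ocfs2 identifiers):

	#include <stdio.h>

	static unsigned int demo_page_shift = 12;   /* 4 KiB pages */
	static unsigned int demo_cluster_bits = 13; /* 8 KiB clusters */

	/* Mirror of ocfs2_page_index_to_clusters() with runtime shift counts. */
	static unsigned long demo_page_index_to_clusters(unsigned long pg_index)
	{
		if (demo_page_shift > demo_cluster_bits)
			return pg_index << (demo_page_shift - demo_cluster_bits);
		if (demo_page_shift < demo_cluster_bits)
			return pg_index >> (demo_cluster_bits - demo_page_shift);
		return pg_index;
	}

	int main(void)
	{
		/* With 8 KiB clusters and 4 KiB pages, two pages share one cluster. */
		printf("page 4 -> cluster %lu\n", demo_page_index_to_clusters(4));
		printf("page 5 -> cluster %lu\n", demo_page_index_to_clusters(5));
		return 0;
	}

Both calls print cluster 2, matching pages_per_cluster == 2 for these sizes.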
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 3892f3c079ca..ab6a6cdcf91c 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -867,6 +867,10 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
int status = 0;
trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
+ if (!sb_has_quota_loaded(sb, type)) {
+ status = -ESRCH;
+ goto out;
+ }
status = ocfs2_lock_global_qf(info, 0);
if (status < 0)
goto out;
@@ -878,8 +882,11 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
out_global:
ocfs2_unlock_global_qf(info, 0);
out:
- /* Avoid logging ENOENT since it just means there isn't next ID */
- if (status && status != -ENOENT)
+ /*
+ * Avoid logging ENOENT, which just means there is no next ID, and
+ * ESRCH, which means quota isn't enabled for the filesystem.
+ */
+ if (status && status != -ENOENT && status != -ESRCH)
mlog_errno(status);
return status;
}
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 3eff031aaf26..744d5d90c363 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2937,16 +2937,16 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
end = i_size_read(inode);
while (offset < end) {
- page_index = offset >> PAGE_CACHE_SHIFT;
- map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+ page_index = offset >> PAGE_SHIFT;
+ map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
if (map_end > end)
map_end = end;
/* from, to is the offset within the page. */
- from = offset & (PAGE_CACHE_SIZE - 1);
- to = PAGE_CACHE_SIZE;
- if (map_end & (PAGE_CACHE_SIZE - 1))
- to = map_end & (PAGE_CACHE_SIZE - 1);
+ from = offset & (PAGE_SIZE - 1);
+ to = PAGE_SIZE;
+ if (map_end & (PAGE_SIZE - 1))
+ to = map_end & (PAGE_SIZE - 1);
page = find_or_create_page(mapping, page_index, GFP_NOFS);
if (!page) {
@@ -2956,10 +2956,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
}
/*
- * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+ * In case PAGE_SIZE <= CLUSTER_SIZE, this page
* can't be dirtied before we CoW it out.
*/
- if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+ if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
BUG_ON(PageDirty(page));
if (!PageUptodate(page)) {
@@ -2987,7 +2987,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
mark_page_accessed(page);
unlock:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
page = NULL;
offset = map_end;
if (ret)
@@ -3165,8 +3165,8 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
}
while (offset < end) {
- page_index = offset >> PAGE_CACHE_SHIFT;
- map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+ page_index = offset >> PAGE_SHIFT;
+ map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
if (map_end > end)
map_end = end;
@@ -3182,7 +3182,7 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
mark_page_accessed(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
page = NULL;
offset = map_end;
if (ret)
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 7db631e1c8b0..d7cae3327de5 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -605,8 +605,8 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
/*
* We might be limited by page cache size.
*/
- if (bytes > PAGE_CACHE_SIZE) {
- bytes = PAGE_CACHE_SIZE;
+ if (bytes > PAGE_SIZE) {
+ bytes = PAGE_SIZE;
trim = 1;
/*
* Shift by 31 here so that we don't get larger than
diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c
index f30b6ecacdd1..324f0af40d7b 100644
--- a/fs/orangefs/dir.c
+++ b/fs/orangefs/dir.c
@@ -153,7 +153,6 @@ static int orangefs_readdir(struct file *file, struct dir_context *ctx)
struct dentry *dentry = file->f_path.dentry;
struct orangefs_kernel_op_s *new_op = NULL;
struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode);
- int buffer_full = 0;
struct orangefs_readdir_response_s readdir_response;
void *dents_buf;
int i = 0;
@@ -235,7 +234,7 @@ get_new_buffer_index:
if (ret == -EIO && op_state_purged(new_op)) {
gossip_err("%s: Client is down. Aborting readdir call.\n",
__func__);
- goto out_slot;
+ goto out_free_op;
}
if (ret < 0 || new_op->downcall.status != 0) {
@@ -244,14 +243,14 @@ get_new_buffer_index:
new_op->downcall.status);
if (ret >= 0)
ret = new_op->downcall.status;
- goto out_slot;
+ goto out_free_op;
}
dents_buf = new_op->downcall.trailer_buf;
if (dents_buf == NULL) {
gossip_err("Invalid NULL buffer in readdir response\n");
ret = -ENOMEM;
- goto out_slot;
+ goto out_free_op;
}
bytes_decoded = decode_dirents(dents_buf, new_op->downcall.trailer_size,
@@ -350,8 +349,7 @@ get_new_buffer_index:
/*
* Did we hit the end of the directory?
*/
- if (readdir_response.token == ORANGEFS_READDIR_END &&
- !buffer_full) {
+ if (readdir_response.token == ORANGEFS_READDIR_END) {
gossip_debug(GOSSIP_DIR_DEBUG,
"End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n");
ctx->pos = ORANGEFS_READDIR_END;
@@ -363,8 +361,6 @@ out_destroy_handle:
out_vfree:
gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", dents_buf);
vfree(dents_buf);
-out_slot:
- orangefs_readdir_index_put(buffer_index);
out_free_op:
op_release(new_op);
gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret);
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 2382e267b49e..85640e955cde 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -18,8 +18,8 @@ static int read_one_page(struct page *page)
int max_block;
ssize_t bytes_read = 0;
struct inode *inode = page->mapping->host;
- const __u32 blocksize = PAGE_CACHE_SIZE; /* inode->i_blksize */
- const __u32 blockbits = PAGE_CACHE_SHIFT; /* inode->i_blkbits */
+ const __u32 blocksize = PAGE_SIZE; /* inode->i_blksize */
+ const __u32 blockbits = PAGE_SHIFT; /* inode->i_blkbits */
struct iov_iter to;
struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
@@ -86,7 +86,7 @@ static int orangefs_readpages(struct file *file,
"failure adding page to cache, read_one_page returned: %d\n",
ret);
} else {
- page_cache_release(page);
+ put_page(page);
}
}
BUG_ON(!list_empty(pages));
@@ -204,22 +204,8 @@ static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
if (ret != 0)
return ret;
- /*
- * Only change the c/mtime if we are changing the size or we are
- * explicitly asked to change it. This handles the semantic difference
- * between truncate() and ftruncate() as implemented in the VFS.
- *
- * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
- * special case where we need to update the times despite not having
- * these flags set. For all other operations the VFS set these flags
- * explicitly if it wants a timestamp update.
- */
- if (orig_size != i_size_read(inode) &&
- !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
- iattr->ia_ctime = iattr->ia_mtime =
- current_fs_time(inode->i_sb);
+ if (orig_size != i_size_read(inode))
iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
- }
return ret;
}
@@ -328,7 +314,7 @@ static int orangefs_init_iops(struct inode *inode)
case S_IFREG:
inode->i_op = &orangefs_file_inode_operations;
inode->i_fop = &orangefs_file_operations;
- inode->i_blkbits = PAGE_CACHE_SHIFT;
+ inode->i_blkbits = PAGE_SHIFT;
break;
case S_IFLNK:
inode->i_op = &orangefs_symlink_inode_operations;
@@ -456,7 +442,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- inode->i_size = PAGE_CACHE_SIZE;
+ inode->i_size = PAGE_SIZE;
inode->i_rdev = dev;
error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 1f8acc9f9a88..75375e90a63f 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -170,7 +170,7 @@ orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
int i;
for (i = 0; i < bufmap->page_count; i++)
- page_cache_release(bufmap->page_array[i]);
+ put_page(bufmap->page_array[i]);
}
static void
@@ -299,7 +299,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
for (i = 0; i < ret; i++) {
SetPageError(bufmap->page_array[i]);
- page_cache_release(bufmap->page_array[i]);
+ put_page(bufmap->page_array[i]);
}
return -ENOMEM;
}
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 19670b8b4053..1714a737d556 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -126,8 +126,7 @@ out:
void orangefs_debugfs_cleanup(void)
{
- if (debug_dir)
- debugfs_remove_recursive(debug_dir);
+ debugfs_remove_recursive(debug_dir);
}
/* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 40f5163b56aa..2d129b5886ee 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -303,7 +303,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
}
break;
case S_IFDIR:
- inode->i_size = PAGE_CACHE_SIZE;
+ inode->i_size = PAGE_SIZE;
orangefs_inode->blksize = (1 << inode->i_blkbits);
spin_lock(&inode->i_lock);
inode_set_bytes(inode, inode->i_size);
@@ -315,9 +315,13 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
inode->i_size = (loff_t)strlen(new_op->
downcall.resp.getattr.link_target);
orangefs_inode->blksize = (1 << inode->i_blkbits);
- strlcpy(orangefs_inode->link_target,
+ ret = strscpy(orangefs_inode->link_target,
new_op->downcall.resp.getattr.link_target,
ORANGEFS_NAME_MAX);
+ if (ret == -E2BIG) {
+ ret = -EIO;
+ goto out;
+ }
inode->i_link = orangefs_inode->link_target;
}
break;
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
index 45ce4ff4cbc7..1efc6f8a5224 100644
--- a/fs/orangefs/protocol.h
+++ b/fs/orangefs/protocol.h
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <linux/slab.h>
@@ -74,8 +75,8 @@ static inline void ORANGEFS_khandle_to(const struct orangefs_khandle *kh,
void *p, int size)
{
- memset(p, 0, size);
memcpy(p, kh->u, 16);
+ memset(p + 16, 0, size - 16);
}
@@ -407,7 +408,7 @@ enum {
* space. Zero signifies the upstream version of the kernel module.
*/
#define ORANGEFS_KERNEL_PROTO_VERSION 0
-#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20904
+#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20903
/*
* describes memory regions to map in the ORANGEFS_DEV_MAP ioctl.
@@ -427,26 +428,28 @@ struct ORANGEFS_dev_map_desc {
/* gossip.h *****************************************************************/
#ifdef GOSSIP_DISABLE_DEBUG
-#define gossip_debug(mask, format, f...) do {} while (0)
+#define gossip_debug(mask, fmt, ...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+} while (0)
#else
extern __u64 gossip_debug_mask;
extern struct client_debug_mask client_debug_mask;
/* try to avoid function call overhead by checking masks in macro */
-#define gossip_debug(mask, format, f...) \
-do { \
- if (gossip_debug_mask & mask) \
- printk(format, ##f); \
+#define gossip_debug(mask, fmt, ...) \
+do { \
+ if (gossip_debug_mask & (mask)) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
} while (0)
#endif /* GOSSIP_DISABLE_DEBUG */
/* do file and line number printouts w/ the GNU preprocessor */
-#define gossip_ldebug(mask, format, f...) \
- gossip_debug(mask, "%s: " format, __func__, ##f)
-
-#define gossip_err printk
-#define gossip_lerr(format, f...) \
- gossip_err("%s line %d: " format, \
- __FILE__, \
- __LINE__, \
- ##f)
+#define gossip_ldebug(mask, fmt, ...) \
+ gossip_debug(mask, "%s: " fmt, __func__, ##__VA_ARGS__)
+
+#define gossip_err pr_err
+#define gossip_lerr(fmt, ...) \
+ gossip_err("%s line %d: " fmt, \
+ __FILE__, __LINE__, ##__VA_ARGS__)
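The rewritten GOSSIP_DISABLE_DEBUG stub above is more than cosmetic: wrapping the call in if (0) keeps the printk() and its arguments visible to the compiler, so format-string mistakes are caught even in builds where debugging is compiled out, while the dead branch is eliminated entirely. A userspace rendition of the same trick (demo_debug is my name; ##__VA_ARGS__ is the GNU extension the kernel relies on):

	#include <stdio.h>

	#define demo_debug(fmt, ...)			\
	do {						\
		if (0)					\
			printf(fmt, ##__VA_ARGS__);	\
	} while (0)

	int main(void)
	{
		demo_debug("value = %d\n", 42);	/* compiles away to nothing */
		/* demo_debug("value = %d\n", "x"); would draw a format warning */
		return 0;
	}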
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 6a4c0f7ce5c1..99c19545752c 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -73,10 +73,6 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
"%s: prefix %s name %s, buffer_size %zd\n",
__func__, prefix, name, size);
- if (name == NULL || (size > 0 && buffer == NULL)) {
- gossip_err("orangefs_inode_getxattr: bogus NULL pointers\n");
- return -EINVAL;
- }
if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) {
gossip_err("Invalid key length (%d)\n",
(int)(strlen(name) + strlen(prefix)));
@@ -146,8 +142,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
goto out_release_op;
}
- memset(buffer, 0, size);
memcpy(buffer, new_op->downcall.resp.getxattr.val, length);
+ memset(buffer + length, 0, size - length);
gossip_debug(GOSSIP_XATTR_DEBUG,
"orangefs_inode_getxattr: inode %pU "
"key %s key_sz %d, val_len %d\n",
@@ -239,8 +235,7 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
"%s: prefix %s, name %s, buffer_size %zd\n",
__func__, prefix, name, size);
- if (size < 0 ||
- size >= ORANGEFS_MAX_XATTR_VALUELEN ||
+ if (size >= ORANGEFS_MAX_XATTR_VALUELEN ||
flags < 0) {
gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
(int)size,
@@ -248,12 +243,6 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
return -EINVAL;
}
- if (name == NULL ||
- (size > 0 && value == NULL)) {
- gossip_err("orangefs_inode_setxattr: bogus NULL pointers!\n");
- return -EINVAL;
- }
-
internal_flag = convert_to_internal_xattr_flags(flags);
if (prefix) {
@@ -353,10 +342,6 @@ ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size)
gossip_err("%s: bogus NULL pointers\n", __func__);
return -EINVAL;
}
- if (size < 0) {
- gossip_err("Invalid size (%d)\n", (int)size);
- return -EINVAL;
- }
down_read(&orangefs_inode->xattr_sem);
new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR);
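The ORANGEFS_khandle_to() hunk in protocol.h and the getxattr hunk above make the same micro-change: instead of zeroing the whole destination and then overwriting its head, copy the payload first and zero only the tail. Each byte is written once and the caller still never sees stale data past the copied length. A minimal standalone sketch (copy_and_zero_tail is my name, not a kernel helper):

	#include <string.h>

	/* Copy 'len' payload bytes into a 'size'-byte destination and zero
	 * only the remainder. Caller must ensure len <= size. */
	static void copy_and_zero_tail(void *dst, const void *src,
				       size_t len, size_t size)
	{
		memcpy(dst, src, len);
		memset((char *)dst + len, 0, size - len);
	}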
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 14cab381cece..29c4d3250725 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -295,6 +295,37 @@ static void ovl_dentry_release(struct dentry *dentry)
}
}
+static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
+{
+ struct dentry *real;
+
+ if (d_is_dir(dentry)) {
+ if (!inode || inode == d_inode(dentry))
+ return dentry;
+ goto bug;
+ }
+
+ real = ovl_dentry_upper(dentry);
+ if (real && (!inode || inode == d_inode(real)))
+ return real;
+
+ real = ovl_dentry_lower(dentry);
+ if (!real)
+ goto bug;
+
+ if (!inode || inode == d_inode(real))
+ return real;
+
+ /* Handle recursion */
+ if (real->d_flags & DCACHE_OP_REAL)
+ return real->d_op->d_real(real, inode);
+
+bug:
+ WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
+ inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+ return dentry;
+}
+
static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{
struct ovl_entry *oe = dentry->d_fsdata;
@@ -339,11 +370,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
static const struct dentry_operations ovl_dentry_operations = {
.d_release = ovl_dentry_release,
.d_select_inode = ovl_d_select_inode,
+ .d_real = ovl_d_real,
};
static const struct dentry_operations ovl_reval_dentry_operations = {
.d_release = ovl_dentry_release,
.d_select_inode = ovl_d_select_inode,
+ .d_real = ovl_d_real,
.d_revalidate = ovl_dentry_revalidate,
.d_weak_revalidate = ovl_dentry_weak_revalidate,
};
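The new d_real op above lets a stacking filesystem hand back the dentry that actually carries an inode's data. Callers are expected to reach it through a small VFS helper along these lines (a sketch assuming the 4.6-era signature and the DCACHE_OP_REAL flag used above; demo_d_real is my name, and this needs kernel headers rather than being a standalone program):

	/* Sketch of a d_real caller: fall through to the dentry itself
	 * when the filesystem does not stack. */
	static inline struct dentry *demo_d_real(struct dentry *dentry,
						 struct inode *inode)
	{
		if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
			return dentry->d_op->d_real(dentry, inode);
		return dentry;
	}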
diff --git a/fs/pipe.c b/fs/pipe.c
index ab8dad3ccb6a..0d3f5165cb0b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -134,7 +134,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
if (page_count(page) == 1 && !pipe->tmp_page)
pipe->tmp_page = page;
else
- page_cache_release(page);
+ put_page(page);
}
/**
@@ -180,7 +180,7 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
*/
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
- page_cache_get(buf->page);
+ get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(generic_pipe_buf_confirm);
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- page_cache_release(buf->page);
+ put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9df431642042..229cb546bee0 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -553,7 +553,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
if (radix_tree_exceptional_entry(page))
mss->swap += PAGE_SIZE;
else
- page_cache_release(page);
+ put_page(page);
return;
}
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 55bb57e6a30d..8afe10cf7df8 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -279,12 +279,12 @@ static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!page)
return VM_FAULT_OOM;
if (!PageUptodate(page)) {
- offset = (loff_t) index << PAGE_CACHE_SHIFT;
+ offset = (loff_t) index << PAGE_SHIFT;
buf = __va((page_to_pfn(page) << PAGE_SHIFT));
rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
if (rc < 0) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
}
SetPageUptodate(page);
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index dc645b66cd79..45d6110744cb 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -420,8 +420,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
pstore_sb = sb;
sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = PSTOREFS_MAGIC;
sb->s_op = &pstore_ops;
sb->s_time_gran = 1;
diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c
index e1f37278cf97..144ceda4948e 100644
--- a/fs/qnx6/dir.c
+++ b/fs/qnx6/dir.c
@@ -35,9 +35,9 @@ static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
static unsigned last_entry(struct inode *inode, unsigned long page_nr)
{
unsigned long last_byte = inode->i_size;
- last_byte -= page_nr << PAGE_CACHE_SHIFT;
- if (last_byte > PAGE_CACHE_SIZE)
- last_byte = PAGE_CACHE_SIZE;
+ last_byte -= page_nr << PAGE_SHIFT;
+ if (last_byte > PAGE_SIZE)
+ last_byte = PAGE_SIZE;
return last_byte / QNX6_DIR_ENTRY_SIZE;
}
@@ -47,9 +47,9 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
{
struct qnx6_sb_info *sbi = QNX6_SB(sb);
u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
- u32 n = s >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits); /* in pages */
+ u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */
/* within page */
- u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_CACHE_MASK;
+ u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK;
struct address_space *mapping = sbi->longfile->i_mapping;
struct page *page = read_mapping_page(mapping, n, NULL);
if (IS_ERR(page))
@@ -115,8 +115,8 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
struct qnx6_sb_info *sbi = QNX6_SB(s);
loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
unsigned long npages = dir_pages(inode);
- unsigned long n = pos >> PAGE_CACHE_SHIFT;
- unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE;
+ unsigned long n = pos >> PAGE_SHIFT;
+ unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
bool done = false;
ctx->pos = pos;
@@ -131,7 +131,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
if (IS_ERR(page)) {
pr_err("%s(): read failed\n", __func__);
- ctx->pos = (n + 1) << PAGE_CACHE_SHIFT;
+ ctx->pos = (n + 1) << PAGE_SHIFT;
return PTR_ERR(page);
}
de = ((struct qnx6_dir_entry *)page_address(page)) + start;
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 47bb1de07155..1192422a1c56 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -542,8 +542,8 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
iget_failed(inode);
return ERR_PTR(-EIO);
}
- n = (ino - 1) >> (PAGE_CACHE_SHIFT - QNX6_INODE_SIZE_BITS);
- offs = (ino - 1) & (~PAGE_CACHE_MASK >> QNX6_INODE_SIZE_BITS);
+ n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS);
+ offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS);
mapping = sbi->inodes->i_mapping;
page = read_mapping_page(mapping, n, NULL);
if (IS_ERR(page)) {
diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
index d3fb2b698800..f23b5c4a66ad 100644
--- a/fs/qnx6/qnx6.h
+++ b/fs/qnx6/qnx6.h
@@ -128,7 +128,7 @@ extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s,
static inline void qnx6_put_page(struct page *page)
{
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ba827daea5a0..ff21980d0119 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2047,11 +2047,20 @@ int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
struct quota_info *dqopt = sb_dqopt(sb);
int err;
- if (!dqopt->ops[qid->type]->get_next_id)
- return -ENOSYS;
+ mutex_lock(&dqopt->dqonoff_mutex);
+ if (!sb_has_quota_active(sb, qid->type)) {
+ err = -ESRCH;
+ goto out;
+ }
+ if (!dqopt->ops[qid->type]->get_next_id) {
+ err = -ENOSYS;
+ goto out;
+ }
mutex_lock(&dqopt->dqio_mutex);
err = dqopt->ops[qid->type]->get_next_id(sb, qid);
mutex_unlock(&dqopt->dqio_mutex);
+out:
+ mutex_unlock(&dqopt->dqonoff_mutex);
return err;
}
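The dquot_get_next_id() change above fixes an ordering problem: the hook must not run while quotas are being switched off. Its control flow reduces to the sketch below, with comments on what each lock covers (an illustrative rearrangement of the patched function, not the patch text itself):

	static int demo_get_next_id(struct super_block *sb, struct kqid *qid)
	{
		struct quota_info *dqopt = sb_dqopt(sb);
		int err;

		/* dqonoff_mutex pins the quota-on/off state for the duration. */
		mutex_lock(&dqopt->dqonoff_mutex);
		if (!sb_has_quota_active(sb, qid->type))
			err = -ESRCH;		/* quota not enabled for this type */
		else if (!dqopt->ops[qid->type]->get_next_id)
			err = -ENOSYS;		/* format has no next-ID support */
		else {
			/* dqio_mutex serializes the on-disk lookup itself. */
			mutex_lock(&dqopt->dqio_mutex);
			err = dqopt->ops[qid->type]->get_next_id(sb, qid);
			mutex_unlock(&dqopt->dqio_mutex);
		}
		mutex_unlock(&dqopt->dqonoff_mutex);
		return err;
	}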
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 38981b037524..1ab6e6c2e60e 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -223,8 +223,8 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent)
return err;
sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = RAMFS_MAGIC;
sb->s_op = &ramfs_ops;
sb->s_time_gran = 1;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 9ffaf7145644..90f815bdfa8a 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -180,11 +180,11 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
int partial = 0;
unsigned blocksize;
struct buffer_head *bh, *head;
- unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
+ unsigned long i_size_index = inode->i_size >> PAGE_SHIFT;
int new;
int logit = reiserfs_file_data_log(inode);
struct super_block *s = inode->i_sb;
- int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
+ int bh_per_page = PAGE_SIZE / s->s_blocksize;
struct reiserfs_transaction_handle th;
int ret = 0;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ae9e5b308cf9..d5c2e9c865de 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -386,7 +386,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
goto finished;
}
/* read file tail into part of page */
- offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
+ offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1);
copy_item_head(&tmp_ih, ih);
/*
@@ -587,10 +587,10 @@ static int convert_tail_for_hole(struct inode *inode,
return -EIO;
/* always try to read until the end of the block */
- tail_start = tail_offset & (PAGE_CACHE_SIZE - 1);
+ tail_start = tail_offset & (PAGE_SIZE - 1);
tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
- index = tail_offset >> PAGE_CACHE_SHIFT;
+ index = tail_offset >> PAGE_SHIFT;
/*
* hole_page can be zero in case of direct_io, we are sure
* that we cannot get here if we write with O_DIRECT into tail page
@@ -629,7 +629,7 @@ static int convert_tail_for_hole(struct inode *inode,
unlock:
if (tail_page != hole_page) {
unlock_page(tail_page);
- page_cache_release(tail_page);
+ put_page(tail_page);
}
out:
return retval;
@@ -2189,11 +2189,11 @@ static int grab_tail_page(struct inode *inode,
* we want the page with the last byte in the file,
* not the page that will hold the next byte for appending
*/
- unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
+ unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
unsigned long pos = 0;
unsigned long start = 0;
unsigned long blocksize = inode->i_sb->s_blocksize;
- unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1);
+ unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
struct buffer_head *bh;
struct buffer_head *head;
struct page *page;
@@ -2251,7 +2251,7 @@ out:
unlock:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return error;
}
@@ -2265,7 +2265,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
{
struct reiserfs_transaction_handle th;
/* we want the offset for the first byte after the end of the file */
- unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+ unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
unsigned blocksize = inode->i_sb->s_blocksize;
unsigned length;
struct page *page = NULL;
@@ -2345,7 +2345,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
}
}
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
reiserfs_write_unlock(inode->i_sb);
@@ -2354,7 +2354,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
out:
if (page) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
reiserfs_write_unlock(inode->i_sb);
@@ -2426,7 +2426,7 @@ research:
} else if (is_direct_le_ih(ih)) {
char *p;
p = page_address(bh_result->b_page);
- p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1);
+ p += (byte_offset - 1) & (PAGE_SIZE - 1);
copy_size = ih_item_len(ih) - pos_in_item;
fs_gen = get_generation(inode->i_sb);
@@ -2525,7 +2525,7 @@ static int reiserfs_write_full_page(struct page *page,
struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
- unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+ unsigned long end_index = inode->i_size >> PAGE_SHIFT;
int error = 0;
unsigned long block;
sector_t last_block;
@@ -2535,7 +2535,7 @@ static int reiserfs_write_full_page(struct page *page,
int checked = PageChecked(page);
struct reiserfs_transaction_handle th;
struct super_block *s = inode->i_sb;
- int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
+ int bh_per_page = PAGE_SIZE / s->s_blocksize;
th.t_trans_id = 0;
/* no logging allowed when nonblocking or from PF_MEMALLOC */
@@ -2564,16 +2564,16 @@ static int reiserfs_write_full_page(struct page *page,
if (page->index >= end_index) {
unsigned last_offset;
- last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+ last_offset = inode->i_size & (PAGE_SIZE - 1);
/* no file contents in this page */
if (page->index >= end_index + 1 || !last_offset) {
unlock_page(page);
return 0;
}
- zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
+ zero_user_segment(page, last_offset, PAGE_SIZE);
}
bh = head;
- block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
+ block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
/* first map all the buffers, logging any direct items we find */
do {
@@ -2774,7 +2774,7 @@ static int reiserfs_write_begin(struct file *file,
*fsdata = (void *)(unsigned long)flags;
}
- index = pos >> PAGE_CACHE_SHIFT;
+ index = pos >> PAGE_SHIFT;
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
@@ -2822,7 +2822,7 @@ static int reiserfs_write_begin(struct file *file,
}
if (ret) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
/* Truncate allocated blocks */
reiserfs_truncate_failed_write(inode);
}
@@ -2909,7 +2909,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
else
th = NULL;
- start = pos & (PAGE_CACHE_SIZE - 1);
+ start = pos & (PAGE_SIZE - 1);
if (unlikely(copied < len)) {
if (!PageUptodate(page))
copied = 0;
@@ -2974,7 +2974,7 @@ out:
if (locked)
reiserfs_write_unlock(inode->i_sb);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (pos + len > inode->i_size)
reiserfs_truncate_failed_write(inode);
@@ -2996,7 +2996,7 @@ int reiserfs_commit_write(struct file *f, struct page *page,
unsigned from, unsigned to)
{
struct inode *inode = page->mapping->host;
- loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to;
+ loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
int ret = 0;
int update_sd = 0;
struct reiserfs_transaction_handle *th = NULL;
@@ -3181,7 +3181,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
struct inode *inode = page->mapping->host;
unsigned int curr_off = 0;
unsigned int stop = offset + length;
- int partial_page = (offset || length < PAGE_CACHE_SIZE);
+ int partial_page = (offset || length < PAGE_SIZE);
int ret = 1;
BUG_ON(!PageLocked(page));
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 036a1fc0a8c3..57045f423893 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -203,7 +203,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
* __reiserfs_write_begin on that page. This will force a
* reiserfs_get_block to unpack the tail for us.
*/
- index = inode->i_size >> PAGE_CACHE_SHIFT;
+ index = inode->i_size >> PAGE_SHIFT;
mapping = inode->i_mapping;
page = grab_cache_page(mapping, index);
retval = -ENOMEM;
@@ -221,7 +221,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
out_unlock:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
out:
inode_unlock(inode);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 44c2bdced1c8..2ace90e981f0 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -599,18 +599,18 @@ static int journal_list_still_alive(struct super_block *s,
* This does a check to see if the buffer belongs to one of these
* lost pages before doing the final put_bh. If page->mapping was
* null, it tries to free buffers on the page, which should make the
- * final page_cache_release drop the page from the lru.
+ * final put_page drop the page from the lru.
*/
static void release_buffer_page(struct buffer_head *bh)
{
struct page *page = bh->b_page;
if (!page->mapping && trylock_page(page)) {
- page_cache_get(page);
+ get_page(page);
put_bh(bh);
if (!page->mapping)
try_to_free_buffers(page);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
} else {
put_bh(bh);
}
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 24cbe013240f..5feacd689241 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1342,7 +1342,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
*/
data = kmap_atomic(un_bh->b_page);
- off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
+ off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_SIZE - 1));
memcpy(data + off,
ih_item_body(PATH_PLAST_BUFFER(path), &s_ih),
ret_value);
@@ -1511,7 +1511,7 @@ static void unmap_buffers(struct page *page, loff_t pos)
if (page) {
if (page_has_buffers(page)) {
- tail_index = pos & (PAGE_CACHE_SIZE - 1);
+ tail_index = pos & (PAGE_SIZE - 1);
cur_index = 0;
head = page_buffers(page);
bh = head;
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index f41e19b4bb42..2d5489b0a269 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -151,7 +151,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
*/
if (up_to_date_bh) {
unsigned pgoff =
- (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
+ (tail_offset + total_tail - 1) & (PAGE_SIZE - 1);
char *kaddr = kmap_atomic(up_to_date_bh->b_page);
memset(kaddr + pgoff, 0, blk_size - total_tail);
kunmap_atomic(kaddr);
@@ -271,7 +271,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
* the page was locked and this part of the page was up to date when
* indirect2direct was called, so we know the bytes are still valid
*/
- tail = tail + (pos & (PAGE_CACHE_SIZE - 1));
+ tail = tail + (pos & (PAGE_SIZE - 1));
PATH_LAST_POSITION(path)++;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 02137bbda0ec..a33812ae9fad 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -415,7 +415,7 @@ out:
static inline void reiserfs_put_page(struct page *page)
{
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
static struct page *reiserfs_get_page(struct inode *dir, size_t n)
@@ -427,7 +427,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
* and an unlink/rmdir has just occurred - GFP_NOFS avoids this
*/
mapping_set_gfp_mask(mapping, GFP_NOFS);
- page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL);
+ page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL);
if (!IS_ERR(page)) {
kmap(page);
if (PageError(page))
@@ -526,10 +526,10 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
while (buffer_pos < buffer_size || buffer_pos == 0) {
size_t chunk;
size_t skip = 0;
- size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1));
+ size_t page_offset = (file_pos & (PAGE_SIZE - 1));
- if (buffer_size - buffer_pos > PAGE_CACHE_SIZE)
- chunk = PAGE_CACHE_SIZE;
+ if (buffer_size - buffer_pos > PAGE_SIZE)
+ chunk = PAGE_SIZE;
else
chunk = buffer_size - buffer_pos;
@@ -546,8 +546,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
struct reiserfs_xattr_header *rxh;
skip = file_pos = sizeof(struct reiserfs_xattr_header);
- if (chunk + skip > PAGE_CACHE_SIZE)
- chunk = PAGE_CACHE_SIZE - skip;
+ if (chunk + skip > PAGE_SIZE)
+ chunk = PAGE_SIZE - skip;
rxh = (struct reiserfs_xattr_header *)data;
rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC);
rxh->h_hash = cpu_to_le32(xahash);
@@ -675,8 +675,8 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
char *data;
size_t skip = 0;
- if (isize - file_pos > PAGE_CACHE_SIZE)
- chunk = PAGE_CACHE_SIZE;
+ if (isize - file_pos > PAGE_SIZE)
+ chunk = PAGE_SIZE;
else
chunk = isize - file_pos;
diff --git a/fs/splice.c b/fs/splice.c
index 9947b5c69664..b018eb485019 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -88,7 +88,7 @@ out_unlock:
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- page_cache_release(buf->page);
+ put_page(buf->page);
buf->flags &= ~PIPE_BUF_FLAG_LRU;
}
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(splice_to_pipe);
void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
{
- page_cache_release(spd->pages[i]);
+ put_page(spd->pages[i]);
}
/*
@@ -328,9 +328,9 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
- index = *ppos >> PAGE_CACHE_SHIFT;
- loff = *ppos & ~PAGE_CACHE_MASK;
- req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ index = *ppos >> PAGE_SHIFT;
+ loff = *ppos & ~PAGE_MASK;
+ req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
nr_pages = min(req_pages, spd.nr_pages_max);
/*
@@ -365,7 +365,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
error = add_to_page_cache_lru(page, mapping, index,
mapping_gfp_constraint(mapping, GFP_KERNEL));
if (unlikely(error)) {
- page_cache_release(page);
+ put_page(page);
if (error == -EEXIST)
continue;
break;
@@ -385,7 +385,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
* Now loop over the map and see if we need to start IO on any
* pages, fill in the partial map, etc.
*/
- index = *ppos >> PAGE_CACHE_SHIFT;
+ index = *ppos >> PAGE_SHIFT;
nr_pages = spd.nr_pages;
spd.nr_pages = 0;
for (page_nr = 0; page_nr < nr_pages; page_nr++) {
@@ -397,7 +397,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
/*
* this_len is the max we'll use from this page
*/
- this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
+ this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
page = spd.pages[page_nr];
if (PageReadahead(page))
@@ -426,7 +426,7 @@ retry_lookup:
error = -ENOMEM;
break;
}
- page_cache_release(spd.pages[page_nr]);
+ put_page(spd.pages[page_nr]);
spd.pages[page_nr] = page;
}
/*
@@ -456,7 +456,7 @@ fill_it:
* i_size must be checked after PageUptodate.
*/
isize = i_size_read(mapping->host);
- end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+ end_index = (isize - 1) >> PAGE_SHIFT;
if (unlikely(!isize || index > end_index))
break;
@@ -470,7 +470,7 @@ fill_it:
/*
* max good bytes in this page
*/
- plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+ plen = ((isize - 1) & ~PAGE_MASK) + 1;
if (plen <= loff)
break;
@@ -494,8 +494,8 @@ fill_it:
* we got, 'nr_pages' is how many pages are in the map.
*/
while (page_nr < nr_pages)
- page_cache_release(spd.pages[page_nr++]);
- in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+ put_page(spd.pages[page_nr++]);
+ in->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
if (spd.nr_pages)
error = splice_to_pipe(pipe, &spd);
@@ -636,8 +636,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
goto shrink_ret;
}
- offset = *ppos & ~PAGE_CACHE_MASK;
- nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ offset = *ppos & ~PAGE_MASK;
+ nr_pages = (len + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
struct page *page;
@@ -647,7 +647,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
if (!page)
goto err;
- this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
+ this_len = min_t(size_t, len, PAGE_SIZE - offset);
vec[i].iov_base = (void __user *) page_address(page);
vec[i].iov_len = this_len;
spd.pages[i] = page;
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 0cea9b9236d0..2c2618410d51 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -181,11 +181,11 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
in = min(bytes, msblk->devblksize - offset);
bytes -= in;
while (in) {
- if (pg_offset == PAGE_CACHE_SIZE) {
+ if (pg_offset == PAGE_SIZE) {
data = squashfs_next_page(output);
pg_offset = 0;
}
- avail = min_t(int, in, PAGE_CACHE_SIZE -
+ avail = min_t(int, in, PAGE_SIZE -
pg_offset);
memcpy(data + pg_offset, bh[k]->b_data + offset,
avail);
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 1cb70a0b2168..23813c078cc9 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -30,7 +30,7 @@
* access the metadata and fragment caches.
*
* To avoid out of memory and fragmentation issues with vmalloc the cache
- * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
+ * uses sequences of kmalloced PAGE_SIZE buffers.
*
* It should be noted that the cache is not used for file datablocks, these
* are decompressed and cached in the page-cache in the normal way. The
@@ -231,7 +231,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
/*
* Initialise cache allocating the specified number of entries, each of
* size block_size. To avoid vmalloc fragmentation issues each entry
- * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
+ * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
*/
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
int block_size)
@@ -255,7 +255,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
cache->unused = entries;
cache->entries = entries;
cache->block_size = block_size;
- cache->pages = block_size >> PAGE_CACHE_SHIFT;
+ cache->pages = block_size >> PAGE_SHIFT;
cache->pages = cache->pages ? cache->pages : 1;
cache->name = name;
cache->num_waiters = 0;
@@ -275,7 +275,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
}
for (j = 0; j < cache->pages; j++) {
- entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (entry->data[j] == NULL) {
ERROR("Failed to allocate %s buffer\n", name);
goto cleanup;
@@ -314,10 +314,10 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
return min(length, entry->length - offset);
while (offset < entry->length) {
- void *buff = entry->data[offset / PAGE_CACHE_SIZE]
- + (offset % PAGE_CACHE_SIZE);
+ void *buff = entry->data[offset / PAGE_SIZE]
+ + (offset % PAGE_SIZE);
int bytes = min_t(int, entry->length - offset,
- PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));
+ PAGE_SIZE - (offset % PAGE_SIZE));
if (bytes >= remaining) {
memcpy(buffer, buff, remaining);
@@ -415,7 +415,7 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
*/
void *squashfs_read_table(struct super_block *sb, u64 block, int length)
{
- int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
int i, res;
void *table, *buffer, **data;
struct squashfs_page_actor *actor;
@@ -436,7 +436,7 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length)
goto failed2;
}
- for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
+ for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
data[i] = buffer;
res = squashfs_read_data(sb, block, length |
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index e9034bf6e5ae..d2bc13636f79 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -102,7 +102,7 @@ static void *get_comp_opts(struct super_block *sb, unsigned short flags)
* Read decompressor specific options from file system if present
*/
if (SQUASHFS_COMP_OPTS(flags)) {
- buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (buffer == NULL) {
comp_opts = ERR_PTR(-ENOMEM);
goto out;
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index e5c9689062ba..13d80947bf9e 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -175,7 +175,7 @@ static long long read_indexes(struct super_block *sb, int n,
{
int err, i;
long long block = 0;
- __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ __le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (blist == NULL) {
ERROR("read_indexes: Failed to allocate block_list\n");
@@ -183,7 +183,7 @@ static long long read_indexes(struct super_block *sb, int n,
}
while (n) {
- int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2);
+ int blocks = min_t(int, n, PAGE_SIZE >> 2);
err = squashfs_read_metadata(sb, blist, start_block,
offset, blocks << 2);
@@ -377,19 +377,19 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
struct inode *inode = page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
void *pageaddr;
- int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+ int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
int start_index = page->index & ~mask, end_index = start_index | mask;
/*
* Loop copying datablock into pages. As the datablock likely covers
- * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly
+ * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
* grab the pages from the page cache, except for the page that we've
* been called to fill.
*/
for (i = start_index; i <= end_index && bytes > 0; i++,
- bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
+ bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
struct page *push_page;
- int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0;
+ int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
@@ -404,14 +404,14 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
pageaddr = kmap_atomic(push_page);
squashfs_copy_data(pageaddr, buffer, offset, avail);
- memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+ memset(pageaddr + avail, 0, PAGE_SIZE - avail);
kunmap_atomic(pageaddr);
flush_dcache_page(push_page);
SetPageUptodate(push_page);
skip_page:
unlock_page(push_page);
if (i != page->index)
- page_cache_release(push_page);
+ put_page(push_page);
}
}
@@ -454,7 +454,7 @@ static int squashfs_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
- int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
+ int index = page->index >> (msblk->block_log - PAGE_SHIFT);
int file_end = i_size_read(inode) >> msblk->block_log;
int res;
void *pageaddr;
@@ -462,8 +462,8 @@ static int squashfs_readpage(struct file *file, struct page *page)
TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
page->index, squashfs_i(inode)->start);
- if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT))
+ if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
+ PAGE_SHIFT))
goto out;
if (index < file_end || squashfs_i(inode)->fragment_block ==
@@ -487,7 +487,7 @@ error_out:
SetPageError(page);
out:
pageaddr = kmap_atomic(page);
- memset(pageaddr, 0, PAGE_CACHE_SIZE);
+ memset(pageaddr, 0, PAGE_SIZE);
kunmap_atomic(pageaddr);
flush_dcache_page(page);
if (!PageError(page))
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index 43e7a7eddac0..cb485d8e0e91 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -30,8 +30,8 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
struct inode *inode = target_page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
- int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
- int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+ int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+ int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
int start_index = target_page->index & ~mask;
int end_index = start_index | mask;
int i, n, pages, missing_pages, bytes, res = -ENOMEM;
@@ -68,7 +68,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
if (PageUptodate(page[i])) {
unlock_page(page[i]);
- page_cache_release(page[i]);
+ put_page(page[i]);
page[i] = NULL;
missing_pages++;
}
@@ -96,10 +96,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
goto mark_errored;
/* Last page may have trailing bytes not filled */
- bytes = res % PAGE_CACHE_SIZE;
+ bytes = res % PAGE_SIZE;
if (bytes) {
pageaddr = kmap_atomic(page[pages - 1]);
- memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
+ memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
kunmap_atomic(pageaddr);
}
@@ -109,7 +109,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
SetPageUptodate(page[i]);
unlock_page(page[i]);
if (page[i] != target_page)
- page_cache_release(page[i]);
+ put_page(page[i]);
}
kfree(actor);
@@ -127,7 +127,7 @@ mark_errored:
flush_dcache_page(page[i]);
SetPageError(page[i]);
unlock_page(page[i]);
- page_cache_release(page[i]);
+ put_page(page[i]);
}
out:
@@ -153,21 +153,21 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
}
for (n = 0; n < pages && bytes > 0; n++,
- bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
- int avail = min_t(int, bytes, PAGE_CACHE_SIZE);
+ bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
+ int avail = min_t(int, bytes, PAGE_SIZE);
if (page[n] == NULL)
continue;
pageaddr = kmap_atomic(page[n]);
squashfs_copy_data(pageaddr, buffer, offset, avail);
- memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+ memset(pageaddr + avail, 0, PAGE_SIZE - avail);
kunmap_atomic(pageaddr);
flush_dcache_page(page[n]);
SetPageUptodate(page[n]);
unlock_page(page[n]);
if (page[n] != target_page)
- page_cache_release(page[n]);
+ put_page(page[n]);
}
out:
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
index c31e2bc9c081..ff4468bd18b0 100644
--- a/fs/squashfs/lz4_wrapper.c
+++ b/fs/squashfs/lz4_wrapper.c
@@ -117,13 +117,13 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
data = squashfs_first_page(output);
buff = stream->output;
while (data) {
- if (bytes <= PAGE_CACHE_SIZE) {
+ if (bytes <= PAGE_SIZE) {
memcpy(data, buff, bytes);
break;
}
- memcpy(data, buff, PAGE_CACHE_SIZE);
- buff += PAGE_CACHE_SIZE;
- bytes -= PAGE_CACHE_SIZE;
+ memcpy(data, buff, PAGE_SIZE);
+ buff += PAGE_SIZE;
+ bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
}
squashfs_finish_page(output);
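The lz4 and lzo wrappers above (and the xz/zlib ones below) share one loop shape: walk the destination one PAGE_SIZE page at a time, copying from the decompressor's linear output buffer until it is drained. A standalone sketch with a plain pointer array standing in for the squashfs page actor (all demo_* names are mine):

	#include <string.h>

	#define DEMO_PAGE_SIZE 4096	/* stand-in for PAGE_SIZE */

	static void demo_copy_to_pages(void **pages, int npages,
				       const char *buff, size_t bytes)
	{
		int i;

		for (i = 0; i < npages && bytes > 0; i++) {
			/* The last page may take less than a full page. */
			size_t chunk = bytes < DEMO_PAGE_SIZE ? bytes : DEMO_PAGE_SIZE;

			memcpy(pages[i], buff, chunk);
			buff += chunk;
			bytes -= chunk;
		}
	}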
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 244b9fbfff7b..934c17e96590 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -102,13 +102,13 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
data = squashfs_first_page(output);
buff = stream->output;
while (data) {
- if (bytes <= PAGE_CACHE_SIZE) {
+ if (bytes <= PAGE_SIZE) {
memcpy(data, buff, bytes);
break;
} else {
- memcpy(data, buff, PAGE_CACHE_SIZE);
- buff += PAGE_CACHE_SIZE;
- bytes -= PAGE_CACHE_SIZE;
+ memcpy(data, buff, PAGE_SIZE);
+ buff += PAGE_SIZE;
+ bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
}
}
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
index 5a1c11f56441..9b7b1b6a7892 100644
--- a/fs/squashfs/page_actor.c
+++ b/fs/squashfs/page_actor.c
@@ -48,7 +48,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
if (actor == NULL)
return NULL;
- actor->length = length ? : pages * PAGE_CACHE_SIZE;
+ actor->length = length ? : pages * PAGE_SIZE;
actor->buffer = buffer;
actor->pages = pages;
actor->next_page = 0;
@@ -88,7 +88,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
if (actor == NULL)
return NULL;
- actor->length = length ? : pages * PAGE_CACHE_SIZE;
+ actor->length = length ? : pages * PAGE_SIZE;
actor->page = page;
actor->pages = pages;
actor->next_page = 0;
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
index 26dd82008b82..98537eab27e2 100644
--- a/fs/squashfs/page_actor.h
+++ b/fs/squashfs/page_actor.h
@@ -24,7 +24,7 @@ static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
if (actor == NULL)
return NULL;
- actor->length = length ? : pages * PAGE_CACHE_SIZE;
+ actor->length = length ? : pages * PAGE_SIZE;
actor->page = page;
actor->pages = pages;
actor->next_page = 0;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 5e79bfa4f260..cf01e15a7b16 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -152,7 +152,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
* Check the system page size is not larger than the filesystem
* block size (by default 128K). This is currently not supported.
*/
- if (PAGE_CACHE_SIZE > msblk->block_size) {
+ if (PAGE_SIZE > msblk->block_size) {
ERROR("Page size > filesystem block size (%d). This is "
"currently not supported!\n", msblk->block_size);
goto failed_mount;
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index dbcc2f54bad4..d688ef42a6a1 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -48,10 +48,10 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
struct inode *inode = page->mapping->host;
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
- int index = page->index << PAGE_CACHE_SHIFT;
+ int index = page->index << PAGE_SHIFT;
u64 block = squashfs_i(inode)->start;
int offset = squashfs_i(inode)->offset;
- int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
+ int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
int bytes, copied;
void *pageaddr;
struct squashfs_cache_entry *entry;
@@ -94,7 +94,7 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
length - bytes);
if (copied == length - bytes)
- memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
+ memset(pageaddr + length, 0, PAGE_SIZE - length);
else
block = entry->next_index;
kunmap_atomic(pageaddr);
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index c609624e4b8a..6bfaef73d065 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -141,7 +141,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
stream->buf.in_pos = 0;
stream->buf.in_size = 0;
stream->buf.out_pos = 0;
- stream->buf.out_size = PAGE_CACHE_SIZE;
+ stream->buf.out_size = PAGE_SIZE;
stream->buf.out = squashfs_first_page(output);
do {
@@ -158,7 +158,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
stream->buf.out = squashfs_next_page(output);
if (stream->buf.out != NULL) {
stream->buf.out_pos = 0;
- total += PAGE_CACHE_SIZE;
+ total += PAGE_SIZE;
}
}
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 8727caba6882..2ec24d128bce 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -69,7 +69,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
int zlib_err, zlib_init = 0, k = 0;
z_stream *stream = strm;
- stream->avail_out = PAGE_CACHE_SIZE;
+ stream->avail_out = PAGE_SIZE;
stream->next_out = squashfs_first_page(output);
stream->avail_in = 0;
@@ -85,7 +85,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (stream->avail_out == 0) {
stream->next_out = squashfs_next_page(output);
if (stream->next_out != NULL)
- stream->avail_out = PAGE_CACHE_SIZE;
+ stream->avail_out = PAGE_SIZE;
}
if (!zlib_init) {
diff --git a/fs/sync.c b/fs/sync.c
index dd5d1711c7ac..2a54c1f22035 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -302,7 +302,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
goto out;
if (sizeof(pgoff_t) == 4) {
- if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
+ if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
/*
* The range starts outside a 32 bit machine's
* pagecache addressing capabilities. Let it "succeed"
@@ -310,7 +310,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
ret = 0;
goto out;
}
- if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
+ if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
/*
* Out to EOF
*/
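The sync_file_range() hunk above rests on a size argument: with a 32-bit pgoff_t the page cache can index at most 2^32 pages, so any offset at or past 0x100000000ULL << PAGE_SHIFT cannot have cached data and may trivially "succeed". A quick check of the arithmetic (assuming PAGE_SHIFT == 12, i.e. 4 KiB pages):

	#include <stdio.h>

	int main(void)
	{
		/* 2^32 pages * 4 KiB/page = 2^44 bytes = 16 TiB. */
		unsigned long long limit = 0x100000000ULL << 12;

		printf("32-bit pagecache limit: %llu bytes (%llu TiB)\n",
		       limit, limit >> 40);
		return 0;
	}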
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 63c1bcb224ee..c0f0a3e643eb 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -30,7 +30,7 @@ const struct file_operations sysv_dir_operations = {
static inline void dir_put_page(struct page *page)
{
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
@@ -73,8 +73,8 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
if (pos >= inode->i_size)
return 0;
- offset = pos & ~PAGE_CACHE_MASK;
- n = pos >> PAGE_CACHE_SHIFT;
+ offset = pos & ~PAGE_MASK;
+ n = pos >> PAGE_SHIFT;
for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit;
@@ -85,7 +85,7 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
continue;
kaddr = (char *)page_address(page);
de = (struct sysv_dir_entry *)(kaddr+offset);
- limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+ limit = kaddr + PAGE_SIZE - SYSV_DIRSIZE;
for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) {
char *name = de->name;
@@ -146,7 +146,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
if (!IS_ERR(page)) {
kaddr = (char*)page_address(page);
de = (struct sysv_dir_entry *) kaddr;
- kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+ kaddr += PAGE_SIZE - SYSV_DIRSIZE;
for ( ; (char *) de <= kaddr ; de++) {
if (!de->inode)
continue;
@@ -190,7 +190,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
goto out;
kaddr = (char*)page_address(page);
de = (struct sysv_dir_entry *)kaddr;
- kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+ kaddr += PAGE_SIZE - SYSV_DIRSIZE;
while ((char *)de <= kaddr) {
if (!de->inode)
goto got_it;
@@ -261,7 +261,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
kmap(page);
base = (char*)page_address(page);
- memset(base, 0, PAGE_CACHE_SIZE);
+ memset(base, 0, PAGE_SIZE);
de = (struct sysv_dir_entry *) base;
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
@@ -273,7 +273,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
kunmap(page);
err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
fail:
- page_cache_release(page);
+ put_page(page);
return err;
}
@@ -296,7 +296,7 @@ int sysv_empty_dir(struct inode * inode)
kaddr = (char *)page_address(page);
de = (struct sysv_dir_entry *)kaddr;
- kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE;
+ kaddr += PAGE_SIZE-SYSV_DIRSIZE;
for ( ;(char *)de <= kaddr; de++) {
if (!de->inode)
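
sysv_readdir() above splits the byte position into a page number and an offset within that page, then walks fixed-size entries up to the page limit. A sketch of the split, with 4 KiB pages assumed:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long pos = 9000;                /* example directory position */
	unsigned long n = pos >> PAGE_SHIFT;     /* page index: 2 */
	unsigned long offset = pos & ~PAGE_MASK; /* offset in page: 808 */

	printf("pos %lu -> page %lu, offset %lu\n", pos, n, offset);
	printf("reassembled: %lu\n", (n << PAGE_SHIFT) + offset);
	return 0;
}
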
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 11e83ed0b4bf..90b60c03b588 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -264,11 +264,11 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
out_dir:
if (dir_de) {
kunmap(dir_page);
- page_cache_release(dir_page);
+ put_page(dir_page);
}
out_old:
kunmap(old_page);
- page_cache_release(old_page);
+ put_page(old_page);
out:
return err;
}
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 065c88f8e4b8..446753d8ac34 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -121,7 +121,7 @@ static int do_readpage(struct page *page)
if (block >= beyond) {
/* Reading beyond inode */
SetPageChecked(page);
- memset(addr, 0, PAGE_CACHE_SIZE);
+ memset(addr, 0, PAGE_SIZE);
goto out;
}
@@ -223,7 +223,7 @@ static int write_begin_slow(struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
struct ubifs_budget_req req = { .new_page = 1 };
int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
struct page *page;
@@ -254,13 +254,13 @@ static int write_begin_slow(struct address_space *mapping,
}
if (!PageUptodate(page)) {
- if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
+ if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
SetPageChecked(page);
else {
err = do_readpage(page);
if (err) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
ubifs_release_budget(c, &req);
return err;
}
@@ -428,7 +428,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
int skipped_read = 0;
struct page *page;
@@ -446,7 +446,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
if (!PageUptodate(page)) {
/* The page is not loaded from the flash */
- if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
+ if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
/*
* We change whole page so no need to load it. But we
* do not know whether this page exists on the media or
@@ -462,7 +462,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
err = do_readpage(page);
if (err) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return err;
}
}
@@ -494,7 +494,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
mutex_unlock(&ui->ui_mutex);
}
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return write_begin_slow(mapping, pos, len, pagep, flags);
}
@@ -549,12 +549,12 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
inode->i_ino, pos, page->index, len, copied, inode->i_size);
- if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
+ if (unlikely(copied < len && len == PAGE_SIZE)) {
/*
* VFS copied less data to the page than it intended and
* declared in its '->write_begin()' call via the @len
* argument. If the page was not up-to-date, and @len was
- * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did
+ * @PAGE_SIZE, the 'ubifs_write_begin()' function did
* not load it from the media (for optimization reasons). This
* means that part of the page contains garbage. So read the
* page now.
@@ -593,7 +593,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
out:
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return copied;
}
@@ -621,10 +621,10 @@ static int populate_page(struct ubifs_info *c, struct page *page,
addr = zaddr = kmap(page);
- end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+ end_index = (i_size - 1) >> PAGE_SHIFT;
if (!i_size || page->index > end_index) {
hole = 1;
- memset(addr, 0, PAGE_CACHE_SIZE);
+ memset(addr, 0, PAGE_SIZE);
goto out_hole;
}
@@ -673,7 +673,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
}
if (end_index == page->index) {
- int len = i_size & (PAGE_CACHE_SIZE - 1);
+ int len = i_size & (PAGE_SIZE - 1);
if (len && len < read)
memset(zaddr + len, 0, read - len);
@@ -773,7 +773,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
isize = i_size_read(inode);
if (isize == 0)
goto out_free;
- end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+ end_index = ((isize - 1) >> PAGE_SHIFT);
for (page_idx = 1; page_idx < page_cnt; page_idx++) {
pgoff_t page_offset = offset + page_idx;
@@ -788,7 +788,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
if (!PageUptodate(page))
err = populate_page(c, page, bu, &n);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
if (err)
break;
}
@@ -905,7 +905,7 @@ static int do_writepage(struct page *page, int len)
#ifdef UBIFS_DEBUG
struct ubifs_inode *ui = ubifs_inode(inode);
spin_lock(&ui->ui_lock);
- ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);
+ ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT);
spin_unlock(&ui->ui_lock);
#endif
@@ -1001,8 +1001,8 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
struct inode *inode = page->mapping->host;
struct ubifs_inode *ui = ubifs_inode(inode);
loff_t i_size = i_size_read(inode), synced_i_size;
- pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
- int err, len = i_size & (PAGE_CACHE_SIZE - 1);
+ pgoff_t end_index = i_size >> PAGE_SHIFT;
+ int err, len = i_size & (PAGE_SIZE - 1);
void *kaddr;
dbg_gen("ino %lu, pg %lu, pg flags %#lx",
@@ -1021,7 +1021,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
/* Is the page fully inside @i_size? */
if (page->index < end_index) {
- if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
+ if (page->index >= synced_i_size >> PAGE_SHIFT) {
err = inode->i_sb->s_op->write_inode(inode, NULL);
if (err)
goto out_unlock;
@@ -1034,7 +1034,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
* with this.
*/
}
- return do_writepage(page, PAGE_CACHE_SIZE);
+ return do_writepage(page, PAGE_SIZE);
}
/*
@@ -1045,7 +1045,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
* writes to that region are not written out to the file."
*/
kaddr = kmap_atomic(page);
- memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
+ memset(kaddr + len, 0, PAGE_SIZE - len);
flush_dcache_page(page);
kunmap_atomic(kaddr);
@@ -1138,7 +1138,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
truncate_setsize(inode, new_size);
if (offset) {
- pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
+ pgoff_t index = new_size >> PAGE_SHIFT;
struct page *page;
page = find_lock_page(inode->i_mapping, index);
@@ -1157,9 +1157,9 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
clear_page_dirty_for_io(page);
if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
offset = new_size &
- (PAGE_CACHE_SIZE - 1);
+ (PAGE_SIZE - 1);
err = do_writepage(page, offset);
- page_cache_release(page);
+ put_page(page);
if (err)
goto out_budg;
/*
@@ -1173,7 +1173,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
* having to read it.
*/
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
}
}
@@ -1285,7 +1285,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset,
struct ubifs_info *c = inode->i_sb->s_fs_info;
ubifs_assert(PagePrivate(page));
- if (offset || length < PAGE_CACHE_SIZE)
+ if (offset || length < PAGE_SIZE)
/* Partial page remains dirty */
return;
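
The write_begin()/write_end() pair above skips reading a page from flash only when the write is page-aligned and covers the whole page; write_end() then has to fall back to do_readpage() if the VFS copied less than it promised. A sketch of the fast-path predicate, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static int covers_whole_page(unsigned long long pos, unsigned len)
{
	/* The !(pos & ~PAGE_MASK) && len == PAGE_SIZE test above. */
	return !(pos & ~PAGE_MASK) && len == PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", covers_whole_page(8192, 4096)); /* aligned, full: 1 */
	printf("%d\n", covers_whole_page(8192, 100));  /* partial write: 0 */
	printf("%d\n", covers_whole_page(8200, 4096)); /* misaligned:    0 */
	return 0;
}
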
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index a233ba913be4..e98c24ee25a1 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2237,12 +2237,12 @@ static int __init ubifs_init(void)
BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
/*
- * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to
+ * We require that PAGE_SIZE is greater-than-or-equal-to
* UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
*/
- if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
+ if (PAGE_SIZE < UBIFS_BLOCK_SIZE) {
pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
- current->pid, (unsigned int)PAGE_CACHE_SIZE);
+ current->pid, (unsigned int)PAGE_SIZE);
return -EINVAL;
}
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 536fb495f2f1..12e79e60c176 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -46,8 +46,8 @@
#define UBIFS_SUPER_MAGIC 0x24051905
/* Number of UBIFS blocks per VFS page */
-#define UBIFS_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / UBIFS_BLOCK_SIZE)
-#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_CACHE_SHIFT - UBIFS_BLOCK_SHIFT)
+#define UBIFS_BLOCKS_PER_PAGE (PAGE_SIZE / UBIFS_BLOCK_SIZE)
+#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_SHIFT - UBIFS_BLOCK_SHIFT)
/* "File system end of life" sequence number watermark */
#define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL
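
The derived constants above follow directly from the shifts; the ubifs_init() check earlier only requires PAGE_SIZE >= UBIFS_BLOCK_SIZE, with both powers of two so the division is exact. A sketch with both sizes assumed to be 4096 (one block per page, shift of zero):

#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_SIZE         (1U << PAGE_SHIFT)
#define UBIFS_BLOCK_SHIFT 12
#define UBIFS_BLOCK_SIZE  (1U << UBIFS_BLOCK_SHIFT)

#define UBIFS_BLOCKS_PER_PAGE       (PAGE_SIZE / UBIFS_BLOCK_SIZE)
#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_SHIFT - UBIFS_BLOCK_SHIFT)

int main(void)
{
	printf("blocks/page = %u, shift = %d\n",
	       UBIFS_BLOCKS_PER_PAGE, UBIFS_BLOCKS_PER_PAGE_SHIFT);
	return 0;
}
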
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 1af98963d860..877ba1c9b461 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -46,7 +46,7 @@ static void __udf_adinicb_readpage(struct page *page)
kaddr = kmap(page);
memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
- memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
+ memset(kaddr + inode->i_size, 0, PAGE_SIZE - inode->i_size);
flush_dcache_page(page);
SetPageUptodate(page);
kunmap(page);
@@ -87,14 +87,14 @@ static int udf_adinicb_write_begin(struct file *file,
{
struct page *page;
- if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
+ if (WARN_ON_ONCE(pos >= PAGE_SIZE))
return -EIO;
page = grab_cache_page_write_begin(mapping, 0, flags);
if (!page)
return -ENOMEM;
*pagep = page;
- if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
+ if (!PageUptodate(page) && len != PAGE_SIZE)
__udf_adinicb_readpage(page);
return 0;
}
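
An in-ICB (adinicb) file lives entirely inside the inode, so it is at most one page and a read is a copy plus tail zeroing; that is also why write_begin() warns on pos >= PAGE_SIZE. A sketch under that assumption, where struct tiny_inode is a made-up stand-in for the UDF inode info:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct tiny_inode {
	size_t i_size;
	char   i_data[PAGE_SIZE];   /* stand-in for iinfo->i_ext.i_data */
};

static void adinicb_readpage(const struct tiny_inode *inode, char *page)
{
	memcpy(page, inode->i_data, inode->i_size);
	memset(page + inode->i_size, 0, PAGE_SIZE - inode->i_size);
}

int main(void)
{
	struct tiny_inode ino = { .i_size = 11 };
	char page[PAGE_SIZE];

	memcpy(ino.i_data, "hello inode", 11);
	adinicb_readpage(&ino, page);
	printf("%.11s, tail byte = %d\n", page, page[PAGE_SIZE - 1]);
	return 0;
}
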
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 166d3ed32c39..2dc461eeb415 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -287,7 +287,7 @@ int udf_expand_file_adinicb(struct inode *inode)
if (!PageUptodate(page)) {
kaddr = kmap(page);
memset(kaddr + iinfo->i_lenAlloc, 0x00,
- PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
+ PAGE_SIZE - iinfo->i_lenAlloc);
memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
iinfo->i_lenAlloc);
flush_dcache_page(page);
@@ -319,7 +319,7 @@ int udf_expand_file_adinicb(struct inode *inode)
inode->i_data.a_ops = &udf_adinicb_aops;
up_write(&iinfo->i_data_sem);
}
- page_cache_release(page);
+ put_page(page);
mark_inode_dirty(inode);
return err;
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index dc5fae601c24..0447b949c7f5 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -237,7 +237,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
sector_t newb, struct page *locked_page)
{
const unsigned blks_per_page =
- 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ 1 << (PAGE_SHIFT - inode->i_blkbits);
const unsigned mask = blks_per_page - 1;
struct address_space * const mapping = inode->i_mapping;
pgoff_t index, cur_index, last_index;
@@ -255,9 +255,9 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
cur_index = locked_page->index;
end = count + beg;
- last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
for (i = beg; i < end; i = (i | mask) + 1) {
- index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ index = i >> (PAGE_SHIFT - inode->i_blkbits);
if (likely(cur_index != index)) {
page = ufs_get_locked_page(mapping, index);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 74f2e80288bf..0b1457292734 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -62,7 +62,7 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
static inline void ufs_put_page(struct page *page)
{
kunmap(page);
- page_cache_release(page);
+ put_page(page);
}
ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
@@ -111,13 +111,13 @@ static void ufs_check_page(struct page *page)
struct super_block *sb = dir->i_sb;
char *kaddr = page_address(page);
unsigned offs, rec_len;
- unsigned limit = PAGE_CACHE_SIZE;
+ unsigned limit = PAGE_SIZE;
const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
struct ufs_dir_entry *p;
char *error;
- if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
- limit = dir->i_size & ~PAGE_CACHE_MASK;
+ if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+ limit = dir->i_size & ~PAGE_MASK;
if (limit & chunk_mask)
goto Ebadsize;
if (!limit)
@@ -170,7 +170,7 @@ Einumber:
bad_entry:
ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
"offset=%lu, rec_len=%d, name_len=%d",
- dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
rec_len, ufs_get_de_namlen(sb, p));
goto fail;
Eend:
@@ -178,7 +178,7 @@ Eend:
ufs_error(sb, __func__,
"entry in directory #%lu spans the page boundary"
"offset=%lu",
- dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
+ dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
fail:
SetPageChecked(page);
SetPageError(page);
@@ -211,9 +211,9 @@ ufs_last_byte(struct inode *inode, unsigned long page_nr)
{
unsigned last_byte = inode->i_size;
- last_byte -= page_nr << PAGE_CACHE_SHIFT;
- if (last_byte > PAGE_CACHE_SIZE)
- last_byte = PAGE_CACHE_SIZE;
+ last_byte -= page_nr << PAGE_SHIFT;
+ if (last_byte > PAGE_SIZE)
+ last_byte = PAGE_SIZE;
return last_byte;
}
@@ -341,7 +341,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
kaddr = page_address(page);
dir_end = kaddr + ufs_last_byte(dir, n);
de = (struct ufs_dir_entry *)kaddr;
- kaddr += PAGE_CACHE_SIZE - reclen;
+ kaddr += PAGE_SIZE - reclen;
while ((char *)de <= kaddr) {
if ((char *)de == dir_end) {
/* We hit i_size */
@@ -432,8 +432,8 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
loff_t pos = ctx->pos;
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
- unsigned int offset = pos & ~PAGE_CACHE_MASK;
- unsigned long n = pos >> PAGE_CACHE_SHIFT;
+ unsigned int offset = pos & ~PAGE_MASK;
+ unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
int need_revalidate = file->f_version != inode->i_version;
@@ -454,14 +454,14 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
ufs_error(sb, __func__,
"bad page in #%lu",
inode->i_ino);
- ctx->pos += PAGE_CACHE_SIZE - offset;
+ ctx->pos += PAGE_SIZE - offset;
return -EIO;
}
kaddr = page_address(page);
if (unlikely(need_revalidate)) {
if (offset) {
offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
- ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+ ctx->pos = (n<<PAGE_SHIFT) + offset;
}
file->f_version = inode->i_version;
need_revalidate = 0;
@@ -574,7 +574,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
kmap(page);
base = (char*)page_address(page);
- memset(base, 0, PAGE_CACHE_SIZE);
+ memset(base, 0, PAGE_SIZE);
de = (struct ufs_dir_entry *) base;
@@ -594,7 +594,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
err = ufs_commit_chunk(page, 0, chunk_size);
fail:
- page_cache_release(page);
+ put_page(page);
return err;
}
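
ufs_check_page() above scans every directory page to a full PAGE_SIZE, except the last page, which stops at the remainder of i_size (ufs_last_byte() expresses the same clamp). A sketch of that bound, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long scan_limit(unsigned long i_size, unsigned long page_index)
{
	unsigned long limit = PAGE_SIZE;
	if ((i_size >> PAGE_SHIFT) == page_index) /* this is the last page */
		limit = i_size & ~PAGE_MASK;
	return limit;
}

int main(void)
{
	printf("%lu\n", scan_limit(10000, 0));    /* 4096: interior page */
	printf("%lu\n", scan_limit(10000, 2));    /* 1808: final partial page */
	return 0;
}
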
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index d897e169ab9c..9f49431e798d 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -1051,13 +1051,13 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
lastfrag--;
lastpage = ufs_get_locked_page(mapping, lastfrag >>
- (PAGE_CACHE_SHIFT - inode->i_blkbits));
+ (PAGE_SHIFT - inode->i_blkbits));
if (IS_ERR(lastpage)) {
err = -EIO;
goto out;
}
- end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
+ end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
bh = page_buffers(lastpage);
for (i = 0; i < end; ++i)
bh = bh->b_this_page;
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index acf4a3b61b81..a1559f762805 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -305,7 +305,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0);
else {
kunmap(dir_page);
- page_cache_release(dir_page);
+ put_page(dir_page);
}
inode_dec_link_count(old_dir);
}
@@ -315,11 +315,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
out_dir:
if (dir_de) {
kunmap(dir_page);
- page_cache_release(dir_page);
+ put_page(dir_page);
}
out_old:
kunmap(old_page);
- page_cache_release(old_page);
+ put_page(old_page);
out:
return err;
}
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index b6c2f94e041e..a409e3e7827a 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -261,14 +261,14 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
if (unlikely(page->mapping == NULL)) {
/* Truncate got there first */
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
page = NULL;
goto out;
}
if (!PageUptodate(page) || PageError(page)) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
printk(KERN_ERR "ufs_change_blocknr: "
"can not read page: ino %lu, index: %lu\n",
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 954175928240..b7fbf53dbc81 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -283,7 +283,7 @@ extern struct page *ufs_get_locked_page(struct address_space *mapping,
static inline void ufs_put_locked_page(struct page *page)
{
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 041b6948aecc..ce41d7fe753c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3742,11 +3742,11 @@ xfs_bmap_btalloc(
args.prod = align;
if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
args.mod = (xfs_extlen_t)(args.prod - args.mod);
- } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
+ } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
args.prod = 1;
args.mod = 0;
} else {
- args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
+ args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
args.mod = (xfs_extlen_t)(args.prod - args.mod);
}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index d445a64b979e..e49b2406d15d 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -704,7 +704,7 @@ next_buffer:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
- xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
return;
}
@@ -925,9 +925,9 @@ xfs_do_writepage(
* ---------------------------------^------------------|
*/
offset = i_size_read(inode);
- end_index = offset >> PAGE_CACHE_SHIFT;
+ end_index = offset >> PAGE_SHIFT;
if (page->index < end_index)
- end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
+ end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
else {
/*
* Check whether the page to write out is beyond or straddles
@@ -940,7 +940,7 @@ xfs_do_writepage(
* | | Straddles |
* ---------------------------------^-----------|--------|
*/
- unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
+ unsigned offset_into_page = offset & (PAGE_SIZE - 1);
/*
* Skip the page if it is fully outside i_size, e.g. due to a
@@ -971,7 +971,7 @@ xfs_do_writepage(
* memory is zeroed when mapped, and writes to that region are
* not written out to the file."
*/
- zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
+ zero_user_segment(page, offset_into_page, PAGE_SIZE);
/* Adjust the end_offset to the end of file */
end_offset = offset;
@@ -1475,7 +1475,7 @@ xfs_vm_write_failed(
loff_t block_offset;
loff_t block_start;
loff_t block_end;
- loff_t from = pos & (PAGE_CACHE_SIZE - 1);
+ loff_t from = pos & (PAGE_SIZE - 1);
loff_t to = from + len;
struct buffer_head *bh, *head;
struct xfs_mount *mp = XFS_I(inode)->i_mount;
@@ -1491,7 +1491,7 @@ xfs_vm_write_failed(
* start of the page by using shifts rather than masks the mismatch
* problem.
*/
- block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
+ block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT;
ASSERT(block_offset + from == pos);
@@ -1558,12 +1558,12 @@ xfs_vm_write_begin(
struct page **pagep,
void **fsdata)
{
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
int status;
struct xfs_mount *mp = XFS_I(mapping->host)->i_mount;
- ASSERT(len <= PAGE_CACHE_SIZE);
+ ASSERT(len <= PAGE_SIZE);
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
@@ -1592,7 +1592,7 @@ xfs_vm_write_begin(
truncate_pagecache_range(inode, start, pos + len);
}
- page_cache_release(page);
+ put_page(page);
page = NULL;
}
@@ -1620,7 +1620,7 @@ xfs_vm_write_end(
{
int ret;
- ASSERT(len <= PAGE_CACHE_SIZE);
+ ASSERT(len <= PAGE_SIZE);
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
if (unlikely(ret < len)) {
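
xfs_do_writepage() above classifies a page as fully inside EOF, straddling it, or wholly beyond it, and zeroes a straddling page's tail so data past EOF never reaches disk. A sketch of the classification, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

int main(void)
{
	unsigned long long i_size = 10000;
	unsigned long long end_index = i_size >> PAGE_SHIFT;            /* 2 */
	unsigned long long offset_into_page = i_size & (PAGE_SIZE - 1); /* 1808 */

	for (unsigned long long idx = 0; idx <= end_index + 1; idx++) {
		if (idx < end_index)
			printf("page %llu: fully inside EOF, write whole page\n", idx);
		else if (idx == end_index && offset_into_page)
			printf("page %llu: straddles EOF, zero bytes %llu..%llu\n",
			       idx, offset_into_page, PAGE_SIZE - 1);
		else
			printf("page %llu: beyond EOF, skip\n", idx);
	}
	return 0;
}
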
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index a32c1dcae2ff..3b6309865c65 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1237,7 +1237,7 @@ xfs_free_file_space(
/* wait for the completion of any pending DIOs */
inode_dio_wait(VFS_I(ip));
- rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+ rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
ioffset = round_down(offset, rounding);
iendoffset = round_up(offset + len, rounding) - 1;
error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
@@ -1466,7 +1466,7 @@ xfs_shift_file_space(
if (error)
return error;
error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
- offset >> PAGE_CACHE_SHIFT, -1);
+ offset >> PAGE_SHIFT, -1);
if (error)
return error;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index ac0fd32de31e..569938a4a357 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -106,8 +106,8 @@ xfs_iozero(
unsigned offset, bytes;
void *fsdata;
- offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
- bytes = PAGE_CACHE_SIZE - offset;
+ offset = (pos & (PAGE_SIZE -1)); /* Within page */
+ bytes = PAGE_SIZE - offset;
if (bytes > count)
bytes = count;
@@ -799,8 +799,8 @@ xfs_file_dio_aio_write(
/* see generic_file_direct_write() for why this is necessary */
if (mapping->nrpages) {
invalidate_inode_pages2_range(mapping,
- pos >> PAGE_CACHE_SHIFT,
- end >> PAGE_CACHE_SHIFT);
+ pos >> PAGE_SHIFT,
+ end >> PAGE_SHIFT);
}
if (ret > 0) {
@@ -1207,9 +1207,9 @@ xfs_find_get_desired_pgoff(
pagevec_init(&pvec, 0);
- index = startoff >> PAGE_CACHE_SHIFT;
+ index = startoff >> PAGE_SHIFT;
endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
- end = endoff >> PAGE_CACHE_SHIFT;
+ end = endoff >> PAGE_SHIFT;
do {
int want;
unsigned nr_pages;
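
xfs_iozero() above walks the range one page at a time: each step covers from the in-page offset to the end of that page, clamped to the remaining count. A sketch of the chunking, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pos = 5000, count = 9000;

	while (count) {
		unsigned long offset = pos & (PAGE_SIZE - 1); /* within page */
		unsigned long bytes = PAGE_SIZE - offset;     /* to page end */
		if (bytes > count)
			bytes = count;
		printf("zero %5lu bytes at pos %lu\n", bytes, pos);
		pos += bytes;
		count -= bytes;
	}
	return 0;
}
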
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ec0e239a0fa9..a8192dc797dc 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -135,7 +135,7 @@ typedef __u32 xfs_nlink_t;
* Size of block device i/o is parameterized here.
* Currently the system supports page-sized i/o.
*/
-#define BLKDEV_IOSHIFT PAGE_CACHE_SHIFT
+#define BLKDEV_IOSHIFT PAGE_SHIFT
#define BLKDEV_IOSIZE (1<<BLKDEV_IOSHIFT)
/* number of BB's per block device block */
#define BLKDEV_BB BTOBB(BLKDEV_IOSIZE)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 536a0ee9cd5a..cfd4210dd015 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -171,7 +171,7 @@ xfs_sb_validate_fsb_count(
ASSERT(sbp->sb_blocklog >= BBSHIFT);
/* Limited by ULONG_MAX of page cache index */
- if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
+ if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
return -EFBIG;
return 0;
}
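
The validation above converts the filesystem block count into pagecache pages and requires the result to fit an unsigned long page index. A sketch, assuming PAGE_SHIFT of 12 and a 512-byte block (blocklog 9); on 64-bit builds ULONG_MAX makes the guard a no-op, it only bites on 32-bit:

#include <stdio.h>
#include <limits.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static int validate_fsb_count(uint64_t nblocks, int blocklog)
{
	/* 1 << (PAGE_SHIFT - blocklog) blocks per page; shifting the block
	 * count right by that yields the number of pagecache pages needed. */
	if (nblocks >> (PAGE_SHIFT - blocklog) > ULONG_MAX)
		return -1;                       /* -EFBIG in the kernel */
	return 0;
}

int main(void)
{
	printf("%d\n", validate_fsb_count(1ULL << 40, 9)); /* fits on 64-bit */
	return 0;
}
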
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index bac6b3435591..eafe257b357a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -231,12 +231,12 @@ static inline unsigned long
xfs_preferred_iosize(xfs_mount_t *mp)
{
if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)
- return PAGE_CACHE_SIZE;
+ return PAGE_SIZE;
return (mp->m_swidth ?
(mp->m_swidth << mp->m_sb.sb_blocklog) :
((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ?
(1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) :
- PAGE_CACHE_SIZE));
+ PAGE_SIZE));
}
#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
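
xfs_preferred_iosize() above picks one page in compat mode, else the stripe width in bytes, else the configured default I/O size, else a page. A sketch with made-up flag values and a cut-down struct standing in for the real mount structure:

#include <stdio.h>

#define PAGE_SIZE               4096UL
#define XFS_MOUNT_COMPAT_IOSIZE 0x1
#define XFS_MOUNT_DFLT_IOSIZE   0x2

struct mount {
	unsigned flags;
	unsigned swidth;                 /* stripe width in fs blocks */
	unsigned blocklog;               /* log2 of fs block size */
	unsigned readio_log, writeio_log;
};

static unsigned long preferred_iosize(const struct mount *mp)
{
	if (mp->flags & XFS_MOUNT_COMPAT_IOSIZE)
		return PAGE_SIZE;
	if (mp->swidth)
		return (unsigned long)mp->swidth << mp->blocklog;
	if (mp->flags & XFS_MOUNT_DFLT_IOSIZE)
		return 1UL << (mp->readio_log > mp->writeio_log ?
			       mp->readio_log : mp->writeio_log);
	return PAGE_SIZE;
}

int main(void)
{
	struct mount mp = { .flags = 0, .swidth = 16, .blocklog = 12 };
	printf("%lu\n", preferred_iosize(&mp)); /* 16 blocks * 4096 = 65536 */
	return 0;
}
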
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index ade236e90bb3..51ddaf2c2b8c 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -293,8 +293,8 @@ xfs_fs_commit_blocks(
* Make sure reads through the pagecache see the new data.
*/
error = invalidate_inode_pages2_range(inode->i_mapping,
- start >> PAGE_CACHE_SHIFT,
- (end - 1) >> PAGE_CACHE_SHIFT);
+ start >> PAGE_SHIFT,
+ (end - 1) >> PAGE_SHIFT);
WARN_ON_ONCE(error);
error = xfs_iomap_write_unwritten(ip, start, length);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index d760934109b5..187e14b696c2 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -556,10 +556,10 @@ xfs_max_file_offset(
/* Figure out maximum filesize, on Linux this can depend on
* the filesystem blocksize (on 32 bit platforms).
* __block_write_begin does this in an [unsigned] long...
- * page->index << (PAGE_CACHE_SHIFT - bbits)
+ * page->index << (PAGE_SHIFT - bbits)
* So, for page sized blocks (4K on 32 bit platforms),
* this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
- * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+ * (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
* but for smaller blocksizes it is less (bbits = log2 bsize).
* Note1: get_block_t takes a long (implicit cast from above)
* Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
@@ -570,10 +570,10 @@ xfs_max_file_offset(
#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
ASSERT(sizeof(sector_t) == 8);
- pagefactor = PAGE_CACHE_SIZE;
+ pagefactor = PAGE_SIZE;
bitshift = BITS_PER_LONG;
# else
- pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
+ pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
# endif
#endif
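
The comment above pins down the 32-bit ceiling: without CONFIG_LBDAF the page index is scaled by blocks-per-page and shifted into BITS_PER_LONG - 1 bits, so smaller blocks lower the maximum offset. A sketch of that arithmetic, assuming PAGE_SHIFT of 12 and a bitshift of 31 as on a 32-bit build:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static uint64_t max_file_offset_32bit(int blockshift)
{
	uint64_t pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
	int bitshift = 31;           /* BITS_PER_LONG - 1 without CONFIG_LBDAF */
	return (pagefactor << bitshift) - 1;
}

int main(void)
{
	/* 4-KiB blocks: the ~8 TiB ceiling the comment mentions;
	 * 1-KiB blocks: only 2 TiB. */
	printf("bsize 4096: %llu\n",
	       (unsigned long long)max_file_offset_32bit(12));
	printf("bsize 1024: %llu\n",
	       (unsigned long long)max_file_offset_32bit(10));
	return 0;
}
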