diff options
Diffstat (limited to 'fs')
338 files changed, 16185 insertions, 3615 deletions
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index e9e04376c52c..ac9225e86bf3 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -153,7 +153,7 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset, * If called with zero offset, we should release * the private state assocated with the page */ - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0 && length == PAGE_SIZE) v9fs_fscache_invalidate_page(page); } @@ -166,10 +166,10 @@ static int v9fs_vfs_writepage_locked(struct page *page) struct bio_vec bvec; int err, len; - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; + if (page->index == size >> PAGE_SHIFT) + len = size & ~PAGE_MASK; else - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; bvec.bv_page = page; bvec.bv_offset = 0; @@ -271,7 +271,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping, int retval = 0; struct page *page; struct v9fs_inode *v9inode; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct inode *inode = mapping->host; @@ -288,11 +288,11 @@ start: if (PageUptodate(page)) goto out; - if (len == PAGE_CACHE_SIZE) + if (len == PAGE_SIZE) goto out; retval = v9fs_fid_readpage(v9inode->writeback_fid, page); - page_cache_release(page); + put_page(page); if (!retval) goto start; out: @@ -313,7 +313,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping, /* * zero out the rest of the area */ - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); zero_user(page, from + copied, len - copied); flush_dcache_page(page); @@ -331,7 +331,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping, } set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index eadc894faea2..b84c291ba1eb 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -421,8 +421,8 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(file); loff_t i_size; unsigned long pg_start, pg_end; - pg_start = origin >> PAGE_CACHE_SHIFT; - pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT; + pg_start = origin >> PAGE_SHIFT; + pg_end = (origin + retval - 1) >> PAGE_SHIFT; if (inode->i_mapping && inode->i_mapping->nrpages) invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index bf495cedec26..de3ed8629196 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -87,7 +87,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses, sb->s_op = &v9fs_super_ops; sb->s_bdi = &v9ses->bdi; if (v9ses->cache) - sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE; + sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE; sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME; if (!v9ses->cache) diff --git a/fs/Kconfig b/fs/Kconfig index 9d757673bf40..6725f59c18e6 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -209,6 +209,7 @@ menuconfig MISC_FILESYSTEMS if MISC_FILESYSTEMS +source "fs/orangefs/Kconfig" source "fs/adfs/Kconfig" source "fs/affs/Kconfig" source "fs/ecryptfs/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index 252c96898a43..85b6e13b62d3 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -106,6 +106,7 @@ obj-$(CONFIG_AUTOFS4_FS) += autofs4/ obj-$(CONFIG_ADFS_FS) += adfs/ obj-$(CONFIG_FUSE_FS) += fuse/ obj-$(CONFIG_OVERLAY_FS) += overlayfs/ +obj-$(CONFIG_ORANGEFS_FS) += orangefs/ 
obj-$(CONFIG_UDF_FS) += udf/ obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ obj-$(CONFIG_OMFS_FS) += omfs/ diff --git a/fs/affs/file.c b/fs/affs/file.c index 22fc7c802d69..0cde550050e8 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -510,9 +510,9 @@ affs_do_readpage_ofs(struct page *page, unsigned to) pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino, page->index, to); - BUG_ON(to > PAGE_CACHE_SIZE); + BUG_ON(to > PAGE_SIZE); bsize = AFFS_SB(sb)->s_data_blksize; - tmp = page->index << PAGE_CACHE_SHIFT; + tmp = page->index << PAGE_SHIFT; bidx = tmp / bsize; boff = tmp % bsize; @@ -613,10 +613,10 @@ affs_readpage_ofs(struct file *file, struct page *page) int err; pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index); - to = PAGE_CACHE_SIZE; - if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) { - to = inode->i_size & ~PAGE_CACHE_MASK; - memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to); + to = PAGE_SIZE; + if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) { + to = inode->i_size & ~PAGE_MASK; + memset(page_address(page) + to, 0, PAGE_SIZE - to); } err = affs_do_readpage_ofs(page, to); @@ -646,7 +646,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping return err; } - index = pos >> PAGE_CACHE_SHIFT; + index = pos >> PAGE_SHIFT; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; @@ -656,10 +656,10 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping return 0; /* XXX: inefficient but safe in the face of short writes */ - err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE); + err = affs_do_readpage_ofs(page, PAGE_SIZE); if (err) { unlock_page(page); - page_cache_release(page); + put_page(page); } return err; } @@ -677,7 +677,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, u32 tmp; int written; - from = pos & (PAGE_CACHE_SIZE - 1); + from = pos & (PAGE_SIZE - 1); to = pos + len; /* * XXX: not sure if this can handle short copies (len < copied), but @@ -692,7 +692,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, bh = NULL; written = 0; - tmp = (page->index << PAGE_CACHE_SHIFT) + from; + tmp = (page->index << PAGE_SHIFT) + from; bidx = tmp / bsize; boff = tmp % bsize; if (boff) { @@ -788,13 +788,13 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, done: affs_brelse(bh); - tmp = (page->index << PAGE_CACHE_SHIFT) + from; + tmp = (page->index << PAGE_SHIFT) + from; if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; err_first_bh: unlock_page(page); - page_cache_release(page); + put_page(page); return written; diff --git a/fs/afs/dir.c b/fs/afs/dir.c index e10e17788f06..5fda2bc53cd7 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -181,7 +181,7 @@ error: static inline void afs_dir_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } /* diff --git a/fs/afs/file.c b/fs/afs/file.c index 999bc3caec92..6344aee4ac4b 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -164,7 +164,7 @@ int afs_page_filler(void *data, struct page *page) _debug("cache said ENOBUFS"); default: go_on: - offset = page->index << PAGE_CACHE_SHIFT; + offset = page->index << PAGE_SHIFT; len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE); /* read the contents of the file from the server into the @@ -319,7 +319,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset, BUG_ON(!PageLocked(page)); /* we clean up only 
if the entire page is being invalidated */ - if (offset == 0 && length == PAGE_CACHE_SIZE) { + if (offset == 0 && length == PAGE_SIZE) { #ifdef CONFIG_AFS_FSCACHE if (PageFsCache(page)) { struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index ccd0b212e82a..81dd075356b9 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -93,7 +93,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key) kunmap(page); out_free: - page_cache_release(page); + put_page(page); out: _leave(" = %d", ret); return ret; @@ -189,7 +189,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) buf = kmap_atomic(page); memcpy(devname, buf, size); kunmap_atomic(buf); - page_cache_release(page); + put_page(page); page = NULL; } @@ -211,7 +211,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) return mnt; error: - page_cache_release(page); + put_page(page); error_no_page: free_page((unsigned long) options); error_no_options: diff --git a/fs/afs/super.c b/fs/afs/super.c index 81afefe7d8a6..fbdb022b75a2 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -315,8 +315,8 @@ static int afs_fill_super(struct super_block *sb, _enter(""); /* fill in the superblock */ - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = AFS_FS_MAGIC; sb->s_op = &afs_super_ops; sb->s_bdi = &as->volume->bdi; diff --git a/fs/afs/write.c b/fs/afs/write.c index dfef94f70667..65de439bdc4f 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -93,10 +93,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key, _enter(",,%llu", (unsigned long long)pos); i_size = i_size_read(&vnode->vfs_inode); - if (pos + PAGE_CACHE_SIZE > i_size) + if (pos + PAGE_SIZE > i_size) len = i_size - pos; else - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; ret = afs_vnode_fetch_data(vnode, key, pos, len, page); if (ret < 0) { @@ -123,9 +123,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping, struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); struct page *page; struct key *key = file->private_data; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + len; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; int ret; _enter("{%x:%u},{%lx},%u,%u", @@ -151,8 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping, *pagep = page; /* page won't leak in error case: it eventually gets cleaned off LRU */ - if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) { - ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page); + if (!PageUptodate(page) && len != PAGE_SIZE) { + ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page); if (ret < 0) { kfree(candidate); _leave(" = %d [prep]", ret); @@ -266,7 +266,7 @@ int afs_write_end(struct file *file, struct address_space *mapping, if (PageDirty(page)) _debug("dirtied"); unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } @@ -480,7 +480,7 @@ static int afs_writepages_region(struct address_space *mapping, if (page->index > end) { *_next = index; - page_cache_release(page); + put_page(page); _leave(" = 0 [%lx]", *_next); return 0; } @@ -494,7 +494,7 @@ static int afs_writepages_region(struct address_space *mapping, if (page->mapping != mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); continue; } @@ -515,7 +515,7 @@ static int 
afs_writepages_region(struct address_space *mapping, ret = afs_write_back_from_locked_page(wb, page); unlock_page(page); - page_cache_release(page); + put_page(page); if (ret < 0) { _leave(" = %d", ret); return ret; @@ -551,13 +551,13 @@ int afs_writepages(struct address_space *mapping, &next); mapping->writeback_index = next; } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { - end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT); + end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT); ret = afs_writepages_region(mapping, wbc, 0, end, &next); if (wbc->nr_to_write > 0) mapping->writeback_index = next; } else { - start = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + start = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; ret = afs_writepages_region(mapping, wbc, start, end, &next); } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7d914c67a9d0..81381cc0dd17 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -2292,7 +2292,7 @@ static int elf_core_dump(struct coredump_params *cprm) void *kaddr = kmap(page); stop = !dump_emit(cprm, kaddr, PAGE_SIZE); kunmap(page); - page_cache_release(page); + put_page(page); } else stop = !dump_skip(cprm, PAGE_SIZE); if (stop) diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index b1adb92e69de..083ea2bc60ab 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1533,7 +1533,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm) void *kaddr = kmap(page); res = dump_emit(cprm, kaddr, PAGE_SIZE); kunmap(page); - page_cache_release(page); + put_page(page); } else { res = dump_skip(cprm, PAGE_SIZE); } diff --git a/fs/block_dev.c b/fs/block_dev.c index 3172c4e2f502..20a2c02b77c4 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -331,7 +331,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping, ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); unlock_page(page); - page_cache_release(page); + put_page(page); return ret; } @@ -1149,7 +1149,7 @@ void bd_set_size(struct block_device *bdev, loff_t size) inode_lock(bdev->bd_inode); i_size_write(bdev->bd_inode, size); inode_unlock(bdev->bd_inode); - while (bsize < PAGE_CACHE_SIZE) { + while (bsize < PAGE_SIZE) { if (size & bsize) break; bsize <<= 1; diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index e34a71b3e225..516e19d1d202 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -757,7 +757,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state, BUG_ON(NULL == l); ret = btrfsic_read_block(state, &tmp_next_block_ctx); - if (ret < (int)PAGE_CACHE_SIZE) { + if (ret < (int)PAGE_SIZE) { printk(KERN_INFO "btrfsic: read @logical %llu failed!\n", tmp_next_block_ctx.start); @@ -1231,15 +1231,15 @@ static void btrfsic_read_from_block_data( size_t offset_in_page; char *kaddr; char *dst = (char *)dstv; - size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT; + size_t start_offset = block_ctx->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + offset) >> PAGE_SHIFT; WARN_ON(offset + len > block_ctx->len); - offset_in_page = (start_offset + offset) & (PAGE_CACHE_SIZE - 1); + offset_in_page = (start_offset + offset) & (PAGE_SIZE - 1); while (len > 0) { - cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page)); - BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE)); + cur = min(len, ((size_t)PAGE_SIZE - offset_in_page)); + 
BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE)); kaddr = block_ctx->datav[i]; memcpy(dst, kaddr + offset_in_page, cur); @@ -1605,8 +1605,8 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx) BUG_ON(!block_ctx->datav); BUG_ON(!block_ctx->pagev); - num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >> + PAGE_SHIFT; while (num_pages > 0) { num_pages--; if (block_ctx->datav[num_pages]) { @@ -1637,15 +1637,15 @@ static int btrfsic_read_block(struct btrfsic_state *state, BUG_ON(block_ctx->datav); BUG_ON(block_ctx->pagev); BUG_ON(block_ctx->mem_to_free); - if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) { + if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) { printk(KERN_INFO "btrfsic: read_block() with unaligned bytenr %llu\n", block_ctx->dev_bytenr); return -1; } - num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >> + PAGE_SHIFT; block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) + sizeof(*block_ctx->pagev)) * num_pages, GFP_NOFS); @@ -1676,8 +1676,8 @@ static int btrfsic_read_block(struct btrfsic_state *state, for (j = i; j < num_pages; j++) { ret = bio_add_page(bio, block_ctx->pagev[j], - PAGE_CACHE_SIZE, 0); - if (PAGE_CACHE_SIZE != ret) + PAGE_SIZE, 0); + if (PAGE_SIZE != ret) break; } if (j == i) { @@ -1693,7 +1693,7 @@ static int btrfsic_read_block(struct btrfsic_state *state, return -1; } bio_put(bio); - dev_bytenr += (j - i) * PAGE_CACHE_SIZE; + dev_bytenr += (j - i) * PAGE_SIZE; i = j; } for (i = 0; i < num_pages; i++) { @@ -1769,9 +1769,9 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state, u32 crc = ~(u32)0; unsigned int i; - if (num_pages * PAGE_CACHE_SIZE < state->metablock_size) + if (num_pages * PAGE_SIZE < state->metablock_size) return 1; /* not metadata */ - num_pages = state->metablock_size >> PAGE_CACHE_SHIFT; + num_pages = state->metablock_size >> PAGE_SHIFT; h = (struct btrfs_header *)datav[0]; if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE)) @@ -1779,8 +1779,8 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state, for (i = 0; i < num_pages; i++) { u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE); - size_t sublen = i ? PAGE_CACHE_SIZE : - (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE); + size_t sublen = i ? 
PAGE_SIZE : + (PAGE_SIZE - BTRFS_CSUM_SIZE); crc = btrfs_crc32c(crc, data, sublen); } @@ -1826,14 +1826,14 @@ again: if (block->is_superblock) { bytenr = btrfs_super_bytenr((struct btrfs_super_block *) mapped_datav[0]); - if (num_pages * PAGE_CACHE_SIZE < + if (num_pages * PAGE_SIZE < BTRFS_SUPER_INFO_SIZE) { printk(KERN_INFO "btrfsic: cannot work with too short bios!\n"); return; } is_metadata = 1; - BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1)); + BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_SIZE - 1)); processed_len = BTRFS_SUPER_INFO_SIZE; if (state->print_mask & BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) { @@ -1844,7 +1844,7 @@ again: } if (is_metadata) { if (!block->is_superblock) { - if (num_pages * PAGE_CACHE_SIZE < + if (num_pages * PAGE_SIZE < state->metablock_size) { printk(KERN_INFO "btrfsic: cannot work with too short bios!\n"); @@ -1880,7 +1880,7 @@ again: } block->logical_bytenr = bytenr; } else { - if (num_pages * PAGE_CACHE_SIZE < + if (num_pages * PAGE_SIZE < state->datablock_size) { printk(KERN_INFO "btrfsic: cannot work with too short bios!\n"); @@ -2013,7 +2013,7 @@ again: block->logical_bytenr = bytenr; block->is_metadata = 1; if (block->is_superblock) { - BUG_ON(PAGE_CACHE_SIZE != + BUG_ON(PAGE_SIZE != BTRFS_SUPER_INFO_SIZE); ret = btrfsic_process_written_superblock( state, @@ -2172,8 +2172,8 @@ again: continue_loop: BUG_ON(!processed_len); dev_bytenr += processed_len; - mapped_datav += processed_len >> PAGE_CACHE_SHIFT; - num_pages -= processed_len >> PAGE_CACHE_SHIFT; + mapped_datav += processed_len >> PAGE_SHIFT; + num_pages -= processed_len >> PAGE_SHIFT; goto again; } @@ -2954,7 +2954,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio) goto leave; cur_bytenr = dev_bytenr; for (i = 0; i < bio->bi_vcnt; i++) { - BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE); + BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE); mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page); if (!mapped_datav[i]) { while (i > 0) { @@ -3037,16 +3037,16 @@ int btrfsic_mount(struct btrfs_root *root, struct list_head *dev_head = &fs_devices->devices; struct btrfs_device *device; - if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) { + if (root->nodesize & ((u64)PAGE_SIZE - 1)) { printk(KERN_INFO - "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n", - root->nodesize, PAGE_CACHE_SIZE); + "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n", + root->nodesize, PAGE_SIZE); return -1; } - if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) { + if (root->sectorsize & ((u64)PAGE_SIZE - 1)) { printk(KERN_INFO - "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n", - root->sectorsize, PAGE_CACHE_SIZE); + "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n", + root->sectorsize, PAGE_SIZE); return -1; } state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 3346cd8f9910..ff61a41ac90b 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -119,7 +119,7 @@ static int check_compressed_csum(struct inode *inode, csum = ~(u32)0; kaddr = kmap_atomic(page); - csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE); + csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE); btrfs_csum_final(csum, (char *)&csum); kunmap_atomic(kaddr); @@ -190,7 +190,7 @@ csum_failed: for (index = 0; index < cb->nr_pages; index++) { page = cb->compressed_pages[index]; page->mapping = NULL; - page_cache_release(page); + 
put_page(page); } /* do io completion on the original bio */ @@ -224,8 +224,8 @@ out: static noinline void end_compressed_writeback(struct inode *inode, const struct compressed_bio *cb) { - unsigned long index = cb->start >> PAGE_CACHE_SHIFT; - unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT; + unsigned long index = cb->start >> PAGE_SHIFT; + unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; struct page *pages[16]; unsigned long nr_pages = end_index - index + 1; int i; @@ -247,7 +247,7 @@ static noinline void end_compressed_writeback(struct inode *inode, if (cb->errors) SetPageError(pages[i]); end_page_writeback(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } nr_pages -= ret; index += ret; @@ -304,7 +304,7 @@ static void end_compressed_bio_write(struct bio *bio) for (index = 0; index < cb->nr_pages; index++) { page = cb->compressed_pages[index]; page->mapping = NULL; - page_cache_release(page); + put_page(page); } /* finally free the cb struct */ @@ -341,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, int ret; int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); + WARN_ON(start & ((u64)PAGE_SIZE - 1)); cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); if (!cb) return -ENOMEM; @@ -374,14 +374,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, page->mapping = inode->i_mapping; if (bio->bi_iter.bi_size) ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, - PAGE_CACHE_SIZE, + PAGE_SIZE, bio, 0); else ret = 0; page->mapping = NULL; - if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < - PAGE_CACHE_SIZE) { + if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) < + PAGE_SIZE) { bio_get(bio); /* @@ -410,15 +410,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, BUG_ON(!bio); bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; - bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); + bio_add_page(bio, page, PAGE_SIZE, 0); } - if (bytes_left < PAGE_CACHE_SIZE) { + if (bytes_left < PAGE_SIZE) { btrfs_info(BTRFS_I(inode)->root->fs_info, "bytes left %lu compress len %lu nr %lu", bytes_left, cb->compressed_len, cb->nr_pages); } - bytes_left -= PAGE_CACHE_SIZE; - first_byte += PAGE_CACHE_SIZE; + bytes_left -= PAGE_SIZE; + first_byte += PAGE_SIZE; cond_resched(); } bio_get(bio); @@ -457,17 +457,17 @@ static noinline int add_ra_bio_pages(struct inode *inode, int misses = 0; page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page; - last_offset = (page_offset(page) + PAGE_CACHE_SIZE); + last_offset = (page_offset(page) + PAGE_SIZE); em_tree = &BTRFS_I(inode)->extent_tree; tree = &BTRFS_I(inode)->io_tree; if (isize == 0) return 0; - end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; while (last_offset < compressed_end) { - pg_index = last_offset >> PAGE_CACHE_SHIFT; + pg_index = last_offset >> PAGE_SHIFT; if (pg_index > end_index) break; @@ -488,11 +488,11 @@ static noinline int add_ra_bio_pages(struct inode *inode, break; if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { - page_cache_release(page); + put_page(page); goto next; } - end = last_offset + PAGE_CACHE_SIZE - 1; + end = last_offset + PAGE_SIZE - 1; /* * at this point, we have a locked page in the page cache * for these bytes in the file. 
But, we have to make @@ -502,27 +502,27 @@ static noinline int add_ra_bio_pages(struct inode *inode, lock_extent(tree, last_offset, end); read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, last_offset, - PAGE_CACHE_SIZE); + PAGE_SIZE); read_unlock(&em_tree->lock); if (!em || last_offset < em->start || - (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || + (last_offset + PAGE_SIZE > extent_map_end(em)) || (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { free_extent_map(em); unlock_extent(tree, last_offset, end); unlock_page(page); - page_cache_release(page); + put_page(page); break; } free_extent_map(em); if (page->index == end_index) { char *userpage; - size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1); + size_t zero_offset = isize & (PAGE_SIZE - 1); if (zero_offset) { int zeros; - zeros = PAGE_CACHE_SIZE - zero_offset; + zeros = PAGE_SIZE - zero_offset; userpage = kmap_atomic(page); memset(userpage + zero_offset, 0, zeros); flush_dcache_page(page); @@ -531,19 +531,19 @@ static noinline int add_ra_bio_pages(struct inode *inode, } ret = bio_add_page(cb->orig_bio, page, - PAGE_CACHE_SIZE, 0); + PAGE_SIZE, 0); - if (ret == PAGE_CACHE_SIZE) { + if (ret == PAGE_SIZE) { nr_pages++; - page_cache_release(page); + put_page(page); } else { unlock_extent(tree, last_offset, end); unlock_page(page); - page_cache_release(page); + put_page(page); break; } next: - last_offset += PAGE_CACHE_SIZE; + last_offset += PAGE_SIZE; } return 0; } @@ -567,7 +567,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, struct extent_map_tree *em_tree; struct compressed_bio *cb; struct btrfs_root *root = BTRFS_I(inode)->root; - unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; + unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE; unsigned long compressed_len; unsigned long nr_pages; unsigned long pg_index; @@ -589,7 +589,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, page_offset(bio->bi_io_vec->bv_page), - PAGE_CACHE_SIZE); + PAGE_SIZE); read_unlock(&em_tree->lock); if (!em) return -EIO; @@ -617,7 +617,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->compress_type = extent_compress_type(bio_flags); cb->orig_bio = bio; - nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE); + nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE); cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); if (!cb->compressed_pages) @@ -640,7 +640,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, add_ra_bio_pages(inode, em_start + em_len, cb); /* include any pages we added in add_ra-bio_pages */ - uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; + uncompressed_len = bio->bi_vcnt * PAGE_SIZE; cb->len = uncompressed_len; comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); @@ -653,18 +653,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, for (pg_index = 0; pg_index < nr_pages; pg_index++) { page = cb->compressed_pages[pg_index]; page->mapping = inode->i_mapping; - page->index = em_start >> PAGE_CACHE_SHIFT; + page->index = em_start >> PAGE_SHIFT; if (comp_bio->bi_iter.bi_size) ret = tree->ops->merge_bio_hook(READ, page, 0, - PAGE_CACHE_SIZE, + PAGE_SIZE, comp_bio, 0); else ret = 0; page->mapping = NULL; - if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) < - PAGE_CACHE_SIZE) { + if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) < + PAGE_SIZE) { 
bio_get(comp_bio); ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, @@ -702,9 +702,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; - bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0); + bio_add_page(comp_bio, page, PAGE_SIZE, 0); } - cur_disk_byte += PAGE_CACHE_SIZE; + cur_disk_byte += PAGE_SIZE; } bio_get(comp_bio); @@ -1013,8 +1013,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, /* copy bytes from the working buffer into the pages */ while (working_bytes > 0) { - bytes = min(PAGE_CACHE_SIZE - *pg_offset, - PAGE_CACHE_SIZE - buf_offset); + bytes = min(PAGE_SIZE - *pg_offset, + PAGE_SIZE - buf_offset); bytes = min(bytes, working_bytes); kaddr = kmap_atomic(page_out); memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); @@ -1027,7 +1027,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, current_buf_start += bytes; /* check if we need to pick another page */ - if (*pg_offset == PAGE_CACHE_SIZE) { + if (*pg_offset == PAGE_SIZE) { (*pg_index)++; if (*pg_index >= vcnt) return 0; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4b02591b0301..4e47849d7427 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -25,7 +25,6 @@ #include <linux/buffer_head.h> #include <linux/workqueue.h> #include <linux/kthread.h> -#include <linux/freezer.h> #include <linux/slab.h> #include <linux/migrate.h> #include <linux/ratelimit.h> @@ -303,7 +302,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info, err = map_private_extent_buffer(buf, offset, 32, &kaddr, &map_start, &map_len); if (err) - return 1; + return err; cur_len = min(len, map_len - (offset - map_start)); crc = btrfs_csum_data(kaddr + offset - map_start, crc, cur_len); @@ -313,7 +312,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info, if (csum_size > sizeof(inline_result)) { result = kzalloc(csum_size, GFP_NOFS); if (!result) - return 1; + return -ENOMEM; } else { result = (char *)&inline_result; } @@ -334,7 +333,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info, val, found, btrfs_header_level(buf)); if (result != (char *)&inline_result) kfree(result); - return 1; + return -EUCLEAN; } } else { write_extent_buffer(buf, result, 0, csum_size); @@ -513,11 +512,21 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page) eb = (struct extent_buffer *)page->private; if (page != eb->pages[0]) return 0; + found_start = btrfs_header_bytenr(eb); - if (WARN_ON(found_start != start || !PageUptodate(page))) - return 0; - csum_tree_block(fs_info, eb, 0); - return 0; + /* + * Please do not consolidate these warnings into a single if. + * It is useful to know what went wrong. 
+ */ + if (WARN_ON(found_start != start)) + return -EUCLEAN; + if (WARN_ON(!PageUptodate(page))) + return -EUCLEAN; + + ASSERT(memcmp_extent_buffer(eb, fs_info->fsid, + btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0); + + return csum_tree_block(fs_info, eb, 0); } static int check_tree_block_fsid(struct btrfs_fs_info *fs_info, @@ -661,10 +670,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, eb, found_level); ret = csum_tree_block(fs_info, eb, 1); - if (ret) { - ret = -EIO; + if (ret) goto err; - } /* * If this is a leaf block and it is corrupt, set the corrupt bit so @@ -1055,7 +1062,7 @@ static void btree_invalidatepage(struct page *page, unsigned int offset, (unsigned long long)page_offset(page)); ClearPagePrivate(page); set_page_private(page, 0); - page_cache_release(page); + put_page(page); } } @@ -1757,7 +1764,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi) if (err) return err; - bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; + bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE; bdi->congested_fn = btrfs_congested_fn; bdi->congested_data = info; bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; @@ -1831,7 +1838,7 @@ static int cleaner_kthread(void *arg) */ btrfs_delete_unused_bgs(root->fs_info); sleep: - if (!try_to_freeze() && !again) { + if (!again) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop()) schedule(); @@ -1921,14 +1928,12 @@ sleep: if (unlikely(test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))) btrfs_cleanup_transaction(root); - if (!try_to_freeze()) { - set_current_state(TASK_INTERRUPTIBLE); - if (!kthread_should_stop() && - (!btrfs_transaction_blocked(root->fs_info) || - cannot_commit)) - schedule_timeout(delay); - __set_current_state(TASK_RUNNING); - } + set_current_state(TASK_INTERRUPTIBLE); + if (!kthread_should_stop() && + (!btrfs_transaction_blocked(root->fs_info) || + cannot_commit)) + schedule_timeout(delay); + __set_current_state(TASK_RUNNING); } while (!kthread_should_stop()); return 0; } @@ -2537,7 +2542,7 @@ int open_ctree(struct super_block *sb, err = ret; goto fail_bdi; } - fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * + fs_info->dirty_metadata_batch = PAGE_SIZE * (1 + ilog2(nr_cpu_ids)); ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); @@ -2782,7 +2787,7 @@ int open_ctree(struct super_block *sb, * flag our filesystem as having big metadata blocks if * they are bigger than the page size */ - if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) { + if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n"); features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; @@ -2832,7 +2837,7 @@ int open_ctree(struct super_block *sb, fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, - SZ_4M / PAGE_CACHE_SIZE); + SZ_4M / PAGE_SIZE); tree_root->nodesize = nodesize; tree_root->sectorsize = sectorsize; @@ -4071,9 +4076,9 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, ret = -EINVAL; } /* Only PAGE SIZE is supported yet */ - if (sectorsize != PAGE_CACHE_SIZE) { + if (sectorsize != PAGE_SIZE) { printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n", - sectorsize, PAGE_CACHE_SIZE); + sectorsize, PAGE_SIZE); ret = -EINVAL; } if (!is_power_of_2(nodesize) || nodesize < sectorsize || diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 
53e12977bfd0..ce114ba9780a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3452,7 +3452,7 @@ again: num_pages = 1; num_pages *= 16; - num_pages *= PAGE_CACHE_SIZE; + num_pages *= PAGE_SIZE; ret = btrfs_check_data_free_space(inode, 0, num_pages); if (ret) @@ -4639,7 +4639,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, loops = 0; while (delalloc_bytes && loops < 3) { max_reclaim = min(delalloc_bytes, to_reclaim); - nr_pages = max_reclaim >> PAGE_CACHE_SHIFT; + nr_pages = max_reclaim >> PAGE_SHIFT; btrfs_writeback_inodes_sb_nr(root, nr_pages, items); /* * We need to wait for the async pages to actually start before diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 76a0c8597d98..d247fc0eea19 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1363,23 +1363,23 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) { - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; struct page *page; while (index <= end_index) { page = find_get_page(inode->i_mapping, index); BUG_ON(!page); /* Pages should be in the extent_io_tree */ clear_page_dirty_for_io(page); - page_cache_release(page); + put_page(page); index++; } } void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) { - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; struct page *page; while (index <= end_index) { @@ -1387,7 +1387,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) BUG_ON(!page); /* Pages should be in the extent_io_tree */ __set_page_dirty_nobuffers(page); account_page_redirty(page); - page_cache_release(page); + put_page(page); index++; } } @@ -1397,15 +1397,15 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) */ static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) { - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; struct page *page; while (index <= end_index) { page = find_get_page(tree->mapping, index); BUG_ON(!page); /* Pages should be in the extent_io_tree */ set_page_writeback(page); - page_cache_release(page); + put_page(page); index++; } } @@ -1556,8 +1556,8 @@ static noinline void __unlock_for_delalloc(struct inode *inode, { int ret; struct page *pages[16]; - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; unsigned long nr_pages = end_index - index + 1; int i; @@ -1571,7 +1571,7 @@ static noinline void __unlock_for_delalloc(struct inode *inode, for (i = 0; i < ret; i++) { if (pages[i] != locked_page) unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } nr_pages -= ret; index += ret; @@ -1584,9 +1584,9 @@ static noinline int lock_delalloc_pages(struct inode *inode, u64 delalloc_start, u64 delalloc_end) { - unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT; + unsigned long index = delalloc_start >> PAGE_SHIFT; unsigned long start_index = index; - 
unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT; + unsigned long end_index = delalloc_end >> PAGE_SHIFT; unsigned long pages_locked = 0; struct page *pages[16]; unsigned long nrpages; @@ -1619,11 +1619,11 @@ static noinline int lock_delalloc_pages(struct inode *inode, pages[i]->mapping != inode->i_mapping) { ret = -EAGAIN; unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); goto done; } } - page_cache_release(pages[i]); + put_page(pages[i]); pages_locked++; } nrpages -= ret; @@ -1636,7 +1636,7 @@ done: __unlock_for_delalloc(inode, locked_page, delalloc_start, ((u64)(start_index + pages_locked - 1)) << - PAGE_CACHE_SHIFT); + PAGE_SHIFT); } return ret; } @@ -1696,7 +1696,7 @@ again: free_extent_state(cached_state); cached_state = NULL; if (!loops) { - max_bytes = PAGE_CACHE_SIZE; + max_bytes = PAGE_SIZE; loops = 1; goto again; } else { @@ -1735,8 +1735,8 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; int ret; struct page *pages[16]; - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; unsigned long nr_pages = end_index - index + 1; int i; @@ -1757,7 +1757,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, SetPagePrivate2(pages[i]); if (pages[i] == locked_page) { - page_cache_release(pages[i]); + put_page(pages[i]); continue; } if (page_ops & PAGE_CLEAR_DIRTY) @@ -1770,7 +1770,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, end_page_writeback(pages[i]); if (page_ops & PAGE_UNLOCK) unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } nr_pages -= ret; index += ret; @@ -1961,7 +1961,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) { u64 start = page_offset(page); - u64 end = start + PAGE_CACHE_SIZE - 1; + u64 end = start + PAGE_SIZE - 1; if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) SetPageUptodate(page); } @@ -2071,11 +2071,11 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb, struct page *p = eb->pages[i]; ret = repair_io_failure(root->fs_info->btree_inode, start, - PAGE_CACHE_SIZE, start, p, + PAGE_SIZE, start, p, start - page_offset(p), mirror_num); if (ret) break; - start += PAGE_CACHE_SIZE; + start += PAGE_SIZE; } return ret; @@ -2466,8 +2466,8 @@ static void end_bio_extent_writepage(struct bio *bio) * advance bv_offset and adjust bv_len to compensate. * Print a warning for nonzero offsets, and an error * if they don't add up to a full page. */ - if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { - if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) + if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) { + if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE) btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, "partial page write in btrfs with offset %u and length %u", bvec->bv_offset, bvec->bv_len); @@ -2541,8 +2541,8 @@ static void end_bio_extent_readpage(struct bio *bio) * advance bv_offset and adjust bv_len to compensate. * Print a warning for nonzero offsets, and an error * if they don't add up to a full page. 
*/ - if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { - if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) + if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) { + if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE) btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, "partial page read in btrfs with offset %u and length %u", bvec->bv_offset, bvec->bv_len); @@ -2598,13 +2598,13 @@ static void end_bio_extent_readpage(struct bio *bio) readpage_ok: if (likely(uptodate)) { loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; unsigned off; /* Zero out the end if this page straddles i_size */ - off = i_size & (PAGE_CACHE_SIZE-1); + off = i_size & (PAGE_SIZE-1); if (page->index == end_index && off) - zero_user_segment(page, off, PAGE_CACHE_SIZE); + zero_user_segment(page, off, PAGE_SIZE); SetPageUptodate(page); } else { ClearPageUptodate(page); @@ -2768,7 +2768,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, struct bio *bio; int contig = 0; int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; - size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE); + size_t page_size = min_t(size_t, size, PAGE_SIZE); if (bio_ret && *bio_ret) { bio = *bio_ret; @@ -2821,7 +2821,7 @@ static void attach_extent_buffer_page(struct extent_buffer *eb, { if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); set_page_private(page, (unsigned long)eb); } else { WARN_ON(page->private != (unsigned long)eb); @@ -2832,7 +2832,7 @@ void set_page_extent_mapped(struct page *page) { if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); set_page_private(page, EXTENT_PAGE_PRIVATE); } } @@ -2880,7 +2880,7 @@ static int __do_readpage(struct extent_io_tree *tree, { struct inode *inode = page->mapping->host; u64 start = page_offset(page); - u64 page_end = start + PAGE_CACHE_SIZE - 1; + u64 page_end = start + PAGE_SIZE - 1; u64 end; u64 cur = start; u64 extent_offset; @@ -2909,12 +2909,12 @@ static int __do_readpage(struct extent_io_tree *tree, } } - if (page->index == last_byte >> PAGE_CACHE_SHIFT) { + if (page->index == last_byte >> PAGE_SHIFT) { char *userpage; - size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1); + size_t zero_offset = last_byte & (PAGE_SIZE - 1); if (zero_offset) { - iosize = PAGE_CACHE_SIZE - zero_offset; + iosize = PAGE_SIZE - zero_offset; userpage = kmap_atomic(page); memset(userpage + zero_offset, 0, iosize); flush_dcache_page(page); @@ -2922,14 +2922,14 @@ static int __do_readpage(struct extent_io_tree *tree, } } while (cur <= end) { - unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; + unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1; bool force_bio_submit = false; if (cur >= last_byte) { char *userpage; struct extent_state *cached = NULL; - iosize = PAGE_CACHE_SIZE - pg_offset; + iosize = PAGE_SIZE - pg_offset; userpage = kmap_atomic(page); memset(userpage + pg_offset, 0, iosize); flush_dcache_page(page); @@ -3112,7 +3112,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, for (index = 0; index < nr_pages; index++) { __do_readpage(tree, pages[index], get_extent, em_cached, bio, mirror_num, bio_flags, rw, prev_em_start); - page_cache_release(pages[index]); + put_page(pages[index]); } } @@ -3134,10 +3134,10 @@ static void __extent_readpages(struct extent_io_tree *tree, page_start = page_offset(pages[index]); if (!end) { start = page_start; - end = start + PAGE_CACHE_SIZE - 1; + end = 
start + PAGE_SIZE - 1; first_index = index; } else if (end + 1 == page_start) { - end += PAGE_CACHE_SIZE; + end += PAGE_SIZE; } else { __do_contiguous_readpages(tree, &pages[first_index], index - first_index, start, @@ -3145,7 +3145,7 @@ static void __extent_readpages(struct extent_io_tree *tree, bio, mirror_num, bio_flags, rw, prev_em_start); start = page_start; - end = start + PAGE_CACHE_SIZE - 1; + end = start + PAGE_SIZE - 1; first_index = index; } } @@ -3167,13 +3167,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree, struct inode *inode = page->mapping->host; struct btrfs_ordered_extent *ordered; u64 start = page_offset(page); - u64 end = start + PAGE_CACHE_SIZE - 1; + u64 end = start + PAGE_SIZE - 1; int ret; while (1) { lock_extent(tree, start, end); ordered = btrfs_lookup_ordered_range(inode, start, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (!ordered) break; unlock_extent(tree, start, end); @@ -3227,7 +3227,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode, unsigned long *nr_written) { struct extent_io_tree *tree = epd->tree; - u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1; + u64 page_end = delalloc_start + PAGE_SIZE - 1; u64 nr_delalloc; u64 delalloc_to_write = 0; u64 delalloc_end = 0; @@ -3264,13 +3264,11 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode, goto done; } /* - * delalloc_end is already one less than the total - * length, so we don't subtract one from - * PAGE_CACHE_SIZE + * delalloc_end is already one less than the total length, so + * we don't subtract one from PAGE_SIZE */ delalloc_to_write += (delalloc_end - delalloc_start + - PAGE_CACHE_SIZE) >> - PAGE_CACHE_SHIFT; + PAGE_SIZE) >> PAGE_SHIFT; delalloc_start = delalloc_end + 1; } if (wbc->nr_to_write < delalloc_to_write) { @@ -3319,7 +3317,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, { struct extent_io_tree *tree = epd->tree; u64 start = page_offset(page); - u64 page_end = start + PAGE_CACHE_SIZE - 1; + u64 page_end = start + PAGE_SIZE - 1; u64 end; u64 cur = start; u64 extent_offset; @@ -3434,7 +3432,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, if (ret) { SetPageError(page); } else { - unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1; + unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1; set_range_writeback(tree, cur, cur + iosize - 1); if (!PageWriteback(page)) { @@ -3477,12 +3475,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, struct inode *inode = page->mapping->host; struct extent_page_data *epd = data; u64 start = page_offset(page); - u64 page_end = start + PAGE_CACHE_SIZE - 1; + u64 page_end = start + PAGE_SIZE - 1; int ret; int nr = 0; size_t pg_offset = 0; loff_t i_size = i_size_read(inode); - unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; + unsigned long end_index = i_size >> PAGE_SHIFT; int write_flags; unsigned long nr_written = 0; @@ -3497,10 +3495,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, ClearPageError(page); - pg_offset = i_size & (PAGE_CACHE_SIZE - 1); + pg_offset = i_size & (PAGE_SIZE - 1); if (page->index > end_index || (page->index == end_index && !pg_offset)) { - page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); return 0; } @@ -3510,7 +3508,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, userpage = kmap_atomic(page); memset(userpage + 
pg_offset, 0, - PAGE_CACHE_SIZE - pg_offset); + PAGE_SIZE - pg_offset); kunmap_atomic(userpage); flush_dcache_page(page); } @@ -3748,7 +3746,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, clear_page_dirty_for_io(p); set_page_writeback(p); ret = submit_extent_page(rw, tree, wbc, p, offset >> 9, - PAGE_CACHE_SIZE, 0, bdev, &epd->bio, + PAGE_SIZE, 0, bdev, &epd->bio, -1, end_bio_extent_buffer_writepage, 0, epd->bio_flags, bio_flags, false); epd->bio_flags = bio_flags; @@ -3760,7 +3758,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, ret = -EIO; break; } - offset += PAGE_CACHE_SIZE; + offset += PAGE_SIZE; update_nr_written(p, wbc, 1); unlock_page(p); } @@ -3804,8 +3802,8 @@ int btree_write_cache_pages(struct address_space *mapping, index = mapping->writeback_index; /* Start from prev offset */ end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; scanned = 1; } if (wbc->sync_mode == WB_SYNC_ALL) @@ -3948,8 +3946,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, index = mapping->writeback_index; /* Start from prev offset */ end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; scanned = 1; } if (wbc->sync_mode == WB_SYNC_ALL) @@ -4083,8 +4081,8 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, int ret = 0; struct address_space *mapping = inode->i_mapping; struct page *page; - unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> - PAGE_CACHE_SHIFT; + unsigned long nr_pages = (end - start + PAGE_SIZE) >> + PAGE_SHIFT; struct extent_page_data epd = { .bio = NULL, @@ -4102,18 +4100,18 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, }; while (start <= end) { - page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + page = find_get_page(mapping, start >> PAGE_SHIFT); if (clear_page_dirty_for_io(page)) ret = __extent_writepage(page, &wbc_writepages, &epd); else { if (tree->ops && tree->ops->writepage_end_io_hook) tree->ops->writepage_end_io_hook(page, start, - start + PAGE_CACHE_SIZE - 1, + start + PAGE_SIZE - 1, NULL, 1); unlock_page(page); } - page_cache_release(page); - start += PAGE_CACHE_SIZE; + put_page(page); + start += PAGE_SIZE; } flush_epd_write_bio(&epd); @@ -4163,7 +4161,7 @@ int extent_readpages(struct extent_io_tree *tree, list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, page->index, GFP_NOFS)) { - page_cache_release(page); + put_page(page); continue; } @@ -4197,7 +4195,7 @@ int extent_invalidatepage(struct extent_io_tree *tree, { struct extent_state *cached_state = NULL; u64 start = page_offset(page); - u64 end = start + PAGE_CACHE_SIZE - 1; + u64 end = start + PAGE_SIZE - 1; size_t blocksize = page->mapping->host->i_sb->s_blocksize; start += ALIGN(offset, blocksize); @@ -4223,7 +4221,7 @@ static int try_release_extent_state(struct extent_map_tree *map, struct page *page, gfp_t mask) { u64 start = page_offset(page); - u64 end = start + PAGE_CACHE_SIZE - 1; + u64 end = start + PAGE_SIZE - 1; int ret = 1; if (test_range_bit(tree, start, end, @@ -4262,7 +4260,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, { struct extent_map *em; u64 start = page_offset(page); - u64 end = start + PAGE_CACHE_SIZE - 1; + u64 end = start + PAGE_SIZE - 1; if 
(gfpflags_allow_blocking(mask) && page->mapping->host->i_size > SZ_16M) { @@ -4587,14 +4585,14 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb) ClearPagePrivate(page); set_page_private(page, 0); /* One for the page private */ - page_cache_release(page); + put_page(page); } if (mapped) spin_unlock(&page->mapping->private_lock); /* One for when we alloced the page */ - page_cache_release(page); + put_page(page); } while (index != 0); } @@ -4779,7 +4777,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, rcu_read_lock(); eb = radix_tree_lookup(&fs_info->buffer_radix, - start >> PAGE_CACHE_SHIFT); + start >> PAGE_SHIFT); if (eb && atomic_inc_not_zero(&eb->refs)) { rcu_read_unlock(); /* @@ -4829,7 +4827,7 @@ again: goto free_eb; spin_lock(&fs_info->buffer_lock); ret = radix_tree_insert(&fs_info->buffer_radix, - start >> PAGE_CACHE_SHIFT, eb); + start >> PAGE_SHIFT, eb); spin_unlock(&fs_info->buffer_lock); radix_tree_preload_end(); if (ret == -EEXIST) { @@ -4862,7 +4860,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, unsigned long len = fs_info->tree_root->nodesize; unsigned long num_pages = num_extent_pages(start, len); unsigned long i; - unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; struct extent_buffer *eb; struct extent_buffer *exists = NULL; struct page *p; @@ -4896,7 +4894,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, if (atomic_inc_not_zero(&exists->refs)) { spin_unlock(&mapping->private_lock); unlock_page(p); - page_cache_release(p); + put_page(p); mark_extent_buffer_accessed(exists, p); goto free_eb; } @@ -4908,7 +4906,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, */ ClearPagePrivate(p); WARN_ON(PageDirty(p)); - page_cache_release(p); + put_page(p); } attach_extent_buffer_page(eb, p); spin_unlock(&mapping->private_lock); @@ -4931,7 +4929,7 @@ again: spin_lock(&fs_info->buffer_lock); ret = radix_tree_insert(&fs_info->buffer_radix, - start >> PAGE_CACHE_SHIFT, eb); + start >> PAGE_SHIFT, eb); spin_unlock(&fs_info->buffer_lock); radix_tree_preload_end(); if (ret == -EEXIST) { @@ -4994,7 +4992,7 @@ static int release_extent_buffer(struct extent_buffer *eb) spin_lock(&fs_info->buffer_lock); radix_tree_delete(&fs_info->buffer_radix, - eb->start >> PAGE_CACHE_SHIFT); + eb->start >> PAGE_SHIFT); spin_unlock(&fs_info->buffer_lock); } else { spin_unlock(&eb->refs_lock); @@ -5168,8 +5166,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, if (start) { WARN_ON(start < eb->start); - start_i = (start >> PAGE_CACHE_SHIFT) - - (eb->start >> PAGE_CACHE_SHIFT); + start_i = (start >> PAGE_SHIFT) - + (eb->start >> PAGE_SHIFT); } else { start_i = 0; } @@ -5252,18 +5250,18 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv, struct page *page; char *kaddr; char *dst = (char *)dstv; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); + offset = (start_offset + start) & (PAGE_SIZE - 1); while (len > 0) { page = eb->pages[i]; - cur = min(len, (PAGE_CACHE_SIZE - offset)); + cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); memcpy(dst, kaddr + offset, cur); @@ -5283,19 
+5281,19 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv, struct page *page; char *kaddr; char __user *dst = (char __user *)dstv; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; int ret = 0; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); + offset = (start_offset + start) & (PAGE_SIZE - 1); while (len > 0) { page = eb->pages[i]; - cur = min(len, (PAGE_CACHE_SIZE - offset)); + cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); if (copy_to_user(dst, kaddr + offset, cur)) { ret = -EFAULT; @@ -5316,13 +5314,13 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, unsigned long *map_start, unsigned long *map_len) { - size_t offset = start & (PAGE_CACHE_SIZE - 1); + size_t offset = start & (PAGE_SIZE - 1); char *kaddr; struct page *p; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; unsigned long end_i = (start_offset + start + min_len - 1) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if (i != end_i) return -EINVAL; @@ -5332,7 +5330,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, *map_start = 0; } else { offset = 0; - *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset; + *map_start = ((u64)i << PAGE_SHIFT) - start_offset; } if (start + min_len > eb->len) { @@ -5345,7 +5343,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, p = eb->pages[i]; kaddr = page_address(p); *map = kaddr + offset; - *map_len = PAGE_CACHE_SIZE - offset; + *map_len = PAGE_SIZE - offset; return 0; } @@ -5358,19 +5356,19 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, struct page *page; char *kaddr; char *ptr = (char *)ptrv; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; int ret = 0; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); + offset = (start_offset + start) & (PAGE_SIZE - 1); while (len > 0) { page = eb->pages[i]; - cur = min(len, (PAGE_CACHE_SIZE - offset)); + cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); ret = memcmp(ptr, kaddr + offset, cur); @@ -5393,19 +5391,19 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv, struct page *page; char *kaddr; char *src = (char *)srcv; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); + offset = (start_offset + start) & (PAGE_SIZE - 1); while (len > 0) { page = eb->pages[i]; WARN_ON(!PageUptodate(page)); - cur = min(len, PAGE_CACHE_SIZE - offset); + cur = min(len, PAGE_SIZE - offset); kaddr = page_address(page); memcpy(kaddr + offset, 
src, cur); @@ -5423,19 +5421,19 @@ void memset_extent_buffer(struct extent_buffer *eb, char c, size_t offset; struct page *page; char *kaddr; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); + offset = (start_offset + start) & (PAGE_SIZE - 1); while (len > 0) { page = eb->pages[i]; WARN_ON(!PageUptodate(page)); - cur = min(len, PAGE_CACHE_SIZE - offset); + cur = min(len, PAGE_SIZE - offset); kaddr = page_address(page); memset(kaddr + offset, c, cur); @@ -5454,19 +5452,19 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, size_t offset; struct page *page; char *kaddr; - size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; + size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT; WARN_ON(src->len != dst_len); offset = (start_offset + dst_offset) & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); while (len > 0) { page = dst->pages[i]; WARN_ON(!PageUptodate(page)); - cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset)); + cur = min(len, (unsigned long)(PAGE_SIZE - offset)); kaddr = page_address(page); read_extent_buffer(src, kaddr + offset, src_offset, cur); @@ -5508,7 +5506,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb, unsigned long *page_index, size_t *page_offset) { - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); size_t byte_offset = BIT_BYTE(nr); size_t offset; @@ -5519,8 +5517,8 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb, */ offset = start_offset + start + byte_offset; - *page_index = offset >> PAGE_CACHE_SHIFT; - *page_offset = offset & (PAGE_CACHE_SIZE - 1); + *page_index = offset >> PAGE_SHIFT; + *page_offset = offset & (PAGE_SIZE - 1); } /** @@ -5572,7 +5570,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, len -= bits_to_set; bits_to_set = BITS_PER_BYTE; mask_to_set = ~0U; - if (++offset >= PAGE_CACHE_SIZE && len > 0) { + if (++offset >= PAGE_SIZE && len > 0) { offset = 0; page = eb->pages[++i]; WARN_ON(!PageUptodate(page)); @@ -5614,7 +5612,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, len -= bits_to_clear; bits_to_clear = BITS_PER_BYTE; mask_to_clear = ~0U; - if (++offset >= PAGE_CACHE_SIZE && len > 0) { + if (++offset >= PAGE_SIZE && len > 0) { offset = 0; page = eb->pages[++i]; WARN_ON(!PageUptodate(page)); @@ -5661,7 +5659,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, size_t cur; size_t dst_off_in_page; size_t src_off_in_page; - size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); + size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1); unsigned long dst_i; unsigned long src_i; @@ -5680,17 +5678,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, while (len > 0) { dst_off_in_page = (start_offset + dst_offset) & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); src_off_in_page = (start_offset + src_offset) & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); - dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; - src_i = (start_offset 
+ src_offset) >> PAGE_CACHE_SHIFT; + dst_i = (start_offset + dst_offset) >> PAGE_SHIFT; + src_i = (start_offset + src_offset) >> PAGE_SHIFT; - cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - + cur = min(len, (unsigned long)(PAGE_SIZE - src_off_in_page)); cur = min_t(unsigned long, cur, - (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page)); + (unsigned long)(PAGE_SIZE - dst_off_in_page)); copy_pages(dst->pages[dst_i], dst->pages[src_i], dst_off_in_page, src_off_in_page, cur); @@ -5709,7 +5707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, size_t src_off_in_page; unsigned long dst_end = dst_offset + len - 1; unsigned long src_end = src_offset + len - 1; - size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); + size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1); unsigned long dst_i; unsigned long src_i; @@ -5728,13 +5726,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, return; } while (len > 0) { - dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT; - src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT; + dst_i = (start_offset + dst_end) >> PAGE_SHIFT; + src_i = (start_offset + src_end) >> PAGE_SHIFT; dst_off_in_page = (start_offset + dst_end) & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); src_off_in_page = (start_offset + src_end) & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); cur = min_t(unsigned long, len, src_off_in_page + 1); cur = min(cur, dst_off_in_page + 1); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 5dbf92e68fbd..b5e0ade90e88 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -120,7 +120,7 @@ struct extent_state { }; #define INLINE_EXTENT_BUFFER_PAGES 16 -#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE) +#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE) struct extent_buffer { u64 start; unsigned long len; @@ -365,8 +365,8 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb); static inline unsigned long num_extent_pages(u64 start, u64 len) { - return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - - (start >> PAGE_CACHE_SHIFT); + return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) - + (start >> PAGE_SHIFT); } static inline void extent_buffer_get(struct extent_buffer *eb) diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index b5baf5bdc8e1..7a7d6e253cfc 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -32,7 +32,7 @@ size) - 1)) #define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \ - PAGE_CACHE_SIZE)) + PAGE_SIZE)) #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \ sizeof(struct btrfs_ordered_sum)) / \ @@ -203,7 +203,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, csum = (u8 *)dst; } - if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) + if (bio->bi_iter.bi_size > PAGE_SIZE * 8) path->reada = READA_FORWARD; WARN_ON(bio->bi_vcnt <= 0); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 2f40482347af..fbe2589f99f0 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -414,11 +414,11 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes, size_t copied = 0; size_t total_copied = 0; int pg = 0; - int offset = pos & (PAGE_CACHE_SIZE - 1); + int offset = pos & (PAGE_SIZE - 1); while (write_bytes > 0) { size_t count = min_t(size_t, - PAGE_CACHE_SIZE - offset, write_bytes); + PAGE_SIZE - offset, write_bytes); struct page *page = prepared_pages[pg]; /* * Copy data from userspace to the current page @@ -448,7 
+448,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes, if (unlikely(copied == 0)) break; - if (copied < PAGE_CACHE_SIZE - offset) { + if (copied < PAGE_SIZE - offset) { offset += copied; } else { pg++; @@ -473,7 +473,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages) */ ClearPageChecked(pages[i]); unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } } @@ -1297,7 +1297,7 @@ static int prepare_uptodate_page(struct inode *inode, { int ret = 0; - if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) && + if (((pos & (PAGE_SIZE - 1)) || force_uptodate) && !PageUptodate(page)) { ret = btrfs_readpage(NULL, page); if (ret) @@ -1323,7 +1323,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages, size_t write_bytes, bool force_uptodate) { int i; - unsigned long index = pos >> PAGE_CACHE_SHIFT; + unsigned long index = pos >> PAGE_SHIFT; gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); int err = 0; int faili; @@ -1345,7 +1345,7 @@ again: err = prepare_uptodate_page(inode, pages[i], pos + write_bytes, false); if (err) { - page_cache_release(pages[i]); + put_page(pages[i]); if (err == -EAGAIN) { err = 0; goto again; @@ -1360,7 +1360,7 @@ again: fail: while (faili >= 0) { unlock_page(pages[faili]); - page_cache_release(pages[faili]); + put_page(pages[faili]); faili--; } return err; @@ -1408,7 +1408,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, cached_state, GFP_NOFS); for (i = 0; i < num_pages; i++) { unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); @@ -1497,8 +1497,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, bool force_page_uptodate = false; bool need_unlock; - nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE), - PAGE_CACHE_SIZE / (sizeof(struct page *))); + nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE), + PAGE_SIZE / (sizeof(struct page *))); nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied); nrptrs = max(nrptrs, 8); pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL); @@ -1506,13 +1506,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, return -ENOMEM; while (iov_iter_count(i) > 0) { - size_t offset = pos & (PAGE_CACHE_SIZE - 1); + size_t offset = pos & (PAGE_SIZE - 1); size_t sector_offset; size_t write_bytes = min(iov_iter_count(i), - nrptrs * (size_t)PAGE_CACHE_SIZE - + nrptrs * (size_t)PAGE_SIZE - offset); size_t num_pages = DIV_ROUND_UP(write_bytes + offset, - PAGE_CACHE_SIZE); + PAGE_SIZE); size_t reserve_bytes; size_t dirty_pages; size_t copied; @@ -1547,7 +1547,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, * write_bytes, so scale down. 
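* [Illustrative aside, not part of the original patch:
* DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE) below is the number of
* pages touched by a write of write_bytes bytes that begins offset bytes
* into its first page. With 4 KiB pages, offset = 4000 and
* write_bytes = 200 give DIV_ROUND_UP(4200, 4096) = 2 pages.]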
*/ num_pages = DIV_ROUND_UP(write_bytes + offset, - PAGE_CACHE_SIZE); + PAGE_SIZE); reserve_bytes = round_up(write_bytes + sector_offset, root->sectorsize); goto reserve_metadata; @@ -1609,7 +1609,7 @@ again: } else { force_page_uptodate = false; dirty_pages = DIV_ROUND_UP(copied + offset, - PAGE_CACHE_SIZE); + PAGE_SIZE); } /* @@ -1641,7 +1641,7 @@ again: u64 __pos; __pos = round_down(pos, root->sectorsize) + - (dirty_pages << PAGE_CACHE_SHIFT); + (dirty_pages << PAGE_SHIFT); btrfs_delalloc_release_space(inode, __pos, release_bytes); } @@ -1682,7 +1682,7 @@ again: cond_resched(); balance_dirty_pages_ratelimited(inode->i_mapping); - if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1) + if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1) btrfs_btree_balance_dirty(root); pos += copied; @@ -1738,8 +1738,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb, goto out; written += written_buffered; iocb->ki_pos = pos + written_buffered; - invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT, - endbyte >> PAGE_CACHE_SHIFT); + invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT, + endbyte >> PAGE_SHIFT); out: return written ? written : err; } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 8f835bfa1bdd..5e6062c26129 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -29,7 +29,7 @@ #include "inode-map.h" #include "volumes.h" -#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) +#define BITS_PER_BITMAP (PAGE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG SZ_32K struct btrfs_trim_range { @@ -295,7 +295,7 @@ static int readahead_cache(struct inode *inode) return -ENOMEM; file_ra_state_init(ra, inode->i_mapping); - last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index); @@ -310,14 +310,14 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode, int num_pages; int check_crcs = 0; - num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); + num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID) check_crcs = 1; /* Make sure we can fit our crcs into the first page */ if (write && check_crcs && - (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) + (num_pages * sizeof(u32)) >= PAGE_SIZE) return -ENOSPC; memset(io_ctl, 0, sizeof(struct btrfs_io_ctl)); @@ -354,9 +354,9 @@ static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear) io_ctl->page = io_ctl->pages[io_ctl->index++]; io_ctl->cur = page_address(io_ctl->page); io_ctl->orig = io_ctl->cur; - io_ctl->size = PAGE_CACHE_SIZE; + io_ctl->size = PAGE_SIZE; if (clear) - memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); + memset(io_ctl->cur, 0, PAGE_SIZE); } static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl) @@ -369,7 +369,7 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl) if (io_ctl->pages[i]) { ClearPageChecked(io_ctl->pages[i]); unlock_page(io_ctl->pages[i]); - page_cache_release(io_ctl->pages[i]); + put_page(io_ctl->pages[i]); } } } @@ -475,7 +475,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index) offset = sizeof(u32) * io_ctl->num_pages; crc = btrfs_csum_data(io_ctl->orig + offset, crc, - PAGE_CACHE_SIZE - offset); + PAGE_SIZE - offset); btrfs_csum_final(crc, (char *)&crc); io_ctl_unmap_page(io_ctl); tmp = page_address(io_ctl->pages[0]); @@ -503,7 +503,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index) 
io_ctl_map_page(io_ctl, 0); crc = btrfs_csum_data(io_ctl->orig + offset, crc, - PAGE_CACHE_SIZE - offset); + PAGE_SIZE - offset); btrfs_csum_final(crc, (char *)&crc); if (val != crc) { btrfs_err_rl(io_ctl->root->fs_info, @@ -561,7 +561,7 @@ static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap) io_ctl_map_page(io_ctl, 0); } - memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); + memcpy(io_ctl->cur, bitmap, PAGE_SIZE); io_ctl_set_crc(io_ctl, io_ctl->index - 1); if (io_ctl->index < io_ctl->num_pages) io_ctl_map_page(io_ctl, 0); @@ -621,7 +621,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl, if (ret) return ret; - memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); + memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE); io_ctl_unmap_page(io_ctl); return 0; @@ -775,7 +775,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, } else { ASSERT(num_bitmaps); num_bitmaps--; - e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); + e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS); if (!e->bitmap) { kmem_cache_free( btrfs_free_space_cachep, e); @@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as * we add more bitmaps. */ - bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; + bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE; if (bitmap_bytes >= max_bytes) { ctl->extents_thresh = 0; @@ -2111,7 +2111,7 @@ new_bitmap: } /* allocate the bitmap */ - info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); + info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS); spin_lock(&ctl->tree_lock); if (!info->bitmap) { ret = -ENOMEM; @@ -3580,7 +3580,7 @@ again: } if (!map) { - map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); + map = kzalloc(PAGE_SIZE, GFP_NOFS); if (!map) { kmem_cache_free(btrfs_free_space_cachep, info); return -ENOMEM; diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 1f0ec19b23f6..70107f7c9307 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -283,7 +283,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root) } #define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space)) -#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) +#define INODES_PER_BITMAP (PAGE_SIZE * 8) /* * The goal is to keep the memory used by the free_ino tree won't @@ -317,7 +317,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) } ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * - PAGE_CACHE_SIZE / sizeof(*info); + PAGE_SIZE / sizeof(*info); } /* @@ -481,12 +481,12 @@ again: spin_lock(&ctl->tree_lock); prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents; - prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); - prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; + prealloc = ALIGN(prealloc, PAGE_SIZE); + prealloc += ctl->total_bitmaps * PAGE_SIZE; spin_unlock(&ctl->tree_lock); /* Just to make sure we have enough space */ - prealloc += 8 * PAGE_CACHE_SIZE; + prealloc += 8 * PAGE_SIZE; ret = btrfs_delalloc_reserve_space(inode, 0, prealloc); if (ret) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 41a5688ffdfe..2aaba58b4856 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -194,7 +194,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans, while (compressed_size > 0) { cpage = compressed_pages[i]; cur_size = min_t(unsigned long, compressed_size, - PAGE_CACHE_SIZE); + PAGE_SIZE); kaddr = kmap_atomic(cpage); write_extent_buffer(leaf, kaddr, ptr, cur_size); @@ -208,13 +208,13 @@ static int 
insert_inline_extent(struct btrfs_trans_handle *trans, compress_type); } else { page = find_get_page(inode->i_mapping, - start >> PAGE_CACHE_SHIFT); + start >> PAGE_SHIFT); btrfs_set_file_extent_compression(leaf, ei, 0); kaddr = kmap_atomic(page); - offset = start & (PAGE_CACHE_SIZE - 1); + offset = start & (PAGE_SIZE - 1); write_extent_buffer(leaf, kaddr + offset, ptr, size); kunmap_atomic(kaddr); - page_cache_release(page); + put_page(page); } btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); @@ -322,7 +322,7 @@ out: * And at reserve time, it's always aligned to page size, so * just free one page here. */ - btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE); + btrfs_qgroup_free_data(inode, 0, PAGE_SIZE); btrfs_free_path(path); btrfs_end_transaction(trans, root); return ret; @@ -435,8 +435,8 @@ static noinline void compress_file_range(struct inode *inode, actual_end = min_t(u64, isize, end + 1); again: will_compress = 0; - nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; - nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE); + nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; + nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE); /* * we don't want to send crud past the end of i_size through @@ -514,7 +514,7 @@ again: if (!ret) { unsigned long offset = total_compressed & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); struct page *page = pages[nr_pages_ret - 1]; char *kaddr; @@ -524,7 +524,7 @@ again: if (offset) { kaddr = kmap_atomic(page); memset(kaddr + offset, 0, - PAGE_CACHE_SIZE - offset); + PAGE_SIZE - offset); kunmap_atomic(kaddr); } will_compress = 1; @@ -580,7 +580,7 @@ cont: * one last check to make sure the compression is really a * win, compare the page count read with the blocks on disk */ - total_in = ALIGN(total_in, PAGE_CACHE_SIZE); + total_in = ALIGN(total_in, PAGE_SIZE); if (total_compressed >= total_in) { will_compress = 0; } else { @@ -594,7 +594,7 @@ cont: */ for (i = 0; i < nr_pages_ret; i++) { WARN_ON(pages[i]->mapping); - page_cache_release(pages[i]); + put_page(pages[i]); } kfree(pages); pages = NULL; @@ -650,7 +650,7 @@ cleanup_and_bail_uncompressed: free_pages_out: for (i = 0; i < nr_pages_ret; i++) { WARN_ON(pages[i]->mapping); - page_cache_release(pages[i]); + put_page(pages[i]); } kfree(pages); } @@ -664,7 +664,7 @@ static void free_async_extent_pages(struct async_extent *async_extent) for (i = 0; i < async_extent->nr_pages; i++) { WARN_ON(async_extent->pages[i]->mapping); - page_cache_release(async_extent->pages[i]); + put_page(async_extent->pages[i]); } kfree(async_extent->pages); async_extent->nr_pages = 0; @@ -966,7 +966,7 @@ static noinline int cow_file_range(struct inode *inode, PAGE_END_WRITEBACK); *nr_written = *nr_written + - (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; + (end - start + PAGE_SIZE) / PAGE_SIZE; *page_started = 1; goto out; } else if (ret < 0) { @@ -1106,8 +1106,8 @@ static noinline void async_cow_submit(struct btrfs_work *work) async_cow = container_of(work, struct async_cow, work); root = async_cow->root; - nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> - PAGE_CACHE_SHIFT; + nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >> + PAGE_SHIFT; /* * atomic_sub_return implies a barrier for waitqueue_active @@ -1164,8 +1164,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, async_cow_start, async_cow_submit, async_cow_free); - nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> - PAGE_CACHE_SHIFT; + nr_pages = 
(cur_end - start + PAGE_SIZE) >> + PAGE_SHIFT; atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); btrfs_queue_work(root->fs_info->delalloc_workers, @@ -1960,7 +1960,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, struct extent_state **cached_state) { - WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0); + WARN_ON((end & (PAGE_SIZE - 1)) == 0); return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, cached_state, GFP_NOFS); } @@ -1993,7 +1993,7 @@ again: inode = page->mapping->host; page_start = page_offset(page); - page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; + page_end = page_offset(page) + PAGE_SIZE - 1; lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, &cached_state); @@ -2003,7 +2003,7 @@ again: goto out; ordered = btrfs_lookup_ordered_range(inode, page_start, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (ordered) { unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, &cached_state, GFP_NOFS); @@ -2014,7 +2014,7 @@ again: } ret = btrfs_delalloc_reserve_space(inode, page_start, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (ret) { mapping_set_error(page->mapping, ret); end_extent_writepage(page, ret, page_start, page_end); @@ -2030,7 +2030,7 @@ out: &cached_state, GFP_NOFS); out_page: unlock_page(page); - page_cache_release(page); + put_page(page); kfree(fixup); } @@ -2063,7 +2063,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) return -EAGAIN; SetPageChecked(page); - page_cache_get(page); + get_page(page); btrfs_init_work(&fixup->work, btrfs_fixup_helper, btrfs_writepage_fixup_worker, NULL, NULL); fixup->page = page; @@ -4247,7 +4247,7 @@ static int truncate_inline_extent(struct inode *inode, if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) { loff_t offset = new_size; - loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE); + loff_t page_end = ALIGN(offset, PAGE_SIZE); /* * Zero out the remaining of the last page of our inline extent, @@ -4633,7 +4633,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, struct extent_state *cached_state = NULL; char *kaddr; u32 blocksize = root->sectorsize; - pgoff_t index = from >> PAGE_CACHE_SHIFT; + pgoff_t index = from >> PAGE_SHIFT; unsigned offset = from & (blocksize - 1); struct page *page; gfp_t mask = btrfs_alloc_write_mask(mapping); @@ -4668,7 +4668,7 @@ again: lock_page(page); if (page->mapping != mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); goto again; } if (!PageUptodate(page)) { @@ -4686,7 +4686,7 @@ again: unlock_extent_cached(io_tree, block_start, block_end, &cached_state, GFP_NOFS); unlock_page(page); - page_cache_release(page); + put_page(page); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); goto again; @@ -4728,7 +4728,7 @@ out_unlock: btrfs_delalloc_release_space(inode, block_start, blocksize); unlock_page(page); - page_cache_release(page); + put_page(page); out: return ret; } @@ -6717,7 +6717,7 @@ static noinline int uncompress_inline(struct btrfs_path *path, read_extent_buffer(leaf, tmp, ptr, inline_size); - max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); + max_size = min_t(unsigned long, PAGE_SIZE, max_size); ret = btrfs_decompress(compress_type, tmp, page, extent_offset, inline_size, max_size); kfree(tmp); @@ -6879,8 +6879,8 @@ next: size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); extent_offset = page_offset(page) + pg_offset - 
extent_start; - copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, - size - extent_offset); + copy_size = min_t(u64, PAGE_SIZE - pg_offset, + size - extent_offset); em->start = extent_start + extent_offset; em->len = ALIGN(copy_size, root->sectorsize); em->orig_block_len = em->len; @@ -6899,9 +6899,9 @@ next: map = kmap(page); read_extent_buffer(leaf, map + pg_offset, ptr, copy_size); - if (pg_offset + copy_size < PAGE_CACHE_SIZE) { + if (pg_offset + copy_size < PAGE_SIZE) { memset(map + pg_offset + copy_size, 0, - PAGE_CACHE_SIZE - pg_offset - + PAGE_SIZE - pg_offset - copy_size); } kunmap(page); @@ -7336,12 +7336,12 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) int start_idx; int end_idx; - start_idx = start >> PAGE_CACHE_SHIFT; + start_idx = start >> PAGE_SHIFT; /* * end is the last byte in the last page. end == start is legal */ - end_idx = end >> PAGE_CACHE_SHIFT; + end_idx = end >> PAGE_SHIFT; rcu_read_lock(); @@ -7382,7 +7382,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) * include/linux/pagemap.h for details. */ if (unlikely(page != *pagep)) { - page_cache_release(page); + put_page(page); page = NULL; } } @@ -7390,7 +7390,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) if (page) { if (page->index <= end_idx) found = true; - page_cache_release(page); + put_page(page); } rcu_read_unlock(); @@ -8719,7 +8719,7 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) if (ret == 1) { ClearPagePrivate(page); set_page_private(page, 0); - page_cache_release(page); + put_page(page); } return ret; } @@ -8739,7 +8739,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, struct btrfs_ordered_extent *ordered; struct extent_state *cached_state = NULL; u64 page_start = page_offset(page); - u64 page_end = page_start + PAGE_CACHE_SIZE - 1; + u64 page_end = page_start + PAGE_SIZE - 1; u64 start; u64 end; int inode_evicting = inode->i_state & I_FREEING; @@ -8822,7 +8822,7 @@ again: * 2) Not written to disk * This means the reserved space should be freed here. 
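* [Illustrative aside, not part of the original patch: as a comment in
* the inline-extent error path earlier in this diff notes, qgroup data
* reservations are page-aligned at reserve time, which is why a fixed
* PAGE_SIZE worth of reserved space is released for the invalidated
* page here.]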
*/ - btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE); + btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE); if (!inode_evicting) { clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DIRTY | @@ -8837,7 +8837,7 @@ again: if (PagePrivate(page)) { ClearPagePrivate(page); set_page_private(page, 0); - page_cache_release(page); + put_page(page); } } @@ -8874,11 +8874,11 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) u64 page_end; u64 end; - reserved_space = PAGE_CACHE_SIZE; + reserved_space = PAGE_SIZE; sb_start_pagefault(inode->i_sb); page_start = page_offset(page); - page_end = page_start + PAGE_CACHE_SIZE - 1; + page_end = page_start + PAGE_SIZE - 1; end = page_end; /* @@ -8934,15 +8934,15 @@ again: goto again; } - if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) { + if (page->index == ((size - 1) >> PAGE_SHIFT)) { reserved_space = round_up(size - page_start, root->sectorsize); - if (reserved_space < PAGE_CACHE_SIZE) { + if (reserved_space < PAGE_SIZE) { end = page_start + reserved_space - 1; spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); btrfs_delalloc_release_space(inode, page_start, - PAGE_CACHE_SIZE - reserved_space); + PAGE_SIZE - reserved_space); } } @@ -8969,14 +8969,14 @@ again: ret = 0; /* page is wholly or partially inside EOF */ - if (page_start + PAGE_CACHE_SIZE > size) - zero_start = size & ~PAGE_CACHE_MASK; + if (page_start + PAGE_SIZE > size) + zero_start = size & ~PAGE_MASK; else - zero_start = PAGE_CACHE_SIZE; + zero_start = PAGE_SIZE; - if (zero_start != PAGE_CACHE_SIZE) { + if (zero_start != PAGE_SIZE) { kaddr = kmap(page); - memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); + memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start); flush_dcache_page(page); kunmap(page); } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 053e677839fe..94a0c8a3e871 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -898,7 +898,7 @@ static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh) u64 end; read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); + em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE); read_unlock(&em_tree->lock); if (em) { @@ -988,7 +988,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_map *em; - u64 len = PAGE_CACHE_SIZE; + u64 len = PAGE_SIZE; /* * hopefully we have this extent in the tree already, try without @@ -1124,15 +1124,15 @@ static int cluster_pages_for_defrag(struct inode *inode, struct extent_io_tree *tree; gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); - file_end = (isize - 1) >> PAGE_CACHE_SHIFT; + file_end = (isize - 1) >> PAGE_SHIFT; if (!isize || start_index > file_end) return 0; page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); ret = btrfs_delalloc_reserve_space(inode, - start_index << PAGE_CACHE_SHIFT, - page_cnt << PAGE_CACHE_SHIFT); + start_index << PAGE_SHIFT, + page_cnt << PAGE_SHIFT); if (ret) return ret; i_done = 0; @@ -1148,7 +1148,7 @@ again: break; page_start = page_offset(page); - page_end = page_start + PAGE_CACHE_SIZE - 1; + page_end = page_start + PAGE_SIZE - 1; while (1) { lock_extent_bits(tree, page_start, page_end, &cached_state); @@ -1169,7 +1169,7 @@ again: */ if (page->mapping != inode->i_mapping) { unlock_page(page); - 
page_cache_release(page); + put_page(page); goto again; } } @@ -1179,7 +1179,7 @@ again: lock_page(page); if (!PageUptodate(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); ret = -EIO; break; } @@ -1187,7 +1187,7 @@ again: if (page->mapping != inode->i_mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); goto again; } @@ -1208,7 +1208,7 @@ again: wait_on_page_writeback(pages[i]); page_start = page_offset(pages[0]); - page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; + page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE; lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state); @@ -1222,8 +1222,8 @@ again: BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); btrfs_delalloc_release_space(inode, - start_index << PAGE_CACHE_SHIFT, - (page_cnt - i_done) << PAGE_CACHE_SHIFT); + start_index << PAGE_SHIFT, + (page_cnt - i_done) << PAGE_SHIFT); } @@ -1240,17 +1240,17 @@ again: set_page_extent_mapped(pages[i]); set_page_dirty(pages[i]); unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } return i_done; out: for (i = 0; i < i_done; i++) { unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } btrfs_delalloc_release_space(inode, - start_index << PAGE_CACHE_SHIFT, - page_cnt << PAGE_CACHE_SHIFT); + start_index << PAGE_SHIFT, + page_cnt << PAGE_SHIFT); return ret; } @@ -1273,7 +1273,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, int defrag_count = 0; int compress_type = BTRFS_COMPRESS_ZLIB; u32 extent_thresh = range->extent_thresh; - unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT; + unsigned long max_cluster = SZ_256K >> PAGE_SHIFT; unsigned long cluster = max_cluster; u64 new_align = ~((u64)SZ_128K - 1); struct page **pages = NULL; @@ -1317,9 +1317,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, /* find the last page to defrag */ if (range->start + range->len > range->start) { last_index = min_t(u64, isize - 1, - range->start + range->len - 1) >> PAGE_CACHE_SHIFT; + range->start + range->len - 1) >> PAGE_SHIFT; } else { - last_index = (isize - 1) >> PAGE_CACHE_SHIFT; + last_index = (isize - 1) >> PAGE_SHIFT; } if (newer_than) { @@ -1331,11 +1331,11 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, * we always align our defrag to help keep * the extents in the file evenly spaced */ - i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; + i = (newer_off & new_align) >> PAGE_SHIFT; } else goto out_ra; } else { - i = range->start >> PAGE_CACHE_SHIFT; + i = range->start >> PAGE_SHIFT; } if (!max_to_defrag) max_to_defrag = last_index - i + 1; @@ -1348,7 +1348,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, inode->i_mapping->writeback_index = i; while (i <= last_index && defrag_count < max_to_defrag && - (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) { + (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) { /* * make sure we stop running if someone unmounts * the FS @@ -1362,7 +1362,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, break; } - if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, + if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT, extent_thresh, &last_len, &skip, &defrag_end, range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { @@ -1371,14 +1371,14 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, * the should_defrag function tells us how much to skip * bump our counter by the suggested amount */ - next = 
DIV_ROUND_UP(skip, PAGE_CACHE_SIZE); + next = DIV_ROUND_UP(skip, PAGE_SIZE); i = max(i + 1, next); continue; } if (!newer_than) { - cluster = (PAGE_CACHE_ALIGN(defrag_end) >> - PAGE_CACHE_SHIFT) - i; + cluster = (PAGE_ALIGN(defrag_end) >> + PAGE_SHIFT) - i; cluster = min(cluster, max_cluster); } else { cluster = max_cluster; @@ -1412,20 +1412,20 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, i += ret; newer_off = max(newer_off + 1, - (u64)i << PAGE_CACHE_SHIFT); + (u64)i << PAGE_SHIFT); ret = find_new_extents(root, inode, newer_than, &newer_off, SZ_64K); if (!ret) { range->start = newer_off; - i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; + i = (newer_off & new_align) >> PAGE_SHIFT; } else { break; } } else { if (ret > 0) { i += ret; - last_len += ret << PAGE_CACHE_SHIFT; + last_len += ret << PAGE_SHIFT; } else { i++; last_len = 0; @@ -1722,7 +1722,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, if (vol_args->flags & BTRFS_SUBVOL_RDONLY) readonly = true; if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { - if (vol_args->size > PAGE_CACHE_SIZE) { + if (vol_args->size > PAGE_SIZE) { ret = -EINVAL; goto free_args; } @@ -2806,12 +2806,12 @@ static struct page *extent_same_get_page(struct inode *inode, pgoff_t index) lock_page(page); if (!PageUptodate(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); return ERR_PTR(-EIO); } if (page->mapping != inode->i_mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); return ERR_PTR(-EAGAIN); } } @@ -2823,7 +2823,7 @@ static int gather_extent_pages(struct inode *inode, struct page **pages, int num_pages, u64 off) { int i; - pgoff_t index = off >> PAGE_CACHE_SHIFT; + pgoff_t index = off >> PAGE_SHIFT; for (i = 0; i < num_pages; i++) { again: @@ -2932,12 +2932,12 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp) pg = cmp->src_pages[i]; if (pg) { unlock_page(pg); - page_cache_release(pg); + put_page(pg); } pg = cmp->dst_pages[i]; if (pg) { unlock_page(pg); - page_cache_release(pg); + put_page(pg); } } kfree(cmp->src_pages); @@ -2949,7 +2949,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff, u64 len, struct cmp_pages *cmp) { int ret; - int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT; + int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT; struct page **src_pgarr, **dst_pgarr; /* @@ -2987,12 +2987,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst, int ret = 0; int i; struct page *src_page, *dst_page; - unsigned int cmp_len = PAGE_CACHE_SIZE; + unsigned int cmp_len = PAGE_SIZE; void *addr, *dst_addr; i = 0; while (len) { - if (len < PAGE_CACHE_SIZE) + if (len < PAGE_SIZE) cmp_len = len; BUG_ON(i >= cmp->num_pages); @@ -3191,7 +3191,7 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen, if (olen > BTRFS_MAX_DEDUPE_LEN) olen = BTRFS_MAX_DEDUPE_LEN; - if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) { + if (WARN_ON_ONCE(bs < PAGE_SIZE)) { /* * Btrfs does not support blocksize < page_size. As a * result, btrfs_cmp_data() won't correctly handle @@ -3891,8 +3891,8 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, * data immediately and not the previous data. 
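* [Illustrative aside, not part of the original patch: the rounding
* below widens the byte range [destoff, destoff + len) outward to whole
* page boundaries -- round_down(destoff, PAGE_SIZE) through
* round_up(destoff + len, PAGE_SIZE) - 1 -- so every page cache page
* overlapping the clone destination is dropped in full.]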
*/ truncate_inode_pages_range(&inode->i_data, - round_down(destoff, PAGE_CACHE_SIZE), - round_up(destoff + len, PAGE_CACHE_SIZE) - 1); + round_down(destoff, PAGE_SIZE), + round_up(destoff + len, PAGE_SIZE) - 1); out_unlock: if (!same_inode) btrfs_double_inode_unlock(src, inode); @@ -4124,7 +4124,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) /* we generally have at most 6 or so space infos, one for each raid * level. So, a whole page should be more than enough for everyone */ - if (alloc_size > PAGE_CACHE_SIZE) + if (alloc_size > PAGE_SIZE) return -ENOMEM; space_args.total_spaces = 0; diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index a2f051347731..1adfbe7be6b8 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -55,8 +55,8 @@ static struct list_head *lzo_alloc_workspace(void) return ERR_PTR(-ENOMEM); workspace->mem = vmalloc(LZO1X_MEM_COMPRESS); - workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); - workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); + workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); + workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); if (!workspace->mem || !workspace->buf || !workspace->cbuf) goto fail; @@ -116,7 +116,7 @@ static int lzo_compress_pages(struct list_head *ws, *total_out = 0; *total_in = 0; - in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + in_page = find_get_page(mapping, start >> PAGE_SHIFT); data_in = kmap(in_page); /* @@ -133,10 +133,10 @@ static int lzo_compress_pages(struct list_head *ws, tot_out = LZO_LEN; pages[0] = out_page; nr_pages = 1; - pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; + pg_bytes_left = PAGE_SIZE - LZO_LEN; /* compress at most one page of data each time */ - in_len = min(len, PAGE_CACHE_SIZE); + in_len = min(len, PAGE_SIZE); while (tot_in < len) { ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, &out_len, workspace->mem); @@ -201,7 +201,7 @@ static int lzo_compress_pages(struct list_head *ws, cpage_out = kmap(out_page); pages[nr_pages++] = out_page; - pg_bytes_left = PAGE_CACHE_SIZE; + pg_bytes_left = PAGE_SIZE; out_offset = 0; } } @@ -221,12 +221,12 @@ static int lzo_compress_pages(struct list_head *ws, bytes_left = len - tot_in; kunmap(in_page); - page_cache_release(in_page); + put_page(in_page); - start += PAGE_CACHE_SIZE; - in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + start += PAGE_SIZE; + in_page = find_get_page(mapping, start >> PAGE_SHIFT); data_in = kmap(in_page); - in_len = min(bytes_left, PAGE_CACHE_SIZE); + in_len = min(bytes_left, PAGE_SIZE); } if (tot_out > tot_in) @@ -248,7 +248,7 @@ out: if (in_page) { kunmap(in_page); - page_cache_release(in_page); + put_page(in_page); } return ret; @@ -266,7 +266,7 @@ static int lzo_decompress_biovec(struct list_head *ws, char *data_in; unsigned long page_in_index = 0; unsigned long page_out_index = 0; - unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); + unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE); unsigned long buf_start; unsigned long buf_offset = 0; unsigned long bytes; @@ -289,7 +289,7 @@ static int lzo_decompress_biovec(struct list_head *ws, tot_in = LZO_LEN; in_offset = LZO_LEN; tot_len = min_t(size_t, srclen, tot_len); - in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; + in_page_bytes_left = PAGE_SIZE - LZO_LEN; tot_out = 0; pg_offset = 0; @@ -345,12 +345,12 @@ cont: data_in = kmap(pages_in[++page_in_index]); - in_page_bytes_left = PAGE_CACHE_SIZE; + in_page_bytes_left = PAGE_SIZE; in_offset = 0; } } - out_len = 
lzo1x_worst_compress(PAGE_CACHE_SIZE); + out_len = lzo1x_worst_compress(PAGE_SIZE); ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, &out_len); if (need_unmap) @@ -399,7 +399,7 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in, in_len = read_compress_length(data_in); data_in += LZO_LEN; - out_len = PAGE_CACHE_SIZE; + out_len = PAGE_SIZE; ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len); if (ret != LZO_E_OK) { printk(KERN_WARNING "BTRFS: decompress failed!\n"); diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 55161369fab1..0b7792e02dd5 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -270,7 +270,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio) s = kmap(rbio->bio_pages[i]); d = kmap(rbio->stripe_pages[i]); - memcpy(d, s, PAGE_CACHE_SIZE); + memcpy(d, s, PAGE_SIZE); kunmap(rbio->bio_pages[i]); kunmap(rbio->stripe_pages[i]); @@ -962,7 +962,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, */ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) { - return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes; + return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes; } /* @@ -1078,7 +1078,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, u64 disk_start; stripe = &rbio->bbio->stripes[stripe_nr]; - disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT); + disk_start = stripe->physical + (page_index << PAGE_SHIFT); /* if the device is missing, just fail this stripe */ if (!stripe->dev->bdev) @@ -1096,8 +1096,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, if (last_end == disk_start && stripe->dev->bdev && !last->bi_error && last->bi_bdev == stripe->dev->bdev) { - ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0); - if (ret == PAGE_CACHE_SIZE) + ret = bio_add_page(last, page, PAGE_SIZE, 0); + if (ret == PAGE_SIZE) return 0; } } @@ -1111,7 +1111,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, bio->bi_bdev = stripe->dev->bdev; bio->bi_iter.bi_sector = disk_start >> 9; - bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); + bio_add_page(bio, page, PAGE_SIZE, 0); bio_list_add(bio_list, bio); return 0; } @@ -1154,7 +1154,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio) bio_list_for_each(bio, &rbio->bio_list) { start = (u64)bio->bi_iter.bi_sector << 9; stripe_offset = start - rbio->bbio->raid_map[0]; - page_index = stripe_offset >> PAGE_CACHE_SHIFT; + page_index = stripe_offset >> PAGE_SHIFT; for (i = 0; i < bio->bi_vcnt; i++) { p = bio->bi_io_vec[i].bv_page; @@ -1253,7 +1253,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) } else { /* raid5 */ memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); - run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); + run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); } @@ -1914,7 +1914,7 @@ pstripe: /* Copy parity block into failed block to start with */ memcpy(pointers[faila], pointers[rbio->nr_data], - PAGE_CACHE_SIZE); + PAGE_SIZE); /* rearrange the pointer array */ p = pointers[faila]; @@ -1923,7 +1923,7 @@ pstripe: pointers[rbio->nr_data - 1] = p; /* xor in the rest */ - run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE); + run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE); } /* if we're doing this rebuild as part of an rmw, go through * and set all of our private rbio pages in the @@ -2250,7 +2250,7 @@ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + rbio->stripe_len * rbio->nr_data); 
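/* [Illustrative aside, not part of the original patch: every page index
* in this series is a byte offset shifted down by PAGE_SHIFT. With 4 KiB
* pages (PAGE_SHIFT == 12), stripe_offset == 0x3250 gives page index
* 0x3250 >> 12 == 3 and in-page byte 0x3250 & (PAGE_SIZE - 1) == 0x250;
* the substitution is safe because PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE and
* PAGE_CACHE_MASK were defined in include/linux/pagemap.h as aliases of
* PAGE_SHIFT, PAGE_SIZE and PAGE_MASK.] */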
stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); - index = stripe_offset >> PAGE_CACHE_SHIFT; + index = stripe_offset >> PAGE_SHIFT; rbio->bio_pages[index] = page; } @@ -2365,14 +2365,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, } else { /* raid5 */ memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); - run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); + run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); } /* Check scrubbing parity and repair it */ p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); parity = kmap(p); - if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE)) - memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE); + if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE)) + memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE); else /* Parity is right, needn't writeback */ bitmap_clear(rbio->dbitmap, pagenr, 1); diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index b892914968c1..298631eaee78 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -226,7 +226,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info, /* find extent */ spin_lock(&fs_info->reada_lock); re = radix_tree_lookup(&fs_info->reada_tree, - start >> PAGE_CACHE_SHIFT); + start >> PAGE_SHIFT); if (re) re->refcnt++; spin_unlock(&fs_info->reada_lock); @@ -257,7 +257,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info, zone = NULL; spin_lock(&fs_info->reada_lock); ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, - logical >> PAGE_CACHE_SHIFT, 1); + logical >> PAGE_SHIFT, 1); if (ret == 1 && logical >= zone->start && logical <= zone->end) { kref_get(&zone->refcnt); spin_unlock(&fs_info->reada_lock); @@ -294,13 +294,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info, spin_lock(&fs_info->reada_lock); ret = radix_tree_insert(&dev->reada_zones, - (unsigned long)(zone->end >> PAGE_CACHE_SHIFT), + (unsigned long)(zone->end >> PAGE_SHIFT), zone); if (ret == -EEXIST) { kfree(zone); ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, - logical >> PAGE_CACHE_SHIFT, 1); + logical >> PAGE_SHIFT, 1); if (ret == 1 && logical >= zone->start && logical <= zone->end) kref_get(&zone->refcnt); else @@ -326,7 +326,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root, u64 length; int real_stripes; int nzones = 0; - unsigned long index = logical >> PAGE_CACHE_SHIFT; + unsigned long index = logical >> PAGE_SHIFT; int dev_replace_is_ongoing; int have_zone = 0; @@ -495,7 +495,7 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info, struct reada_extent *re) { int i; - unsigned long index = re->logical >> PAGE_CACHE_SHIFT; + unsigned long index = re->logical >> PAGE_SHIFT; spin_lock(&fs_info->reada_lock); if (--re->refcnt) { @@ -538,7 +538,7 @@ static void reada_zone_release(struct kref *kref) struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt); radix_tree_delete(&zone->device->reada_zones, - zone->end >> PAGE_CACHE_SHIFT); + zone->end >> PAGE_SHIFT); kfree(zone); } @@ -587,7 +587,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical, static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock) { int i; - unsigned long index = zone->end >> PAGE_CACHE_SHIFT; + unsigned long index = zone->end >> PAGE_SHIFT; for (i = 0; i < zone->ndevs; ++i) { struct reada_zone *peer; @@ -622,7 +622,7 @@ static int reada_pick_zone(struct btrfs_device *dev) (void **)&zone, index, 1); if (ret == 0) break; - index =
(zone->end >> PAGE_SHIFT) + 1; if (zone->locked) { if (zone->elems > top_locked_elems) { top_locked_elems = zone->elems; @@ -673,7 +673,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, * plugging to speed things up */ ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, - dev->reada_next >> PAGE_CACHE_SHIFT, 1); + dev->reada_next >> PAGE_SHIFT, 1); if (ret == 0 || re->logical > dev->reada_curr_zone->end) { ret = reada_pick_zone(dev); if (!ret) { @@ -682,7 +682,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, } re = NULL; ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, - dev->reada_next >> PAGE_CACHE_SHIFT, 1); + dev->reada_next >> PAGE_SHIFT, 1); } if (ret == 0) { spin_unlock(&fs_info->reada_lock); @@ -838,7 +838,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) printk(KERN_CONT " curr off %llu", device->reada_next - zone->start); printk(KERN_CONT "\n"); - index = (zone->end >> PAGE_CACHE_SHIFT) + 1; + index = (zone->end >> PAGE_SHIFT) + 1; } cnt = 0; index = 0; @@ -864,7 +864,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) } } printk(KERN_CONT "\n"); - index = (re->logical >> PAGE_CACHE_SHIFT) + 1; + index = (re->logical >> PAGE_SHIFT) + 1; if (++cnt > 15) break; } @@ -880,7 +880,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) if (ret == 0) break; if (!re->scheduled) { - index = (re->logical >> PAGE_CACHE_SHIFT) + 1; + index = (re->logical >> PAGE_SHIFT) + 1; continue; } printk(KERN_DEBUG @@ -897,7 +897,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) } } printk(KERN_CONT "\n"); - index = (re->logical >> PAGE_CACHE_SHIFT) + 1; + index = (re->logical >> PAGE_SHIFT) + 1; } spin_unlock(&fs_info->reada_lock); } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 2bd0011450df..3c93968b539d 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3129,10 +3129,10 @@ static int relocate_file_extent_cluster(struct inode *inode, if (ret) goto out; - index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; - last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; + index = (cluster->start - offset) >> PAGE_SHIFT; + last_index = (cluster->end - offset) >> PAGE_SHIFT; while (index <= last_index) { - ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); + ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE); if (ret) goto out; @@ -3145,7 +3145,7 @@ static int relocate_file_extent_cluster(struct inode *inode, mask); if (!page) { btrfs_delalloc_release_metadata(inode, - PAGE_CACHE_SIZE); + PAGE_SIZE); ret = -ENOMEM; goto out; } @@ -3162,16 +3162,16 @@ static int relocate_file_extent_cluster(struct inode *inode, lock_page(page); if (!PageUptodate(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); btrfs_delalloc_release_metadata(inode, - PAGE_CACHE_SIZE); + PAGE_SIZE); ret = -EIO; goto out; } } page_start = page_offset(page); - page_end = page_start + PAGE_CACHE_SIZE - 1; + page_end = page_start + PAGE_SIZE - 1; lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); @@ -3191,7 +3191,7 @@ static int relocate_file_extent_cluster(struct inode *inode, unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); unlock_page(page); - page_cache_release(page); + put_page(page); index++; balance_dirty_pages_ratelimited(inode->i_mapping); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 39dbdcbf4d13..4678f03e878e 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -703,7 +703,7 @@ static int 
scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) if (IS_ERR(inode)) return PTR_ERR(inode); - index = offset >> PAGE_CACHE_SHIFT; + index = offset >> PAGE_SHIFT; page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); if (!page) { @@ -1636,7 +1636,7 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, if (spage->io_error) { void *mapped_buffer = kmap_atomic(spage->page); - memset(mapped_buffer, 0, PAGE_CACHE_SIZE); + memset(mapped_buffer, 0, PAGE_SIZE); flush_dcache_page(spage->page); kunmap_atomic(mapped_buffer); } @@ -4294,8 +4294,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, goto out; } - while (len >= PAGE_CACHE_SIZE) { - index = offset >> PAGE_CACHE_SHIFT; + while (len >= PAGE_SIZE) { + index = offset >> PAGE_SHIFT; again: page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); if (!page) { @@ -4326,7 +4326,7 @@ again: */ if (page->mapping != inode->i_mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); goto again; } if (!PageUptodate(page)) { @@ -4348,15 +4348,15 @@ again: ret = err; next_page: unlock_page(page); - page_cache_release(page); + put_page(page); if (ret) break; - offset += PAGE_CACHE_SIZE; - physical_for_dev_replace += PAGE_CACHE_SIZE; - nocow_ctx_logical += PAGE_CACHE_SIZE; - len -= PAGE_CACHE_SIZE; + offset += PAGE_SIZE; + physical_for_dev_replace += PAGE_SIZE; + nocow_ctx_logical += PAGE_SIZE; + len -= PAGE_SIZE; } ret = COPY_COMPLETE; out: @@ -4390,8 +4390,8 @@ static int write_page_nocow(struct scrub_ctx *sctx, bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; bio->bi_bdev = dev->bdev; - ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); - if (ret != PAGE_CACHE_SIZE) { + ret = bio_add_page(bio, page, PAGE_SIZE, 0); + if (ret != PAGE_SIZE) { leave_with_eio: bio_put(bio); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 19b7bf4284ee..8d358c547c59 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -4449,9 +4449,9 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) struct page *page; char *addr; struct btrfs_key key; - pgoff_t index = offset >> PAGE_CACHE_SHIFT; + pgoff_t index = offset >> PAGE_SHIFT; pgoff_t last_index; - unsigned pg_offset = offset & ~PAGE_CACHE_MASK; + unsigned pg_offset = offset & ~PAGE_MASK; ssize_t ret = 0; key.objectid = sctx->cur_ino; @@ -4471,7 +4471,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) if (len == 0) goto out; - last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; + last_index = (offset + len - 1) >> PAGE_SHIFT; /* initial readahead */ memset(&sctx->ra, 0, sizeof(struct file_ra_state)); @@ -4481,7 +4481,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) while (index <= last_index) { unsigned cur_len = min_t(unsigned, len, - PAGE_CACHE_SIZE - pg_offset); + PAGE_SIZE - pg_offset); page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); if (!page) { ret = -ENOMEM; @@ -4493,7 +4493,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) lock_page(page); if (!PageUptodate(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); ret = -EIO; break; } @@ -4503,7 +4503,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len); kunmap(page); unlock_page(page); - page_cache_release(page); + put_page(page); index++; pg_offset = 0; len -= 
cur_len; @@ -4804,7 +4804,7 @@ static int clone_range(struct send_ctx *sctx, type = btrfs_file_extent_type(leaf, ei); if (type == BTRFS_FILE_EXTENT_INLINE) { ext_len = btrfs_file_extent_inline_len(leaf, slot, ei); - ext_len = PAGE_CACHE_ALIGN(ext_len); + ext_len = PAGE_ALIGN(ext_len); } else { ext_len = btrfs_file_extent_num_bytes(leaf, ei); } @@ -4886,7 +4886,7 @@ static int send_write_or_clone(struct send_ctx *sctx, * but there may be items after this page. Make * sure to send the whole thing */ - len = PAGE_CACHE_ALIGN(len); + len = PAGE_ALIGN(len); } else { len = btrfs_file_extent_num_bytes(path->nodes[0], ei); } diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c index b976597b0721..e05619f241be 100644 --- a/fs/btrfs/struct-funcs.c +++ b/fs/btrfs/struct-funcs.c @@ -66,7 +66,7 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \ \ if (token && token->kaddr && token->offset <= offset && \ token->eb == eb && \ - (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \ + (token->offset + PAGE_SIZE >= offset + size)) { \ kaddr = token->kaddr; \ p = kaddr + part_offset - token->offset; \ res = get_unaligned_le##bits(p + off); \ @@ -104,7 +104,7 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \ \ if (token && token->kaddr && token->offset <= offset && \ token->eb == eb && \ - (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \ + (token->offset + PAGE_SIZE >= offset + size)) { \ kaddr = token->kaddr; \ p = kaddr + part_offset - token->offset; \ put_unaligned_le##bits(val, p + off); \ diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 669b58201e36..70948b13bc81 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -32,8 +32,8 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end, { int ret; struct page *pages[16]; - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; unsigned long nr_pages = end_index - index + 1; int i; int count = 0; @@ -49,9 +49,9 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end, count++; if (flags & PROCESS_UNLOCK && PageLocked(pages[i])) unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); if (flags & PROCESS_RELEASE) - page_cache_release(pages[i]); + put_page(pages[i]); } nr_pages -= ret; index += ret; @@ -93,7 +93,7 @@ static int test_find_delalloc(void) * everything to make sure our pages don't get evicted and screw up our * test. 
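* [Illustrative aside, not part of the original patch: total_dirty >>
* PAGE_SHIFT converts the byte count to a page count (total_dirty is a
* page-aligned constant in this test), so the loop below creates one
* page cache page for every page of the test range.]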
*/ - for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) { + for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) { page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); if (!page) { test_msg("Failed to allocate test page\n"); @@ -104,7 +104,7 @@ static int test_find_delalloc(void) if (index) { unlock_page(page); } else { - page_cache_get(page); + get_page(page); locked_page = page; } } @@ -129,7 +129,7 @@ static int test_find_delalloc(void) } unlock_extent(&tmp, start, end); unlock_page(locked_page); - page_cache_release(locked_page); + put_page(locked_page); /* * Test this scenario @@ -139,7 +139,7 @@ static int test_find_delalloc(void) */ test_start = SZ_64M; locked_page = find_lock_page(inode->i_mapping, - test_start >> PAGE_CACHE_SHIFT); + test_start >> PAGE_SHIFT); if (!locked_page) { test_msg("Couldn't find the locked page\n"); goto out_bits; @@ -165,7 +165,7 @@ static int test_find_delalloc(void) } unlock_extent(&tmp, start, end); /* locked_page was unlocked above */ - page_cache_release(locked_page); + put_page(locked_page); /* * Test this scenario @@ -174,7 +174,7 @@ static int test_find_delalloc(void) */ test_start = max_bytes + 4096; locked_page = find_lock_page(inode->i_mapping, test_start >> - PAGE_CACHE_SHIFT); + PAGE_SHIFT); if (!locked_page) { test_msg("Could'nt find the locked page\n"); goto out_bits; @@ -225,13 +225,13 @@ static int test_find_delalloc(void) * range we want to find. */ page = find_get_page(inode->i_mapping, - (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT); + (max_bytes + SZ_1M) >> PAGE_SHIFT); if (!page) { test_msg("Couldn't find our page\n"); goto out_bits; } ClearPageDirty(page); - page_cache_release(page); + put_page(page); /* We unlocked it in the previous test */ lock_page(locked_page); @@ -239,7 +239,7 @@ static int test_find_delalloc(void) end = 0; /* * Currently if we fail to find dirty pages in the delalloc range we - * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If + * will adjust max_bytes down to PAGE_SIZE and then re-search. If * this changes at any point in the future we will need to fix this * tests expected behavior. 
*/ @@ -249,9 +249,9 @@ static int test_find_delalloc(void) test_msg("Didn't find our range\n"); goto out_bits; } - if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) { + if (start != test_start && end != test_start + PAGE_SIZE - 1) { test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n", - test_start, test_start + PAGE_CACHE_SIZE - 1, start, + test_start, test_start + PAGE_SIZE - 1, start, end); goto out_bits; } @@ -265,7 +265,7 @@ out_bits: clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL); out: if (locked_page) - page_cache_release(locked_page); + put_page(locked_page); process_page_range(inode, 0, total_dirty - 1, PROCESS_UNLOCK | PROCESS_RELEASE); iput(inode); @@ -298,9 +298,9 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, return -EINVAL; } - bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, + bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, sizeof(long) * BITS_PER_BYTE); - extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0, + extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0, sizeof(long) * BITS_PER_BYTE); if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { test_msg("Setting straddling pages failed\n"); @@ -309,10 +309,10 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, bitmap_set(bitmap, 0, len * BITS_PER_BYTE); bitmap_clear(bitmap, - (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, + (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, sizeof(long) * BITS_PER_BYTE); extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); - extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0, + extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0, sizeof(long) * BITS_PER_BYTE); if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { test_msg("Clearing straddling pages failed\n"); @@ -353,7 +353,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, static int test_eb_bitmaps(void) { - unsigned long len = PAGE_CACHE_SIZE * 4; + unsigned long len = PAGE_SIZE * 4; unsigned long *bitmap; struct extent_buffer *eb; int ret; @@ -379,7 +379,7 @@ static int test_eb_bitmaps(void) /* Do it over again with an extent buffer which isn't page-aligned. 
*/ free_extent_buffer(eb); - eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len); + eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len); if (!eb) { test_msg("Couldn't allocate test extent buffer\n"); kfree(bitmap); diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index c9ad97b1e690..514247515312 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c @@ -22,7 +22,7 @@ #include "../disk-io.h" #include "../free-space-cache.h" -#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) +#define BITS_PER_BITMAP (PAGE_SIZE * 8) /* * This test just does basic sanity checking, making sure we can add an exten diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e2b54d546b7c..bd0f45fb38c4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1025,16 +1025,16 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, } /* make sure our super fits in the device */ - if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode)) + if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode)) goto error_bdev_put; /* make sure our super fits in the page */ - if (sizeof(*disk_super) > PAGE_CACHE_SIZE) + if (sizeof(*disk_super) > PAGE_SIZE) goto error_bdev_put; /* make sure our super doesn't straddle pages on disk */ - index = bytenr >> PAGE_CACHE_SHIFT; - if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index) + index = bytenr >> PAGE_SHIFT; + if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) goto error_bdev_put; /* pull in the page with our super */ @@ -1047,7 +1047,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, p = kmap(page); /* align our pointer to the offset of the super block */ - disk_super = p + (bytenr & ~PAGE_CACHE_MASK); + disk_super = p + (bytenr & ~PAGE_MASK); if (btrfs_super_bytenr(disk_super) != bytenr || btrfs_super_magic(disk_super) != BTRFS_MAGIC) @@ -1075,7 +1075,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, error_unmap: kunmap(page); - page_cache_release(page); + put_page(page); error_bdev_put: blkdev_put(bdev, flags); @@ -6527,7 +6527,7 @@ int btrfs_read_sys_array(struct btrfs_root *root) * but sb spans only this function. Add an explicit SetPageUptodate call * to silence the warning eg. on PowerPC 64. 
*/ - if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE) + if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) SetPageUptodate(sb->pages[0]); write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 82990b8f872b..88d274e8ecf2 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -59,7 +59,7 @@ static struct list_head *zlib_alloc_workspace(void) workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), zlib_inflate_workspacesize()); workspace->strm.workspace = vmalloc(workspacesize); - workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); + workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS); if (!workspace->strm.workspace || !workspace->buf) goto fail; @@ -103,7 +103,7 @@ static int zlib_compress_pages(struct list_head *ws, workspace->strm.total_in = 0; workspace->strm.total_out = 0; - in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + in_page = find_get_page(mapping, start >> PAGE_SHIFT); data_in = kmap(in_page); out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); @@ -117,8 +117,8 @@ static int zlib_compress_pages(struct list_head *ws, workspace->strm.next_in = data_in; workspace->strm.next_out = cpage_out; - workspace->strm.avail_out = PAGE_CACHE_SIZE; - workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE); + workspace->strm.avail_out = PAGE_SIZE; + workspace->strm.avail_in = min(len, PAGE_SIZE); while (workspace->strm.total_in < len) { ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH); @@ -156,7 +156,7 @@ static int zlib_compress_pages(struct list_head *ws, cpage_out = kmap(out_page); pages[nr_pages] = out_page; nr_pages++; - workspace->strm.avail_out = PAGE_CACHE_SIZE; + workspace->strm.avail_out = PAGE_SIZE; workspace->strm.next_out = cpage_out; } /* we're all done */ @@ -170,14 +170,14 @@ static int zlib_compress_pages(struct list_head *ws, bytes_left = len - workspace->strm.total_in; kunmap(in_page); - page_cache_release(in_page); + put_page(in_page); - start += PAGE_CACHE_SIZE; + start += PAGE_SIZE; in_page = find_get_page(mapping, - start >> PAGE_CACHE_SHIFT); + start >> PAGE_SHIFT); data_in = kmap(in_page); workspace->strm.avail_in = min(bytes_left, - PAGE_CACHE_SIZE); + PAGE_SIZE); workspace->strm.next_in = data_in; } } @@ -205,7 +205,7 @@ out: if (in_page) { kunmap(in_page); - page_cache_release(in_page); + put_page(in_page); } return ret; } @@ -223,18 +223,18 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, size_t total_out = 0; unsigned long page_in_index = 0; unsigned long page_out_index = 0; - unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); + unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE); unsigned long buf_start; unsigned long pg_offset; data_in = kmap(pages_in[page_in_index]); workspace->strm.next_in = data_in; - workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE); + workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE); workspace->strm.total_in = 0; workspace->strm.total_out = 0; workspace->strm.next_out = workspace->buf; - workspace->strm.avail_out = PAGE_CACHE_SIZE; + workspace->strm.avail_out = PAGE_SIZE; pg_offset = 0; /* If it's deflate, and it's got no preset dictionary, then @@ -274,7 +274,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, } workspace->strm.next_out = workspace->buf; - workspace->strm.avail_out = PAGE_CACHE_SIZE; + workspace->strm.avail_out = PAGE_SIZE; if (workspace->strm.avail_in == 0) { unsigned long tmp; @@ -288,7 +288,7 @@ static int 
zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, workspace->strm.next_in = data_in; tmp = srclen - workspace->strm.total_in; workspace->strm.avail_in = min(tmp, - PAGE_CACHE_SIZE); + PAGE_SIZE); } } if (ret != Z_STREAM_END) @@ -325,7 +325,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, workspace->strm.total_in = 0; workspace->strm.next_out = workspace->buf; - workspace->strm.avail_out = PAGE_CACHE_SIZE; + workspace->strm.avail_out = PAGE_SIZE; workspace->strm.total_out = 0; /* If it's deflate, and it's got no preset dictionary, then we can tell zlib to skip the adler32 check. */ @@ -368,8 +368,8 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, else buf_offset = 0; - bytes = min(PAGE_CACHE_SIZE - pg_offset, - PAGE_CACHE_SIZE - buf_offset); + bytes = min(PAGE_SIZE - pg_offset, + PAGE_SIZE - buf_offset); bytes = min(bytes, bytes_left); kaddr = kmap_atomic(dest_page); @@ -380,7 +380,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, bytes_left -= bytes; next: workspace->strm.next_out = workspace->buf; - workspace->strm.avail_out = PAGE_CACHE_SIZE; + workspace->strm.avail_out = PAGE_SIZE; } if (ret != Z_STREAM_END && bytes_left != 0) diff --git a/fs/buffer.c b/fs/buffer.c index 33be29675358..af0d9a82a8ed 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -129,7 +129,7 @@ __clear_page_buffers(struct page *page) { ClearPagePrivate(page); set_page_private(page, 0); - page_cache_release(page); + put_page(page); } static void buffer_io_error(struct buffer_head *bh, char *msg) @@ -207,7 +207,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) struct page *page; int all_mapped = 1; - index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); + index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); if (!page) goto out; @@ -245,7 +245,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) } out_unlock: spin_unlock(&bd_mapping->private_lock); - page_cache_release(page); + put_page(page); out: return ret; } @@ -1040,7 +1040,7 @@ done: ret = (block < end_block) ? 
1 : -ENXIO; failed: unlock_page(page); - page_cache_release(page); + put_page(page); return ret; } @@ -1533,7 +1533,7 @@ void block_invalidatepage(struct page *page, unsigned int offset, /* * Check for overflow */ - BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); + BUG_ON(stop > PAGE_SIZE || stop < length); head = page_buffers(page); bh = head; @@ -1716,7 +1716,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, blocksize = bh->b_size; bbits = block_size_bits(blocksize); - block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + block = (sector_t)page->index << (PAGE_SHIFT - bbits); last_block = (i_size_read(inode) - 1) >> bbits; /* @@ -1894,7 +1894,7 @@ EXPORT_SYMBOL(page_zero_new_buffers); int __block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + len; struct inode *inode = page->mapping->host; unsigned block_start, block_end; @@ -1904,15 +1904,15 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len, struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; BUG_ON(!PageLocked(page)); - BUG_ON(from > PAGE_CACHE_SIZE); - BUG_ON(to > PAGE_CACHE_SIZE); + BUG_ON(from > PAGE_SIZE); + BUG_ON(to > PAGE_SIZE); BUG_ON(from > to); head = create_page_buffers(page, inode, 0); blocksize = head->b_size; bbits = block_size_bits(blocksize); - block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + block = (sector_t)page->index << (PAGE_SHIFT - bbits); for(bh = head, block_start = 0; bh != head || !block_start; block++, block_start=block_end, bh = bh->b_this_page) { @@ -2020,7 +2020,7 @@ static int __block_commit_write(struct inode *inode, struct page *page, int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, get_block_t *get_block) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int status; @@ -2031,7 +2031,7 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, status = __block_write_begin(page, pos, len, get_block); if (unlikely(status)) { unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; } @@ -2047,7 +2047,7 @@ int block_write_end(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; unsigned start; - start = pos & (PAGE_CACHE_SIZE - 1); + start = pos & (PAGE_SIZE - 1); if (unlikely(copied < len)) { /* @@ -2099,7 +2099,7 @@ int generic_write_end(struct file *file, struct address_space *mapping, } unlock_page(page); - page_cache_release(page); + put_page(page); if (old_size < pos) pagecache_isize_extended(inode, old_size, pos); @@ -2136,9 +2136,9 @@ int block_is_partially_uptodate(struct page *page, unsigned long from, head = page_buffers(page); blocksize = head->b_size; - to = min_t(unsigned, PAGE_CACHE_SIZE - from, count); + to = min_t(unsigned, PAGE_SIZE - from, count); to = from + to; - if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize) + if (from < blocksize && to > PAGE_SIZE - blocksize) return 0; bh = head; @@ -2181,7 +2181,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) blocksize = head->b_size; bbits = block_size_bits(blocksize); - iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + iblock = (sector_t)page->index << (PAGE_SHIFT - bbits); lblock = (i_size_read(inode)+blocksize-1) >> bbits; bh = head; nr = 0; @@ -2295,16 +2295,16 @@ static int 
cont_expand_zero(struct file *file, struct address_space *mapping, unsigned zerofrom, offset, len; int err = 0; - index = pos >> PAGE_CACHE_SHIFT; - offset = pos & ~PAGE_CACHE_MASK; + index = pos >> PAGE_SHIFT; + offset = pos & ~PAGE_MASK; - while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) { - zerofrom = curpos & ~PAGE_CACHE_MASK; + while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { + zerofrom = curpos & ~PAGE_MASK; if (zerofrom & (blocksize-1)) { *bytes |= (blocksize-1); (*bytes)++; } - len = PAGE_CACHE_SIZE - zerofrom; + len = PAGE_SIZE - zerofrom; err = pagecache_write_begin(file, mapping, curpos, len, AOP_FLAG_UNINTERRUPTIBLE, @@ -2329,7 +2329,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, /* page covers the boundary, find the boundary offset */ if (index == curidx) { - zerofrom = curpos & ~PAGE_CACHE_MASK; + zerofrom = curpos & ~PAGE_MASK; /* if we will expand the thing last block will be filled */ if (offset <= zerofrom) { goto out; @@ -2375,7 +2375,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping, if (err) return err; - zerofrom = *bytes & ~PAGE_CACHE_MASK; + zerofrom = *bytes & ~PAGE_MASK; if (pos+len > *bytes && zerofrom & (blocksize-1)) { *bytes |= (blocksize-1); (*bytes)++; @@ -2430,10 +2430,10 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, } /* page is wholly or partially inside EOF */ - if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) - end = size & ~PAGE_CACHE_MASK; + if (((page->index + 1) << PAGE_SHIFT) > size) + end = size & ~PAGE_MASK; else - end = PAGE_CACHE_SIZE; + end = PAGE_SIZE; ret = __block_write_begin(page, 0, end, get_block); if (!ret) @@ -2508,8 +2508,8 @@ int nobh_write_begin(struct address_space *mapping, int ret = 0; int is_mapped_to_disk = 1; - index = pos >> PAGE_CACHE_SHIFT; - from = pos & (PAGE_CACHE_SIZE - 1); + index = pos >> PAGE_SHIFT; + from = pos & (PAGE_SIZE - 1); to = from + len; page = grab_cache_page_write_begin(mapping, index, flags); @@ -2543,7 +2543,7 @@ int nobh_write_begin(struct address_space *mapping, goto out_release; } - block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); + block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); /* * We loop across all blocks in the page, whether or not they are @@ -2551,7 +2551,7 @@ int nobh_write_begin(struct address_space *mapping, * page is fully mapped-to-disk. */ for (block_start = 0, block_in_page = 0, bh = head; - block_start < PAGE_CACHE_SIZE; + block_start < PAGE_SIZE; block_in_page++, block_start += blocksize, bh = bh->b_this_page) { int create; @@ -2623,7 +2623,7 @@ failed: out_release: unlock_page(page); - page_cache_release(page); + put_page(page); *pagep = NULL; return ret; @@ -2653,7 +2653,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping, } unlock_page(page); - page_cache_release(page); + put_page(page); while (head) { bh = head; @@ -2675,7 +2675,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, { struct inode * const inode = page->mapping->host; loff_t i_size = i_size_read(inode); - const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + const pgoff_t end_index = i_size >> PAGE_SHIFT; unsigned offset; int ret; @@ -2684,7 +2684,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, goto out; /* Is the page fully outside i_size? 
(truncate in progress) */ - offset = i_size & (PAGE_CACHE_SIZE-1); + offset = i_size & (PAGE_SIZE-1); if (page->index >= end_index+1 || !offset) { /* * The page may have dirty, unmapped buffers. For example, @@ -2707,7 +2707,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); out: ret = mpage_writepage(page, get_block, wbc); if (ret == -EAGAIN) @@ -2720,8 +2720,8 @@ EXPORT_SYMBOL(nobh_writepage); int nobh_truncate_page(struct address_space *mapping, loff_t from, get_block_t *get_block) { - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + pgoff_t index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize; sector_t iblock; unsigned length, pos; @@ -2738,7 +2738,7 @@ int nobh_truncate_page(struct address_space *mapping, return 0; length = blocksize - length; - iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); page = grab_cache_page(mapping, index); err = -ENOMEM; @@ -2748,7 +2748,7 @@ int nobh_truncate_page(struct address_space *mapping, if (page_has_buffers(page)) { has_buffers: unlock_page(page); - page_cache_release(page); + put_page(page); return block_truncate_page(mapping, from, get_block); } @@ -2772,7 +2772,7 @@ has_buffers: if (!PageUptodate(page)) { err = mapping->a_ops->readpage(NULL, page); if (err) { - page_cache_release(page); + put_page(page); goto out; } lock_page(page); @@ -2789,7 +2789,7 @@ has_buffers: unlock: unlock_page(page); - page_cache_release(page); + put_page(page); out: return err; } @@ -2798,8 +2798,8 @@ EXPORT_SYMBOL(nobh_truncate_page); int block_truncate_page(struct address_space *mapping, loff_t from, get_block_t *get_block) { - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + pgoff_t index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize; sector_t iblock; unsigned length, pos; @@ -2816,7 +2816,7 @@ int block_truncate_page(struct address_space *mapping, return 0; length = blocksize - length; - iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); page = grab_cache_page(mapping, index); err = -ENOMEM; @@ -2865,7 +2865,7 @@ int block_truncate_page(struct address_space *mapping, unlock: unlock_page(page); - page_cache_release(page); + put_page(page); out: return err; } @@ -2879,7 +2879,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, { struct inode * const inode = page->mapping->host; loff_t i_size = i_size_read(inode); - const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + const pgoff_t end_index = i_size >> PAGE_SHIFT; unsigned offset; /* Is the page fully inside i_size? */ @@ -2888,14 +2888,14 @@ int block_write_full_page(struct page *page, get_block_t *get_block, end_buffer_async_write); /* Is the page fully outside i_size? (truncate in progress) */ - offset = i_size & (PAGE_CACHE_SIZE-1); + offset = i_size & (PAGE_SIZE-1); if (page->index >= end_index+1 || !offset) { /* * The page may have dirty, unmapped buffers. For example, * they may have been added in ext3_writepage(). Make them * freeable here, so the page does not leak. 
*/ - do_invalidatepage(page, 0, PAGE_CACHE_SIZE); + do_invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); return 0; /* don't care */ } @@ -2907,7 +2907,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); return __block_write_full_page(inode, page, get_block, wbc, end_buffer_async_write); } diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index c0f3da3926a0..afbdc418966d 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -194,10 +194,10 @@ static void cachefiles_read_copier(struct fscache_operation *_op) error = -EIO; } - page_cache_release(monitor->back_page); + put_page(monitor->back_page); fscache_end_io(op, monitor->netfs_page, error); - page_cache_release(monitor->netfs_page); + put_page(monitor->netfs_page); fscache_retrieval_complete(op, 1); fscache_put_retrieval(op); kfree(monitor); @@ -288,8 +288,8 @@ monitor_backing_page: _debug("- monitor add"); /* install the monitor */ - page_cache_get(monitor->netfs_page); - page_cache_get(backpage); + get_page(monitor->netfs_page); + get_page(backpage); monitor->back_page = backpage; monitor->monitor.private = backpage; add_page_wait_queue(backpage, &monitor->monitor); @@ -310,7 +310,7 @@ backing_page_already_present: _debug("- present"); if (newpage) { - page_cache_release(newpage); + put_page(newpage); newpage = NULL; } @@ -342,7 +342,7 @@ success: out: if (backpage) - page_cache_release(backpage); + put_page(backpage); if (monitor) { fscache_put_retrieval(monitor->op); kfree(monitor); @@ -363,7 +363,7 @@ io_error: goto out; nomem_page: - page_cache_release(newpage); + put_page(newpage); nomem_monitor: fscache_put_retrieval(monitor->op); kfree(monitor); @@ -530,7 +530,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, netpage->index, cachefiles_gfp); if (ret < 0) { if (ret == -EEXIST) { - page_cache_release(netpage); + put_page(netpage); fscache_retrieval_complete(op, 1); continue; } @@ -538,10 +538,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, } /* install a monitor */ - page_cache_get(netpage); + get_page(netpage); monitor->netfs_page = netpage; - page_cache_get(backpage); + get_page(backpage); monitor->back_page = backpage; monitor->monitor.private = backpage; add_page_wait_queue(backpage, &monitor->monitor); @@ -555,10 +555,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, unlock_page(backpage); } - page_cache_release(backpage); + put_page(backpage); backpage = NULL; - page_cache_release(netpage); + put_page(netpage); netpage = NULL; continue; @@ -603,7 +603,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, netpage->index, cachefiles_gfp); if (ret < 0) { if (ret == -EEXIST) { - page_cache_release(netpage); + put_page(netpage); fscache_retrieval_complete(op, 1); continue; } @@ -612,14 +612,14 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, copy_highpage(netpage, backpage); - page_cache_release(backpage); + put_page(backpage); backpage = NULL; fscache_mark_page_cached(op, netpage); /* the netpage is unlocked and marked up to date here */ fscache_end_io(op, netpage, 0); - page_cache_release(netpage); + put_page(netpage); netpage = NULL; fscache_retrieval_complete(op, 1); continue; @@ -632,11 +632,11 @@ static int 
cachefiles_read_backing_file(struct cachefiles_object *object, out: /* tidy up */ if (newpage) - page_cache_release(newpage); + put_page(newpage); if (netpage) - page_cache_release(netpage); + put_page(netpage); if (backpage) - page_cache_release(backpage); + put_page(backpage); if (monitor) { fscache_put_retrieval(op); kfree(monitor); @@ -644,7 +644,7 @@ out: list_for_each_entry_safe(netpage, _n, list, lru) { list_del(&netpage->lru); - page_cache_release(netpage); + put_page(netpage); fscache_retrieval_complete(op, 1); } diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 19adeb0ef82a..4801571f51cb 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -143,7 +143,7 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset, inode = page->mapping->host; ci = ceph_inode(inode); - if (offset != 0 || length != PAGE_CACHE_SIZE) { + if (offset != 0 || length != PAGE_SIZE) { dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", inode, page, page->index, offset, length); return; @@ -175,8 +175,8 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset, static int ceph_releasepage(struct page *page, gfp_t g) { - struct inode *inode = page->mapping ? page->mapping->host : NULL; - dout("%p releasepage %p idx %lu\n", inode, page, page->index); + dout("%p releasepage %p idx %lu\n", page->mapping->host, + page, page->index); WARN_ON(PageDirty(page)); /* Can we release the page from the cache? */ @@ -197,10 +197,10 @@ static int readpage_nounlock(struct file *filp, struct page *page) &ceph_inode_to_client(inode)->client->osdc; int err = 0; u64 off = page_offset(page); - u64 len = PAGE_CACHE_SIZE; + u64 len = PAGE_SIZE; if (off >= i_size_read(inode)) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); return 0; } @@ -212,7 +212,7 @@ static int readpage_nounlock(struct file *filp, struct page *page) */ if (off == 0) return -EINVAL; - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); return 0; } @@ -234,9 +234,9 @@ static int readpage_nounlock(struct file *filp, struct page *page) ceph_fscache_readpage_cancel(inode, page); goto out; } - if (err < PAGE_CACHE_SIZE) + if (err < PAGE_SIZE) /* zero fill remainder of page */ - zero_user_segment(page, err, PAGE_CACHE_SIZE); + zero_user_segment(page, err, PAGE_SIZE); else flush_dcache_page(page); @@ -276,12 +276,12 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) for (i = 0; i < num_pages; i++) { struct page *page = osd_data->pages[i]; - if (rc < 0 && rc != ENOENT) + if (rc < 0 && rc != -ENOENT) goto unlock; - if (bytes < (int)PAGE_CACHE_SIZE) { + if (bytes < (int)PAGE_SIZE) { /* zero (remainder of) page */ int s = bytes < 0 ? 
0 : bytes; - zero_user_segment(page, s, PAGE_CACHE_SIZE); + zero_user_segment(page, s, PAGE_SIZE); } dout("finish_read %p uptodate %p idx %lu\n", inode, page, page->index); @@ -290,8 +290,8 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) ceph_readpage_to_fscache(inode, page); unlock: unlock_page(page); - page_cache_release(page); - bytes -= PAGE_CACHE_SIZE; + put_page(page); + bytes -= PAGE_SIZE; } kfree(osd_data->pages); } @@ -336,7 +336,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) if (max && nr_pages == max) break; } - len = nr_pages << PAGE_CACHE_SHIFT; + len = nr_pages << PAGE_SHIFT; dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, off, len); vino = ceph_vino(inode); @@ -364,7 +364,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) if (add_to_page_cache_lru(page, &inode->i_data, page->index, GFP_KERNEL)) { ceph_fscache_uncache_page(inode, page); - page_cache_release(page); + put_page(page); dout("start_read %p add_to_page_cache failed %p\n", inode, page); nr_pages = i; @@ -415,8 +415,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping, if (rc == 0) goto out; - if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE) - max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1) + if (fsc->mount_options->rsize >= PAGE_SIZE) + max = (fsc->mount_options->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT; dout("readpages %p file %p nr_pages %d max %d\n", inode, @@ -484,7 +484,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) long writeback_stat; u64 truncate_size; u32 truncate_seq; - int err = 0, len = PAGE_CACHE_SIZE; + int err = 0, len = PAGE_SIZE; dout("writepage %p idx %lu\n", page, page->index); @@ -606,71 +606,71 @@ static void writepages_finish(struct ceph_osd_request *req, struct inode *inode = req->r_inode; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_data *osd_data; - unsigned wrote; struct page *page; - int num_pages; - int i; + int num_pages, total_pages = 0; + int i, j; + int rc = req->r_result; struct ceph_snap_context *snapc = req->r_snapc; struct address_space *mapping = inode->i_mapping; - int rc = req->r_result; - u64 bytes = req->r_ops[0].extent.length; struct ceph_fs_client *fsc = ceph_inode_to_client(inode); - long writeback_stat; - unsigned issued = ceph_caps_issued(ci); + bool remove_page; - osd_data = osd_req_op_extent_osd_data(req, 0); - BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); - num_pages = calc_pages_for((u64)osd_data->alignment, - (u64)osd_data->length); - if (rc >= 0) { - /* - * Assume we wrote the pages we originally sent. The - * osd might reply with fewer pages if our writeback - * raced with a truncation and was adjusted at the osd, - * so don't believe the reply. 
- */ - wrote = num_pages; - } else { - wrote = 0; + + dout("writepages_finish %p rc %d\n", inode, rc); + if (rc < 0) mapping_set_error(mapping, rc); - } - dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n", - inode, rc, bytes, wrote); - /* clean all pages */ - for (i = 0; i < num_pages; i++) { - page = osd_data->pages[i]; - BUG_ON(!page); - WARN_ON(!PageUptodate(page)); + /* + * We lost the cache cap, need to truncate the page before + * it is unlocked, otherwise we'd truncate it later in the + * page truncation thread, possibly losing some data that + * raced its way in + */ + remove_page = !(ceph_caps_issued(ci) & + (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)); - writeback_stat = - atomic_long_dec_return(&fsc->writeback_count); - if (writeback_stat < - CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) - clear_bdi_congested(&fsc->backing_dev_info, - BLK_RW_ASYNC); + /* clean all pages */ + for (i = 0; i < req->r_num_ops; i++) { + if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) + break; - ceph_put_snap_context(page_snap_context(page)); - page->private = 0; - ClearPagePrivate(page); - dout("unlocking %d %p\n", i, page); - end_page_writeback(page); + osd_data = osd_req_op_extent_osd_data(req, i); + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); + num_pages = calc_pages_for((u64)osd_data->alignment, + (u64)osd_data->length); + total_pages += num_pages; + for (j = 0; j < num_pages; j++) { + page = osd_data->pages[j]; + BUG_ON(!page); + WARN_ON(!PageUptodate(page)); + + if (atomic_long_dec_return(&fsc->writeback_count) < + CONGESTION_OFF_THRESH( + fsc->mount_options->congestion_kb)) + clear_bdi_congested(&fsc->backing_dev_info, + BLK_RW_ASYNC); + + ceph_put_snap_context(page_snap_context(page)); + page->private = 0; + ClearPagePrivate(page); + dout("unlocking %p\n", page); + end_page_writeback(page); + + if (remove_page) + generic_error_remove_page(inode->i_mapping, + page); - /* - * We lost the cache cap, need to truncate the page before - * it is unlocked, otherwise we'd truncate it later in the - * page truncation thread, possibly losing some data that - * raced its way in - */ - if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) - generic_error_remove_page(inode->i_mapping, page); + unlock_page(page); + } + dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n", + inode, osd_data->length, rc >= 0 ? 
num_pages : 0); - unlock_page(page); + ceph_release_pages(osd_data->pages, num_pages); } - dout("%p wrote+cleaned %d pages\n", inode, wrote); - ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc); - ceph_release_pages(osd_data->pages, num_pages); + ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc); + + osd_data = osd_req_op_extent_osd_data(req, 0); if (osd_data->pages_from_pool) mempool_free(osd_data->pages, ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); @@ -725,9 +725,9 @@ static int ceph_writepages_start(struct address_space *mapping, } if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize) wsize = fsc->mount_options->wsize; - if (wsize < PAGE_CACHE_SIZE) - wsize = PAGE_CACHE_SIZE; - max_pages_ever = wsize >> PAGE_CACHE_SHIFT; + if (wsize < PAGE_SIZE) + wsize = PAGE_SIZE; + max_pages_ever = wsize >> PAGE_SHIFT; pagevec_init(&pvec, 0); @@ -737,8 +737,8 @@ static int ceph_writepages_start(struct address_space *mapping, end = -1; dout(" cyclic, start at %lu\n", start); } else { - start = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + start = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; should_loop = 0; @@ -778,17 +778,15 @@ retry: while (!done && index <= end) { unsigned i; int first; - pgoff_t next; - int pvec_pages, locked_pages; - struct page **pages = NULL; + pgoff_t strip_unit_end = 0; + int num_ops = 0, op_idx; + int pvec_pages, locked_pages = 0; + struct page **pages = NULL, **data_pages; mempool_t *pool = NULL; /* Becomes non-null if mempool used */ struct page *page; int want; - u64 offset, len; - long writeback_stat; + u64 offset = 0, len = 0; - next = 0; - locked_pages = 0; max_pages = max_pages_ever; get_more_pages: @@ -824,8 +822,8 @@ get_more_pages: unlock_page(page); break; } - if (next && (page->index != next)) { - dout("not consecutive %p\n", page); + if (strip_unit_end && (page->index > strip_unit_end)) { + dout("end of strip unit %p\n", page); unlock_page(page); break; } @@ -867,36 +865,31 @@ get_more_pages: /* * We have something to write. If this is * the first locked page this time through, - * allocate an osd request and a page array - * that it will use. + * calculate max possinle write size and + * allocate a page array */ if (locked_pages == 0) { - BUG_ON(pages); + u64 objnum; + u64 objoff; + /* prepare async write request */ offset = (u64)page_offset(page); len = wsize; - req = ceph_osdc_new_request(&fsc->client->osdc, - &ci->i_layout, vino, - offset, &len, 0, - do_sync ? 2 : 1, - CEPH_OSD_OP_WRITE, - CEPH_OSD_FLAG_WRITE | - CEPH_OSD_FLAG_ONDISK, - snapc, truncate_seq, - truncate_size, true); - if (IS_ERR(req)) { - rc = PTR_ERR(req); + + rc = ceph_calc_file_object_mapping(&ci->i_layout, + offset, len, + &objnum, &objoff, + &len); + if (rc < 0) { unlock_page(page); break; } - if (do_sync) - osd_req_op_init(req, 1, - CEPH_OSD_OP_STARTSYNC, 0); - - req->r_callback = writepages_finish; - req->r_inode = inode; + num_ops = 1 + do_sync; + strip_unit_end = page->index + + ((len - 1) >> PAGE_SHIFT); + BUG_ON(pages); max_pages = calc_pages_for(0, (u64)len); pages = kmalloc(max_pages * sizeof (*pages), GFP_NOFS); @@ -905,6 +898,20 @@ get_more_pages: pages = mempool_alloc(pool, GFP_NOFS); BUG_ON(!pages); } + + len = 0; + } else if (page->index != + (offset + len) >> PAGE_SHIFT) { + if (num_ops >= (pool ? 
CEPH_OSD_SLAB_OPS : + CEPH_OSD_MAX_OPS)) { + redirty_page_for_writepage(wbc, page); + unlock_page(page); + break; + } + + num_ops++; + offset = (u64)page_offset(page); + len = 0; } /* note position of first page in pvec */ @@ -913,18 +920,16 @@ get_more_pages: dout("%p will write page %p idx %lu\n", inode, page, page->index); - writeback_stat = - atomic_long_inc_return(&fsc->writeback_count); - if (writeback_stat > CONGESTION_ON_THRESH( + if (atomic_long_inc_return(&fsc->writeback_count) > + CONGESTION_ON_THRESH( fsc->mount_options->congestion_kb)) { set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); } - set_page_writeback(page); pages[locked_pages] = page; locked_pages++; - next = page->index + 1; + len += PAGE_SIZE; } /* did we get anything? */ @@ -944,38 +949,119 @@ get_more_pages: /* shift unused pages over in the pvec... we * will need to release them below. */ for (j = i; j < pvec_pages; j++) { - dout(" pvec leftover page %p\n", - pvec.pages[j]); + dout(" pvec leftover page %p\n", pvec.pages[j]); pvec.pages[j-i+first] = pvec.pages[j]; } pvec.nr -= i-first; } - /* Format the osd request message and submit the write */ +new_request: offset = page_offset(pages[0]); - len = (u64)locked_pages << PAGE_CACHE_SHIFT; - if (snap_size == -1) { - len = min(len, (u64)i_size_read(inode) - offset); - /* writepages_finish() clears writeback pages - * according to the data length, so make sure - * data length covers all locked pages */ - len = max(len, 1 + - ((u64)(locked_pages - 1) << PAGE_CACHE_SHIFT)); - } else { - len = min(len, snap_size - offset); + len = wsize; + + req = ceph_osdc_new_request(&fsc->client->osdc, + &ci->i_layout, vino, + offset, &len, 0, num_ops, + CEPH_OSD_OP_WRITE, + CEPH_OSD_FLAG_WRITE | + CEPH_OSD_FLAG_ONDISK, + snapc, truncate_seq, + truncate_size, false); + if (IS_ERR(req)) { + req = ceph_osdc_new_request(&fsc->client->osdc, + &ci->i_layout, vino, + offset, &len, 0, + min(num_ops, + CEPH_OSD_SLAB_OPS), + CEPH_OSD_OP_WRITE, + CEPH_OSD_FLAG_WRITE | + CEPH_OSD_FLAG_ONDISK, + snapc, truncate_seq, + truncate_size, true); + BUG_ON(IS_ERR(req)); } - dout("writepages got %d pages at %llu~%llu\n", - locked_pages, offset, len); + BUG_ON(len < page_offset(pages[locked_pages - 1]) + + PAGE_SIZE - offset); + + req->r_callback = writepages_finish; + req->r_inode = inode; - osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, + /* Format the osd request message and submit the write */ + len = 0; + data_pages = pages; + op_idx = 0; + for (i = 0; i < locked_pages; i++) { + u64 cur_offset = page_offset(pages[i]); + if (offset + len != cur_offset) { + if (op_idx + do_sync + 1 == req->r_num_ops) + break; + osd_req_op_extent_dup_last(req, op_idx, + cur_offset - offset); + dout("writepages got pages at %llu~%llu\n", + offset, len); + osd_req_op_extent_osd_data_pages(req, op_idx, + data_pages, len, 0, !!pool, false); + osd_req_op_extent_update(req, op_idx, len); - pages = NULL; /* request message now owns the pages array */ - pool = NULL; + len = 0; + offset = cur_offset; + data_pages = pages + i; + op_idx++; + } - /* Update the write op length in case we changed it */ + set_page_writeback(pages[i]); + len += PAGE_SIZE; + } + + if (snap_size != -1) { + len = min(len, snap_size - offset); + } else if (i == locked_pages) { + /* writepages_finish() clears writeback pages + * according to the data length, so make sure + * data length covers all locked pages */ + u64 min_len = len + 1 - PAGE_SIZE; + len = min(len, (u64)i_size_read(inode) - offset); + len = max(len, min_len); + } + 
dout("writepages got pages at %llu~%llu\n", offset, len); + + osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, + 0, !!pool, false); + osd_req_op_extent_update(req, op_idx, len); - osd_req_op_extent_update(req, 0, len); + if (do_sync) { + op_idx++; + osd_req_op_init(req, op_idx, CEPH_OSD_OP_STARTSYNC, 0); + } + BUG_ON(op_idx + 1 != req->r_num_ops); + + pool = NULL; + if (i < locked_pages) { + BUG_ON(num_ops <= req->r_num_ops); + num_ops -= req->r_num_ops; + num_ops += do_sync; + locked_pages -= i; + + /* allocate new pages array for next request */ + data_pages = pages; + pages = kmalloc(locked_pages * sizeof (*pages), + GFP_NOFS); + if (!pages) { + pool = fsc->wb_pagevec_pool; + pages = mempool_alloc(pool, GFP_NOFS); + BUG_ON(!pages); + } + memcpy(pages, data_pages + i, + locked_pages * sizeof(*pages)); + memset(data_pages + i, 0, + locked_pages * sizeof(*pages)); + } else { + BUG_ON(num_ops != req->r_num_ops); + index = pages[i - 1]->index + 1; + /* request message now owns the pages array */ + pages = NULL; + } vino = ceph_vino(inode); ceph_osdc_build_request(req, offset, snapc, vino.snap, @@ -985,9 +1071,10 @@ get_more_pages: BUG_ON(rc); req = NULL; - /* continue? */ - index = next; - wbc->nr_to_write -= locked_pages; + wbc->nr_to_write -= i; + if (pages) + goto new_request; + if (wbc->nr_to_write <= 0) done = 1; @@ -1048,8 +1135,8 @@ static int ceph_update_writeable_page(struct file *file, { struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); - loff_t page_off = pos & PAGE_CACHE_MASK; - int pos_in_page = pos & ~PAGE_CACHE_MASK; + loff_t page_off = pos & PAGE_MASK; + int pos_in_page = pos & ~PAGE_MASK; int end_in_page = pos_in_page + len; loff_t i_size; int r; @@ -1104,7 +1191,7 @@ retry_locked: } /* full page? */ - if (pos_in_page == 0 && len == PAGE_CACHE_SIZE) + if (pos_in_page == 0 && len == PAGE_SIZE) return 0; /* past end of file? 
*/ @@ -1112,12 +1199,12 @@ retry_locked: if (page_off >= i_size || (pos_in_page == 0 && (pos+len) >= i_size && - end_in_page - pos_in_page != PAGE_CACHE_SIZE)) { + end_in_page - pos_in_page != PAGE_SIZE)) { dout(" zeroing %p 0 - %d and %d - %d\n", - page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE); + page, pos_in_page, end_in_page, (int)PAGE_SIZE); zero_user_segments(page, 0, pos_in_page, - end_in_page, PAGE_CACHE_SIZE); + end_in_page, PAGE_SIZE); return 0; } @@ -1141,7 +1228,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, { struct inode *inode = file_inode(file); struct page *page; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; int r; do { @@ -1155,7 +1242,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, r = ceph_update_writeable_page(file, pos, len, page); if (r < 0) - page_cache_release(page); + put_page(page); else *pagep = page; } while (r == -EAGAIN); @@ -1172,7 +1259,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata) { struct inode *inode = file_inode(file); - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); int check_cap = 0; dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, @@ -1192,7 +1279,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); if (check_cap) ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); @@ -1235,11 +1322,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_file_info *fi = vma->vm_file->private_data; struct page *pinned_page = NULL; - loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT; + loff_t off = vmf->pgoff << PAGE_SHIFT; int want, got, ret; dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n", - inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE); + inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; else @@ -1256,7 +1343,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } } dout("filemap_fault %p %llu~%zd got cap refs on %s\n", - inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got)); + inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got)); if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || ci->i_inline_version == CEPH_INLINE_NONE) @@ -1265,16 +1352,16 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ret = -EAGAIN; dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n", - inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret); + inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret); if (pinned_page) - page_cache_release(pinned_page); + put_page(pinned_page); ceph_put_cap_refs(ci, got); if (ret != -EAGAIN) return ret; /* read inline data */ - if (off >= PAGE_CACHE_SIZE) { + if (off >= PAGE_SIZE) { /* does not support inline data > PAGE_SIZE */ ret = VM_FAULT_SIGBUS; } else { @@ -1291,12 +1378,12 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) CEPH_STAT_CAP_INLINE_DATA, true); if (ret1 < 0 || off >= i_size_read(inode)) { unlock_page(page); - page_cache_release(page); + put_page(page); ret = VM_FAULT_SIGBUS; goto out; } - if (ret1 < PAGE_CACHE_SIZE) - zero_user_segment(page, ret1, 
PAGE_CACHE_SIZE); + if (ret1 < PAGE_SIZE) + zero_user_segment(page, ret1, PAGE_SIZE); else flush_dcache_page(page); SetPageUptodate(page); @@ -1305,7 +1392,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } out: dout("filemap_fault %p %llu~%zd read inline data ret %d\n", - inode, off, (size_t)PAGE_CACHE_SIZE, ret); + inode, off, (size_t)PAGE_SIZE, ret); return ret; } @@ -1343,10 +1430,10 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) } } - if (off + PAGE_CACHE_SIZE <= size) - len = PAGE_CACHE_SIZE; + if (off + PAGE_SIZE <= size) + len = PAGE_SIZE; else - len = size & ~PAGE_CACHE_MASK; + len = size & ~PAGE_MASK; dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", inode, ceph_vinop(inode), off, len, size); @@ -1432,7 +1519,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, return; if (PageUptodate(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); return; } } @@ -1447,14 +1534,14 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, } if (page != locked_page) { - if (len < PAGE_CACHE_SIZE) - zero_user_segment(page, len, PAGE_CACHE_SIZE); + if (len < PAGE_SIZE) + zero_user_segment(page, len, PAGE_SIZE); else flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); - page_cache_release(page); + put_page(page); } } @@ -1491,7 +1578,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) from_pagecache = true; lock_page(page); } else { - page_cache_release(page); + put_page(page); page = NULL; } } @@ -1499,8 +1586,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) if (page) { len = i_size_read(inode); - if (len > PAGE_CACHE_SIZE) - len = PAGE_CACHE_SIZE; + if (len > PAGE_SIZE) + len = PAGE_SIZE; } else { page = __page_cache_alloc(GFP_NOFS); if (!page) { @@ -1522,7 +1609,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) ceph_vino(inode), 0, &len, 0, 1, CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, - ceph_empty_snapc, 0, 0, false); + NULL, 0, 0, false); if (IS_ERR(req)) { err = PTR_ERR(req); goto out; @@ -1540,9 +1627,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) ceph_vino(inode), 0, &len, 1, 3, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, - ceph_empty_snapc, - ci->i_truncate_seq, ci->i_truncate_size, - false); + NULL, ci->i_truncate_seq, + ci->i_truncate_size, false); if (IS_ERR(req)) { err = PTR_ERR(req); goto out; @@ -1584,7 +1670,7 @@ out: if (page && page != locked_page) { if (from_pagecache) { unlock_page(page); - page_cache_release(page); + put_page(page); } else __free_pages(page, 0); } @@ -1663,8 +1749,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool) goto out; } - rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, - ceph_empty_snapc, + rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1, false, GFP_NOFS); if (!rd_req) { err = -ENOMEM; @@ -1678,8 +1763,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool) "%llx.00000000", ci->i_vino.ino); rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name); - wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, - ceph_empty_snapc, + wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1, false, GFP_NOFS); if (!wr_req) { err = -ENOMEM; diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 6fe0ad26a7df..cfaeef18cbca 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -991,7 +991,7 @@ static int 
send_cap_msg(struct ceph_mds_session *session, u32 seq, u64 flush_tid, u64 oldest_flush_tid, u32 issue_seq, u32 mseq, u64 size, u64 max_size, struct timespec *mtime, struct timespec *atime, - u64 time_warp_seq, + struct timespec *ctime, u64 time_warp_seq, kuid_t uid, kgid_t gid, umode_t mode, u64 xattr_version, struct ceph_buffer *xattrs_buf, @@ -1042,6 +1042,8 @@ static int send_cap_msg(struct ceph_mds_session *session, ceph_encode_timespec(&fc->mtime, mtime); if (atime) ceph_encode_timespec(&fc->atime, atime); + if (ctime) + ceph_encode_timespec(&fc->ctime, ctime); fc->time_warp_seq = cpu_to_le32(time_warp_seq); fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid)); @@ -1116,7 +1118,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, int held, revoking, dropping, keep; u64 seq, issue_seq, mseq, time_warp_seq, follows; u64 size, max_size; - struct timespec mtime, atime; + struct timespec mtime, atime, ctime; int wake = 0; umode_t mode; kuid_t uid; @@ -1180,6 +1182,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, ci->i_requested_max_size = max_size; mtime = inode->i_mtime; atime = inode->i_atime; + ctime = inode->i_ctime; time_warp_seq = ci->i_time_warp_seq; uid = inode->i_uid; gid = inode->i_gid; @@ -1198,7 +1201,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id, op, keep, want, flushing, seq, flush_tid, oldest_flush_tid, issue_seq, mseq, - size, max_size, &mtime, &atime, time_warp_seq, + size, max_size, &mtime, &atime, &ctime, time_warp_seq, uid, gid, mode, xattr_version, xattr_blob, follows, inline_data); if (ret < 0) { @@ -1320,7 +1323,7 @@ retry: capsnap->dirty, 0, capsnap->flush_tid, 0, 0, mseq, capsnap->size, 0, &capsnap->mtime, &capsnap->atime, - capsnap->time_warp_seq, + &capsnap->ctime, capsnap->time_warp_seq, capsnap->uid, capsnap->gid, capsnap->mode, capsnap->xattr_version, capsnap->xattr_blob, capsnap->follows, capsnap->inline_data); @@ -2507,7 +2510,7 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, *pinned_page = page; break; } - page_cache_release(page); + put_page(page); } /* * drop cap refs first because getattr while diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index fd11fb231a2e..4fb2bbc2a272 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -38,7 +38,7 @@ int ceph_init_dentry(struct dentry *dentry) if (dentry->d_fsdata) return 0; - di = kmem_cache_alloc(ceph_dentry_cachep, GFP_KERNEL | __GFP_ZERO); + di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL); if (!di) return -ENOMEM; /* oh well */ @@ -68,23 +68,6 @@ out_unlock: return 0; } -struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry) -{ - struct inode *inode = NULL; - - if (!dentry) - return NULL; - - spin_lock(&dentry->d_lock); - if (!IS_ROOT(dentry)) { - inode = d_inode(dentry->d_parent); - ihold(inode); - } - spin_unlock(&dentry->d_lock); - return inode; -} - - /* * for readdir, we encode the directory frag and offset within that * frag into f_pos. 
@@ -146,7 +129,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx, struct inode *dir = d_inode(parent); struct dentry *dentry, *last = NULL; struct ceph_dentry_info *di; - unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *); + unsigned nsize = PAGE_SIZE / sizeof(struct dentry *); int err = 0; loff_t ptr_pos = 0; struct ceph_readdir_cache_control cache_ctl = {}; @@ -171,7 +154,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx, } err = -EAGAIN; - pgoff = ptr_pos >> PAGE_CACHE_SHIFT; + pgoff = ptr_pos >> PAGE_SHIFT; if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) { ceph_readdir_cache_release(&cache_ctl); cache_ctl.page = find_lock_page(&dir->i_data, pgoff); @@ -624,6 +607,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_mds_request *req; int op; + int mask; int err; dout("lookup %p dentry %p '%pd'\n", @@ -666,8 +650,12 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, return ERR_CAST(req); req->r_dentry = dget(dentry); req->r_num_caps = 2; - /* we only need inode linkage */ - req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); + + mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; + if (ceph_security_xattr_wanted(dir)) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.getattr.mask = cpu_to_le32(mask); + req->r_locked_dir = dir; err = ceph_mdsc_do_request(mdsc, NULL, req); err = ceph_handle_snapdir(req, dentry, err); @@ -1095,6 +1083,7 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry) static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) { int valid = 0; + struct dentry *parent; struct inode *dir; if (flags & LOOKUP_RCU) @@ -1103,7 +1092,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry, dentry, d_inode(dentry), ceph_dentry(dentry)->offset); - dir = ceph_get_dentry_parent_inode(dentry); + parent = dget_parent(dentry); + dir = d_inode(parent); /* always trust cached snapped dentries, snapdir dentry */ if (ceph_snap(dir) != CEPH_NOSNAP) { @@ -1121,13 +1111,48 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) valid = 1; } + if (!valid) { + struct ceph_mds_client *mdsc = + ceph_sb_to_client(dir->i_sb)->mdsc; + struct ceph_mds_request *req; + int op, mask, err; + + op = ceph_snap(dir) == CEPH_SNAPDIR ? + CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP; + req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS); + if (!IS_ERR(req)) { + req->r_dentry = dget(dentry); + req->r_num_caps = 2; + + mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; + if (ceph_security_xattr_wanted(dir)) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.getattr.mask = mask; + + req->r_locked_dir = dir; + err = ceph_mdsc_do_request(mdsc, NULL, req); + if (err == 0 || err == -ENOENT) { + if (dentry == req->r_dentry) { + valid = !d_unhashed(dentry); + } else { + d_invalidate(req->r_dentry); + err = -EAGAIN; + } + } + ceph_mdsc_put_request(req); + dout("d_revalidate %p lookup result=%d\n", + dentry, err); + } + } + dout("d_revalidate %p %s\n", dentry, valid ? 
"valid" : "invalid"); if (valid) { ceph_dentry_lru_touch(dentry); } else { ceph_dir_clear_complete(dir); } - iput(dir); + + dput(parent); return valid; } diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 3b3172357326..6e72c98162d5 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -71,12 +71,18 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) inode = ceph_find_inode(sb, vino); if (!inode) { struct ceph_mds_request *req; + int mask; req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO, USE_ANY_MDS); if (IS_ERR(req)) return ERR_CAST(req); + mask = CEPH_STAT_CAP_INODE; + if (ceph_security_xattr_wanted(d_inode(sb->s_root))) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.getattr.mask = cpu_to_le32(mask); + req->r_ino1 = vino; req->r_num_caps = 1; err = ceph_mdsc_do_request(mdsc, NULL, req); @@ -128,6 +134,7 @@ static struct dentry *__get_parent(struct super_block *sb, struct ceph_mds_request *req; struct inode *inode; struct dentry *dentry; + int mask; int err; req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPPARENT, @@ -144,6 +151,12 @@ static struct dentry *__get_parent(struct super_block *sb, .snap = CEPH_NOSNAP, }; } + + mask = CEPH_STAT_CAP_INODE; + if (ceph_security_xattr_wanted(d_inode(sb->s_root))) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.getattr.mask = cpu_to_le32(mask); + req->r_num_caps = 1; err = ceph_mdsc_do_request(mdsc, NULL, req); inode = req->r_target_inode; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index eb9028e8cfc5..a79f9269831e 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -157,7 +157,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) case S_IFDIR: dout("init_file %p %p 0%o (regular)\n", inode, file, inode->i_mode); - cf = kmem_cache_alloc(ceph_file_cachep, GFP_KERNEL | __GFP_ZERO); + cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL); if (cf == NULL) { ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ return -ENOMEM; @@ -300,6 +300,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, struct ceph_mds_request *req; struct dentry *dn; struct ceph_acls_info acls = {}; + int mask; int err; dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", @@ -335,6 +336,12 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, acls.pagelist = NULL; } } + + mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; + if (ceph_security_xattr_wanted(dir)) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.open.mask = cpu_to_le32(mask); + req->r_locked_dir = dir; /* caller holds dir->i_mutex */ err = ceph_mdsc_do_request(mdsc, (flags & (O_CREAT|O_TRUNC)) ? 
dir : NULL, @@ -459,7 +466,7 @@ more: ret += zlen; } - didpages = (page_align + ret) >> PAGE_CACHE_SHIFT; + didpages = (page_align + ret) >> PAGE_SHIFT; pos += ret; read = pos - off; left -= ret; @@ -725,7 +732,6 @@ static void ceph_aio_retry_work(struct work_struct *work) ret = ceph_osdc_start_request(req->r_osdc, req, false); out: if (ret < 0) { - BUG_ON(ret == -EOLDSNAPC); req->r_result = ret; ceph_aio_complete_req(req, NULL); } @@ -783,7 +789,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, int num_pages = 0; int flags; int ret; - struct timespec mtime = CURRENT_TIME; + struct timespec mtime = current_fs_time(inode->i_sb); size_t count = iov_iter_count(iter); loff_t pos = iocb->ki_pos; bool write = iov_iter_rw(iter) == WRITE; @@ -800,8 +806,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, if (write) { ret = invalidate_inode_pages2_range(inode->i_mapping, - pos >> PAGE_CACHE_SHIFT, - (pos + count) >> PAGE_CACHE_SHIFT); + pos >> PAGE_SHIFT, + (pos + count) >> PAGE_SHIFT); if (ret < 0) dout("invalidate_inode_pages2_range returned %d\n", ret); @@ -866,7 +872,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, * may block. */ truncate_inode_pages_range(inode->i_mapping, pos, - (pos+len) | (PAGE_CACHE_SIZE - 1)); + (pos+len) | (PAGE_SIZE - 1)); osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); } @@ -949,7 +955,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, ret = ceph_osdc_start_request(req->r_osdc, req, false); if (ret < 0) { - BUG_ON(ret == -EOLDSNAPC); req->r_result = ret; ceph_aio_complete_req(req, NULL); } @@ -988,7 +993,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, int flags; int check_caps = 0; int ret; - struct timespec mtime = CURRENT_TIME; + struct timespec mtime = current_fs_time(inode->i_sb); size_t count = iov_iter_count(from); if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) @@ -1001,8 +1006,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, return ret; ret = invalidate_inode_pages2_range(inode->i_mapping, - pos >> PAGE_CACHE_SHIFT, - (pos + count) >> PAGE_CACHE_SHIFT); + pos >> PAGE_SHIFT, + (pos + count) >> PAGE_SHIFT); if (ret < 0) dout("invalidate_inode_pages2_range returned %d\n", ret); @@ -1031,7 +1036,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, * write from beginning of first page, * regardless of io alignment */ - num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); if (IS_ERR(pages)) { @@ -1154,7 +1159,7 @@ again: dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); if (pinned_page) { - page_cache_release(pinned_page); + put_page(pinned_page); pinned_page = NULL; } ceph_put_cap_refs(ci, got); @@ -1183,10 +1188,10 @@ again: if (retry_op == READ_INLINE) { BUG_ON(ret > 0 || read > 0); if (iocb->ki_pos < i_size && - iocb->ki_pos < PAGE_CACHE_SIZE) { + iocb->ki_pos < PAGE_SIZE) { loff_t end = min_t(loff_t, i_size, iocb->ki_pos + len); - end = min_t(loff_t, end, PAGE_CACHE_SIZE); + end = min_t(loff_t, end, PAGE_SIZE); if (statret < end) zero_user_segment(page, statret, end); ret = copy_page_to_iter(page, @@ -1458,21 +1463,21 @@ static inline void ceph_zero_partial_page( struct inode *inode, loff_t offset, unsigned size) { struct page *page; - pgoff_t index = offset >> PAGE_CACHE_SHIFT; + pgoff_t index = offset >> PAGE_SHIFT; 
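The page arithmetic this hunk converts (offset >> PAGE_SHIFT for an index, round-up-then-shift for a count) is the pattern repeated throughout the series. A standalone sketch, assuming 4 KiB pages (PAGE_SHIFT == 12); the sk_* names are illustrative, not from the patch:

#include <assert.h>

#define SK_PAGE_SHIFT	12
#define SK_PAGE_SIZE	(1UL << SK_PAGE_SHIFT)

/* Byte offset -> page index, as in "offset >> PAGE_SHIFT". */
static unsigned long sk_page_index(unsigned long offset)
{
	return offset >> SK_PAGE_SHIFT;
}

/* Byte length -> page count, as in "(len + PAGE_SIZE - 1) >> PAGE_SHIFT". */
static unsigned long sk_page_count(unsigned long len)
{
	return (len + SK_PAGE_SIZE - 1) >> SK_PAGE_SHIFT;
}

int main(void)
{
	assert(sk_page_index(5000) == 1);	/* byte 5000 falls in page 1 */
	assert(sk_page_count(10000) == 3);	/* 10000 bytes span 3 pages */
	return 0;
}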
page = find_lock_page(inode->i_mapping, index); if (page) { wait_on_page_writeback(page); - zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size); + zero_user(page, offset & (PAGE_SIZE - 1), size); unlock_page(page); - page_cache_release(page); + put_page(page); } } static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, loff_t length) { - loff_t nearly = round_up(offset, PAGE_CACHE_SIZE); + loff_t nearly = round_up(offset, PAGE_SIZE); if (offset < nearly) { loff_t size = nearly - offset; if (length < size) @@ -1481,8 +1486,8 @@ static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, offset += size; length -= size; } - if (length >= PAGE_CACHE_SIZE) { - loff_t size = round_down(length, PAGE_CACHE_SIZE); + if (length >= PAGE_SIZE) { + loff_t size = round_down(length, PAGE_SIZE); truncate_pagecache_range(inode, offset, offset + size - 1); offset += size; length -= size; diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index e48fd8b23257..edfade037738 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -549,6 +549,10 @@ int ceph_fill_file_size(struct inode *inode, int issued, if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) { dout("size %lld -> %llu\n", inode->i_size, size); + if (size > 0 && S_ISDIR(inode->i_mode)) { + pr_err("fill_file_size non-zero size for directory\n"); + size = 0; + } i_size_write(inode, size); inode->i_blocks = (size + (1<<9) - 1) >> 9; ci->i_reported_size = size; @@ -1261,6 +1265,7 @@ retry_lookup: dout(" %p links to %p %llx.%llx, not %llx.%llx\n", dn, d_inode(dn), ceph_vinop(d_inode(dn)), ceph_vinop(in)); + d_invalidate(dn); have_lease = false; } @@ -1333,7 +1338,7 @@ void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl) { if (ctl->page) { kunmap(ctl->page); - page_cache_release(ctl->page); + put_page(ctl->page); ctl->page = NULL; } } @@ -1343,21 +1348,26 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn, struct ceph_mds_request *req) { struct ceph_inode_info *ci = ceph_inode(dir); - unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*); + unsigned nsize = PAGE_SIZE / sizeof(struct dentry*); unsigned idx = ctl->index % nsize; pgoff_t pgoff = ctl->index / nsize; if (!ctl->page || pgoff != page_index(ctl->page)) { ceph_readdir_cache_release(ctl); - ctl->page = grab_cache_page(&dir->i_data, pgoff); + if (idx == 0) + ctl->page = grab_cache_page(&dir->i_data, pgoff); + else + ctl->page = find_lock_page(&dir->i_data, pgoff); if (!ctl->page) { ctl->index = -1; - return -ENOMEM; + return idx == 0 ? 
-ENOMEM : 0; } /* reading/filling the cache are serialized by * i_mutex, no need to use page lock */ unlock_page(ctl->page); ctl->dentries = kmap(ctl->page); + if (idx == 0) + memset(ctl->dentries, 0, PAGE_SIZE); } if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && @@ -1380,7 +1390,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, struct qstr dname; struct dentry *dn; struct inode *in; - int err = 0, ret, i; + int err = 0, skipped = 0, ret, i; struct inode *snapdir = NULL; struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; struct ceph_dentry_info *di; @@ -1492,7 +1502,17 @@ retry_lookup: } if (d_really_is_negative(dn)) { - struct dentry *realdn = splice_dentry(dn, in); + struct dentry *realdn; + + if (ceph_security_xattr_deadlock(in)) { + dout(" skip splicing dn %p to inode %p" + " (security xattr deadlock)\n", dn, in); + iput(in); + skipped++; + goto next_item; + } + + realdn = splice_dentry(dn, in); if (IS_ERR(realdn)) { err = PTR_ERR(realdn); d_drop(dn); @@ -1509,7 +1529,7 @@ retry_lookup: req->r_session, req->r_request_started); - if (err == 0 && cache_ctl.index >= 0) { + if (err == 0 && skipped == 0 && cache_ctl.index >= 0) { ret = fill_readdir_cache(d_inode(parent), dn, &cache_ctl, req); if (ret < 0) @@ -1520,7 +1540,7 @@ next_item: dput(dn); } out: - if (err == 0) { + if (err == 0 && skipped == 0) { req->r_did_prepopulate = true; req->r_readdir_cache_idx = cache_ctl.index; } @@ -1950,7 +1970,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) if (dirtied) { inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, &prealloc_cf); - inode->i_ctime = CURRENT_TIME; + inode->i_ctime = current_fs_time(inode->i_sb); } release &= issued; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 911d64d865f1..541ead4d8965 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1610,7 +1610,7 @@ again: while (!list_empty(&tmp_list)) { if (!msg) { msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, - PAGE_CACHE_SIZE, GFP_NOFS, false); + PAGE_SIZE, GFP_NOFS, false); if (!msg) goto out_err; head = msg->front.iov_base; @@ -1729,7 +1729,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) init_completion(&req->r_safe_completion); INIT_LIST_HEAD(&req->r_unsafe_item); - req->r_stamp = CURRENT_TIME; + req->r_stamp = current_fs_time(mdsc->fsc->sb); req->r_op = op; req->r_direct_mode = mode; @@ -2540,6 +2540,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) /* insert trace into our cache */ mutex_lock(&req->r_fill_mutex); + current->journal_info = req; err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); if (err == 0) { if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || @@ -2547,6 +2548,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) ceph_readdir_prepopulate(req, req->r_session); ceph_unreserve_caps(mdsc, &req->r_caps_reservation); } + current->journal_info = NULL; mutex_unlock(&req->r_fill_mutex); up_read(&mdsc->snap_rwsem); @@ -3764,7 +3766,6 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) dout("handle_map epoch %u len %d\n", epoch, (int)maplen); /* do we need it? 
*/ - ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch); mutex_lock(&mdsc->mutex); if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { dout("handle_map epoch %u <= our %u\n", @@ -3791,6 +3792,8 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; __wake_requests(mdsc, &mdsc->waiting_for_map); + ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP, + mdsc->mdsmap->m_epoch); mutex_unlock(&mdsc->mutex); schedule_delayed(mdsc); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 37712ccffcc6..ee69a537dba5 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -97,7 +97,7 @@ struct ceph_mds_reply_info_parsed { /* * cap releases are batched and sent to the MDS en masse. */ -#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE - \ +#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE - \ sizeof(struct ceph_mds_cap_release)) / \ sizeof(struct ceph_mds_cap_item)) diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 4aa7122a8d38..9caaa7ffc93f 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -296,8 +296,6 @@ static int cmpu64_rev(const void *a, const void *b) } -struct ceph_snap_context *ceph_empty_snapc; - /* * build the snap context for a given realm. */ @@ -987,17 +985,3 @@ out: up_write(&mdsc->snap_rwsem); return; } - -int __init ceph_snap_init(void) -{ - ceph_empty_snapc = ceph_create_snap_context(0, GFP_NOFS); - if (!ceph_empty_snapc) - return -ENOMEM; - ceph_empty_snapc->seq = 1; - return 0; -} - -void ceph_snap_exit(void) -{ - ceph_put_snap_context(ceph_empty_snapc); -} diff --git a/fs/ceph/super.c b/fs/ceph/super.c index ca4d5e8457f1..f12d5e2955c2 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -439,8 +439,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT) seq_puts(m, ",dirstat"); - if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0) - seq_puts(m, ",norbytes"); + if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES)) + seq_puts(m, ",rbytes"); if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR) seq_puts(m, ",noasyncreaddir"); if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0) @@ -530,7 +530,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, goto fail; } fsc->client->extra_mon_dispatch = extra_mon_dispatch; - fsc->client->monc.want_mdsmap = 1; + ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 0, true); fsc->mount_options = fsopt; @@ -560,7 +560,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, /* set up mempools */ err = -ENOMEM; - page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT; + page_count = fsc->mount_options->wsize >> PAGE_SHIFT; size = sizeof (struct page *) * (page_count ? 
page_count : 1); fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size); if (!fsc->wb_pagevec_pool) @@ -793,22 +793,20 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc, struct dentry *root; int first = 0; /* first vfsmount for this super_block */ - dout("mount start\n"); + dout("mount start %p\n", fsc); mutex_lock(&fsc->client->mount_mutex); - err = __ceph_open_session(fsc->client, started); - if (err < 0) - goto out; + if (!fsc->sb->s_root) { + err = __ceph_open_session(fsc->client, started); + if (err < 0) + goto out; - dout("mount opening root\n"); - root = open_root_dentry(fsc, "", started); - if (IS_ERR(root)) { - err = PTR_ERR(root); - goto out; - } - if (fsc->sb->s_root) { - dput(root); - } else { + dout("mount opening root\n"); + root = open_root_dentry(fsc, "", started); + if (IS_ERR(root)) { + err = PTR_ERR(root); + goto out; + } fsc->sb->s_root = root; first = 1; @@ -818,6 +816,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc, } if (path[0] == 0) { + root = fsc->sb->s_root; dget(root); } else { dout("mount opening base mountpoint\n"); @@ -833,16 +832,14 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc, mutex_unlock(&fsc->client->mount_mutex); return root; -out: - mutex_unlock(&fsc->client->mount_mutex); - return ERR_PTR(err); - fail: if (first) { dput(fsc->sb->s_root); fsc->sb->s_root = NULL; } - goto out; +out: + mutex_unlock(&fsc->client->mount_mutex); + return ERR_PTR(err); } static int ceph_set_super(struct super_block *s, void *data) @@ -915,13 +912,13 @@ static int ceph_register_bdi(struct super_block *sb, int err; /* set ra_pages based on rasize mount option? */ - if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE) + if (fsc->mount_options->rasize >= PAGE_SIZE) fsc->backing_dev_info.ra_pages = - (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1) + (fsc->mount_options->rasize + PAGE_SIZE - 1) >> PAGE_SHIFT; else fsc->backing_dev_info.ra_pages = - VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; + VM_MAX_READAHEAD * 1024 / PAGE_SIZE; err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld", atomic_long_inc_return(&bdi_seq)); @@ -1042,19 +1039,14 @@ static int __init init_ceph(void) ceph_flock_init(); ceph_xattr_init(); - ret = ceph_snap_init(); - if (ret) - goto out_xattr; ret = register_filesystem(&ceph_fs_type); if (ret) - goto out_snap; + goto out_xattr; pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL); return 0; -out_snap: - ceph_snap_exit(); out_xattr: ceph_xattr_exit(); destroy_caches(); @@ -1066,7 +1058,6 @@ static void __exit exit_ceph(void) { dout("exit_ceph\n"); unregister_filesystem(&ceph_fs_type); - ceph_snap_exit(); ceph_xattr_exit(); destroy_caches(); } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 9c458eb52245..e705c4d612d7 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -37,8 +37,7 @@ #define CEPH_MOUNT_OPT_FSCACHE (1<<10) /* use fscache */ #define CEPH_MOUNT_OPT_NOPOOLPERM (1<<11) /* no pool permission check */ -#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES | \ - CEPH_MOUNT_OPT_DCACHE) +#define CEPH_MOUNT_OPT_DEFAULT CEPH_MOUNT_OPT_DCACHE #define ceph_set_mount_opt(fsc, opt) \ (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt; @@ -469,7 +468,7 @@ static inline struct inode *ceph_find_inode(struct super_block *sb, #define CEPH_I_POOL_PERM (1 << 4) /* pool rd/wr bits are valid */ #define CEPH_I_POOL_RD (1 << 5) /* can read from pool */ #define CEPH_I_POOL_WR (1 << 6) /* can write to pool */ - +#define CEPH_I_SEC_INITED (1 << 7) /* security initialized */ static inline 
void __ceph_dir_set_complete(struct ceph_inode_info *ci, long long release_count, @@ -721,7 +720,6 @@ static inline int default_congestion_kb(void) /* snap.c */ -extern struct ceph_snap_context *ceph_empty_snapc; struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, u64 ino); extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc, @@ -738,8 +736,6 @@ extern void ceph_queue_cap_snap(struct ceph_inode_info *ci); extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, struct ceph_cap_snap *capsnap); extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); -extern int ceph_snap_init(void); -extern void ceph_snap_exit(void); /* * a cap_snap is "pending" if it is still awaiting an in-progress @@ -808,6 +804,20 @@ extern void __init ceph_xattr_init(void); extern void ceph_xattr_exit(void); extern const struct xattr_handler *ceph_xattr_handlers[]; +#ifdef CONFIG_SECURITY +extern bool ceph_security_xattr_deadlock(struct inode *in); +extern bool ceph_security_xattr_wanted(struct inode *in); +#else +static inline bool ceph_security_xattr_deadlock(struct inode *in) +{ + return false; +} +static inline bool ceph_security_xattr_wanted(struct inode *in) +{ + return false; +} +#endif + /* acl.c */ struct ceph_acls_info { void *default_acl; @@ -947,7 +957,6 @@ extern void ceph_dentry_lru_touch(struct dentry *dn); extern void ceph_dentry_lru_del(struct dentry *dn); extern void ceph_invalidate_dentry_lease(struct dentry *dentry); extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn); -extern struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry); extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl); /* diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 819163d8313b..9410abdef3ce 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -714,31 +714,62 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci) } } +static inline int __get_request_mask(struct inode *in) { + struct ceph_mds_request *req = current->journal_info; + int mask = 0; + if (req && req->r_target_inode == in) { + if (req->r_op == CEPH_MDS_OP_LOOKUP || + req->r_op == CEPH_MDS_OP_LOOKUPINO || + req->r_op == CEPH_MDS_OP_LOOKUPPARENT || + req->r_op == CEPH_MDS_OP_GETATTR) { + mask = le32_to_cpu(req->r_args.getattr.mask); + } else if (req->r_op == CEPH_MDS_OP_OPEN || + req->r_op == CEPH_MDS_OP_CREATE) { + mask = le32_to_cpu(req->r_args.open.mask); + } + } + return mask; +} + ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value, size_t size) { struct ceph_inode_info *ci = ceph_inode(inode); - int err; struct ceph_inode_xattr *xattr; struct ceph_vxattr *vxattr = NULL; + int req_mask; + int err; if (!ceph_is_valid_xattr(name)) return -ENODATA; /* let's see if a virtual xattr was requested */ vxattr = ceph_match_vxattr(inode, name); - if (vxattr && !(vxattr->exists_cb && !vxattr->exists_cb(ci))) { - err = vxattr->getxattr_cb(ci, value, size); + if (vxattr) { + err = -ENODATA; + if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) + err = vxattr->getxattr_cb(ci, value, size); return err; } + req_mask = __get_request_mask(inode); + spin_lock(&ci->i_ceph_lock); dout("getxattr %p ver=%lld index_ver=%lld\n", inode, ci->i_xattrs.version, ci->i_xattrs.index_version); if (ci->i_xattrs.version == 0 || - !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) { + !((req_mask & CEPH_CAP_XATTR_SHARED) || + __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) { spin_unlock(&ci->i_ceph_lock); + + /* security module gets xattr while 
filling trace */ + if (current->journal_info != NULL) { + pr_warn_ratelimited("sync getxattr %p " + "during filling trace\n", inode); + return -EBUSY; + } + /* get xattrs from mds (if we don't already have them) */ err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true); if (err) @@ -765,6 +796,9 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value, memcpy(value, xattr->val, xattr->val_len); + if (current->journal_info != NULL && + !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) + ci->i_ceph_flags |= CEPH_I_SEC_INITED; out: spin_unlock(&ci->i_ceph_lock); return err; @@ -999,7 +1033,7 @@ retry: dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL, &prealloc_cf); ci->i_xattrs.dirty = true; - inode->i_ctime = CURRENT_TIME; + inode->i_ctime = current_fs_time(inode->i_sb); } spin_unlock(&ci->i_ceph_lock); @@ -1015,7 +1049,15 @@ do_sync: do_sync_unlocked: if (lock_snap_rwsem) up_read(&mdsc->snap_rwsem); - err = ceph_sync_setxattr(dentry, name, value, size, flags); + + /* security module set xattr while filling trace */ + if (current->journal_info != NULL) { + pr_warn_ratelimited("sync setxattr %p " + "during filling trace\n", inode); + err = -EBUSY; + } else { + err = ceph_sync_setxattr(dentry, name, value, size, flags); + } out: ceph_free_cap_flush(prealloc_cf); kfree(newname); @@ -1136,7 +1178,7 @@ retry: dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL, &prealloc_cf); ci->i_xattrs.dirty = true; - inode->i_ctime = CURRENT_TIME; + inode->i_ctime = current_fs_time(inode->i_sb); spin_unlock(&ci->i_ceph_lock); if (lock_snap_rwsem) up_read(&mdsc->snap_rwsem); @@ -1164,3 +1206,25 @@ int ceph_removexattr(struct dentry *dentry, const char *name) return __ceph_removexattr(dentry, name); } + +#ifdef CONFIG_SECURITY +bool ceph_security_xattr_wanted(struct inode *in) +{ + return in->i_security != NULL; +} + +bool ceph_security_xattr_deadlock(struct inode *in) +{ + struct ceph_inode_info *ci; + bool ret; + if (in->i_security == NULL) + return false; + ci = ceph_inode(in); + spin_lock(&ci->i_ceph_lock); + ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) && + !(ci->i_xattrs.version > 0 && + __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0)); + spin_unlock(&ci->i_ceph_lock); + return ret; +} +#endif diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 1d86fc620e5c..89201564c346 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -962,7 +962,7 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off, cifs_dbg(FYI, "about to flush pages\n"); /* should we flush first and last page first */ truncate_inode_pages_range(&target_inode->i_data, destoff, - PAGE_CACHE_ALIGN(destoff + len)-1); + PAGE_ALIGN(destoff + len)-1); if (target_tcon->ses->server->ops->duplicate_extents) rc = target_tcon->ses->server->ops->duplicate_extents(xid, diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index d21da9f05bae..f2cc0b3d1af7 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -714,7 +714,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb) * * Note that this might make for "interesting" allocation problems during * writeback however as we have to allocate an array of pointers for the - * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. + * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096. 
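The ~32kb figure in the comment above is straightforward to verify; a throwaway sketch, assuming 4 KiB pages and 8-byte pointers (a typical 64-bit build):

#include <stdio.h>

int main(void)
{
	unsigned long wsize = 16UL << 20;		/* a 16M write */
	unsigned long nr_pages = wsize / 4096;		/* 4096 pages */
	unsigned long array = nr_pages * sizeof(void *);

	/* Prints "4096 pages -> 32768 byte pointer array" on 64-bit. */
	printf("%lu pages -> %lu byte pointer array\n", nr_pages, array);
	return 0;
}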
* * For reads, there is a similar problem as we need to allocate an array * of kvecs to handle the receive, though that should only need to be done @@ -733,7 +733,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb) /* * The default wsize is 1M. find_get_pages seems to return a maximum of 256 - * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill + * pages in a single call. With PAGE_SIZE == 4k, this means we can fill * a single wsize request with a single call. */ #define CIFS_DEFAULT_IOSIZE (1024 * 1024) diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 76fcb50295a3..a894bf809ff7 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1929,17 +1929,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata) wsize = server->ops->wp_retry_size(inode); if (wsize < rest_len) { - nr_pages = wsize / PAGE_CACHE_SIZE; + nr_pages = wsize / PAGE_SIZE; if (!nr_pages) { rc = -ENOTSUPP; break; } - cur_len = nr_pages * PAGE_CACHE_SIZE; - tailsz = PAGE_CACHE_SIZE; + cur_len = nr_pages * PAGE_SIZE; + tailsz = PAGE_SIZE; } else { - nr_pages = DIV_ROUND_UP(rest_len, PAGE_CACHE_SIZE); + nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE); cur_len = rest_len; - tailsz = rest_len - (nr_pages - 1) * PAGE_CACHE_SIZE; + tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE; } wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); @@ -1957,7 +1957,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata) wdata2->sync_mode = wdata->sync_mode; wdata2->nr_pages = nr_pages; wdata2->offset = page_offset(wdata2->pages[0]); - wdata2->pagesz = PAGE_CACHE_SIZE; + wdata2->pagesz = PAGE_SIZE; wdata2->tailsz = tailsz; wdata2->bytes = cur_len; @@ -1975,7 +1975,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata) if (rc != 0 && rc != -EAGAIN) { SetPageError(wdata2->pages[j]); end_page_writeback(wdata2->pages[j]); - page_cache_release(wdata2->pages[j]); + put_page(wdata2->pages[j]); } } @@ -2018,7 +2018,7 @@ cifs_writev_complete(struct work_struct *work) else if (wdata->result < 0) SetPageError(page); end_page_writeback(page); - page_cache_release(page); + put_page(page); } if (wdata->result != -EAGAIN) mapping_set_error(inode->i_mapping, wdata->result); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a763cd3d9e7c..6f62ac821a84 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3630,7 +3630,7 @@ try_mount_again: cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info); /* tune readahead according to rsize */ - cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE; + cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_SIZE; remote_path_check: #ifdef CONFIG_CIFS_DFS_UPCALL diff --git a/fs/cifs/file.c b/fs/cifs/file.c index ff882aeaccc6..c03d0744648b 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1833,7 +1833,7 @@ refind_writable: static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) { struct address_space *mapping = page->mapping; - loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; + loff_t offset = (loff_t)page->index << PAGE_SHIFT; char *write_data; int rc = -EFAULT; int bytes_written = 0; @@ -1849,7 +1849,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) write_data = kmap(page); write_data += from; - if ((to > PAGE_CACHE_SIZE) || (from > to)) { + if ((to > PAGE_SIZE) || (from > to)) { kunmap(page); return -EIO; } @@ -1902,7 +1902,7 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping, * find_get_pages_tag seems to return a max of 256 on each * iteration, so we must call it several 
times in order to * fill the array or the wsize is effectively limited to - * 256 * PAGE_CACHE_SIZE. + * 256 * PAGE_SIZE. */ *found_pages = 0; pages = wdata->pages; @@ -1991,7 +1991,7 @@ wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages, /* put any pages we aren't going to use */ for (i = nr_pages; i < found_pages; i++) { - page_cache_release(wdata->pages[i]); + put_page(wdata->pages[i]); wdata->pages[i] = NULL; } @@ -2009,11 +2009,11 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages, wdata->sync_mode = wbc->sync_mode; wdata->nr_pages = nr_pages; wdata->offset = page_offset(wdata->pages[0]); - wdata->pagesz = PAGE_CACHE_SIZE; + wdata->pagesz = PAGE_SIZE; wdata->tailsz = min(i_size_read(mapping->host) - page_offset(wdata->pages[nr_pages - 1]), - (loff_t)PAGE_CACHE_SIZE); - wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz; + (loff_t)PAGE_SIZE); + wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz; if (wdata->cfile != NULL) cifsFileInfo_put(wdata->cfile); @@ -2047,15 +2047,15 @@ static int cifs_writepages(struct address_space *mapping, * If wsize is smaller than the page cache size, default to writing * one page at a time via cifs_writepage */ - if (cifs_sb->wsize < PAGE_CACHE_SIZE) + if (cifs_sb->wsize < PAGE_SIZE) return generic_writepages(mapping, wbc); if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = true; scanned = true; @@ -2071,7 +2071,7 @@ retry: if (rc) break; - tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1; + tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1; wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index, &found_pages); @@ -2111,7 +2111,7 @@ retry: else SetPageError(wdata->pages[i]); end_page_writeback(wdata->pages[i]); - page_cache_release(wdata->pages[i]); + put_page(wdata->pages[i]); } if (rc != -EAGAIN) mapping_set_error(mapping, rc); @@ -2154,7 +2154,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc) xid = get_xid(); /* BB add check for wbc flags */ - page_cache_get(page); + get_page(page); if (!PageUptodate(page)) cifs_dbg(FYI, "ppw - page not up to date\n"); @@ -2170,7 +2170,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc) */ set_page_writeback(page); retry_write: - rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE); + rc = cifs_partialpagewrite(page, 0, PAGE_SIZE); if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL) goto retry_write; else if (rc == -EAGAIN) @@ -2180,7 +2180,7 @@ retry_write: else SetPageUptodate(page); end_page_writeback(page); - page_cache_release(page); + put_page(page); free_xid(xid); return rc; } @@ -2214,12 +2214,12 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, if (copied == len) SetPageUptodate(page); ClearPageChecked(page); - } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE) + } else if (!PageUptodate(page) && copied == PAGE_SIZE) SetPageUptodate(page); if (!PageUptodate(page)) { char *page_data; - unsigned offset = pos & (PAGE_CACHE_SIZE - 1); + unsigned offset = pos & (PAGE_SIZE - 1); unsigned int xid; xid = get_xid(); @@ -2248,7 +2248,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, } unlock_page(page); - 
page_cache_release(page); + put_page(page); return rc; } @@ -3286,9 +3286,9 @@ cifs_readv_complete(struct work_struct *work) (rdata->result == -EAGAIN && got_bytes)) cifs_readpage_to_fscache(rdata->mapping->host, page); - got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes); + got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes); - page_cache_release(page); + put_page(page); rdata->pages[i] = NULL; } kref_put(&rdata->refcount, cifs_readdata_release); @@ -3307,21 +3307,21 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, /* determine the eof that the server (probably) has */ eof = CIFS_I(rdata->mapping->host)->server_eof; - eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0; + eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0; cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index); rdata->got_bytes = 0; - rdata->tailsz = PAGE_CACHE_SIZE; + rdata->tailsz = PAGE_SIZE; for (i = 0; i < nr_pages; i++) { struct page *page = rdata->pages[i]; - if (len >= PAGE_CACHE_SIZE) { + if (len >= PAGE_SIZE) { /* enough data to fill the page */ iov.iov_base = kmap(page); - iov.iov_len = PAGE_CACHE_SIZE; + iov.iov_len = PAGE_SIZE; cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", i, page->index, iov.iov_base, iov.iov_len); - len -= PAGE_CACHE_SIZE; + len -= PAGE_SIZE; } else if (len > 0) { /* enough for partial page, fill and zero the rest */ iov.iov_base = kmap(page); @@ -3329,7 +3329,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", i, page->index, iov.iov_base, iov.iov_len); memset(iov.iov_base + len, - '\0', PAGE_CACHE_SIZE - len); + '\0', PAGE_SIZE - len); rdata->tailsz = len; len = 0; } else if (page->index > eof_index) { @@ -3341,12 +3341,12 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, * to prevent the VFS from repeatedly attempting to * fill them until the writes are flushed. */ - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); lru_cache_add_file(page); flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); - page_cache_release(page); + put_page(page); rdata->pages[i] = NULL; rdata->nr_pages--; continue; @@ -3354,7 +3354,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, /* no need to hold page hostage */ lru_cache_add_file(page); unlock_page(page); - page_cache_release(page); + put_page(page); rdata->pages[i] = NULL; rdata->nr_pages--; continue; @@ -3402,8 +3402,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, } /* move first page to the tmplist */ - *offset = (loff_t)page->index << PAGE_CACHE_SHIFT; - *bytes = PAGE_CACHE_SIZE; + *offset = (loff_t)page->index << PAGE_SHIFT; + *bytes = PAGE_SIZE; *nr_pages = 1; list_move_tail(&page->lru, tmplist); @@ -3415,7 +3415,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, break; /* would this page push the read over the rsize? */ - if (*bytes + PAGE_CACHE_SIZE > rsize) + if (*bytes + PAGE_SIZE > rsize) break; __SetPageLocked(page); @@ -3424,7 +3424,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, break; } list_move_tail(&page->lru, tmplist); - (*bytes) += PAGE_CACHE_SIZE; + (*bytes) += PAGE_SIZE; expected_index++; (*nr_pages)++; } @@ -3493,7 +3493,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, * reach this point however since we set ra_pages to 0 when the * rsize is smaller than a cache page. 
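A compact sketch of the batching rule readpages_get_pages() applies above, assuming 4 KiB pages: the first page is taken unconditionally, then consecutive pages are added while the running byte count stays within rsize. The sk_* helper is illustrative only:

#include <assert.h>

static unsigned int sk_batch_pages(unsigned int nr_avail, unsigned int rsize)
{
	unsigned int bytes = 4096;	/* first page is always taken */
	unsigned int nr = 1;

	while (nr < nr_avail && bytes + 4096 <= rsize) {
		bytes += 4096;		/* next page still fits in rsize */
		nr++;
	}
	return nr;
}

int main(void)
{
	assert(sk_batch_pages(1024, 1024 * 1024) == 256); /* 1M rsize -> 256 pages */
	assert(sk_batch_pages(3, 1024 * 1024) == 3);	  /* ran out of pages */
	return 0;
}

With the 1M default I/O size this caps a batch at 256 pages, consistent with the find_get_pages behaviour discussed in cifsglob.h above.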
*/ - if (unlikely(rsize < PAGE_CACHE_SIZE)) { + if (unlikely(rsize < PAGE_SIZE)) { add_credits_and_wake_if(server, credits, 0); return 0; } @@ -3512,7 +3512,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, list_del(&page->lru); lru_cache_add_file(page); unlock_page(page); - page_cache_release(page); + put_page(page); } rc = -ENOMEM; add_credits_and_wake_if(server, credits, 0); @@ -3524,7 +3524,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, rdata->offset = offset; rdata->bytes = bytes; rdata->pid = pid; - rdata->pagesz = PAGE_CACHE_SIZE; + rdata->pagesz = PAGE_SIZE; rdata->read_into_pages = cifs_readpages_read_into_pages; rdata->credits = credits; @@ -3542,7 +3542,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, page = rdata->pages[i]; lru_cache_add_file(page); unlock_page(page); - page_cache_release(page); + put_page(page); } /* Fallback to the readpage in error/reconnect cases */ kref_put(&rdata->refcount, cifs_readdata_release); @@ -3577,7 +3577,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page, read_data = kmap(page); /* for reads over a certain size could initiate async read ahead */ - rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset); + rc = cifs_read(file, read_data, PAGE_SIZE, poffset); if (rc < 0) goto io_error; @@ -3587,8 +3587,8 @@ static int cifs_readpage_worker(struct file *file, struct page *page, file_inode(file)->i_atime = current_fs_time(file_inode(file)->i_sb); - if (PAGE_CACHE_SIZE > rc) - memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc); + if (PAGE_SIZE > rc) + memset(read_data + rc, 0, PAGE_SIZE - rc); flush_dcache_page(page); SetPageUptodate(page); @@ -3608,7 +3608,7 @@ read_complete: static int cifs_readpage(struct file *file, struct page *page) { - loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; + loff_t offset = (loff_t)page->index << PAGE_SHIFT; int rc = -EACCES; unsigned int xid; @@ -3679,8 +3679,8 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping, struct page **pagep, void **fsdata) { int oncethru = 0; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - loff_t offset = pos & (PAGE_CACHE_SIZE - 1); + pgoff_t index = pos >> PAGE_SHIFT; + loff_t offset = pos & (PAGE_SIZE - 1); loff_t page_start = pos & PAGE_MASK; loff_t i_size; struct page *page; @@ -3703,7 +3703,7 @@ start: * the server. If the write is short, we'll end up doing a sync write * instead. */ - if (len == PAGE_CACHE_SIZE) + if (len == PAGE_SIZE) goto out; /* @@ -3718,7 +3718,7 @@ start: (offset == 0 && (pos + len) >= i_size)) { zero_user_segments(page, 0, offset, offset + len, - PAGE_CACHE_SIZE); + PAGE_SIZE); /* * PageChecked means that the parts of the page * to which we're not writing are considered up @@ -3737,7 +3737,7 @@ start: * do a sync write instead since PG_uptodate isn't set. 
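Condensing the two skip-read paths visible in the cifs_write_begin() hunk above into a sketch (assumed 4 KiB pages; the real function applies further checks not shown in this hunk):

#include <assert.h>

/* Nonzero when write_begin may skip reading the page from the server. */
static int sk_can_skip_read(unsigned int len, unsigned long long pos,
			    unsigned int offset, unsigned long long i_size)
{
	if (len == 4096)			/* whole page is overwritten */
		return 1;
	if (offset == 0 && pos + len >= i_size)	/* no server data survives */
		return 1;
	return 0;
}

int main(void)
{
	assert(sk_can_skip_read(4096, 0, 0, 1 << 20));	    /* full-page write */
	assert(sk_can_skip_read(100, 1 << 20, 0, 1 << 20)); /* append at EOF */
	assert(!sk_can_skip_read(100, 4096, 100, 1 << 20)); /* partial, mid-file */
	return 0;
}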
*/ cifs_readpage_worker(file, page, &page_start); - page_cache_release(page); + put_page(page); oncethru = 1; goto start; } else { @@ -3764,7 +3764,7 @@ static void cifs_invalidate_page(struct page *page, unsigned int offset, { struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host); - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0 && length == PAGE_SIZE) cifs_fscache_invalidate_page(page, &cifsi->vfs_inode); } @@ -3772,7 +3772,7 @@ static int cifs_launder_page(struct page *page) { int rc = 0; loff_t range_start = page_offset(page); - loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); + loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1); struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index aeb26dbfa1bf..5f9ad5c42180 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -59,7 +59,7 @@ static void cifs_set_ops(struct inode *inode) /* check if server can support readpages */ if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf < - PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) + PAGE_SIZE + MAX_CIFS_HDR_SIZE) inode->i_data.a_ops = &cifs_addr_ops_smallbuf; else inode->i_data.a_ops = &cifs_addr_ops; @@ -2019,8 +2019,8 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, static int cifs_truncate_page(struct address_space *mapping, loff_t from) { - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE - 1); + pgoff_t index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE - 1); struct page *page; int rc = 0; @@ -2028,9 +2028,9 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from) if (!page) return -ENOMEM; - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); unlock_page(page); - page_cache_release(page); + put_page(page); return rc; } diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index a8f3b589a2df..cfd91320e869 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -71,8 +71,8 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent) struct inode *inode; struct dentry *root; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = CONFIGFS_MAGIC; sb->s_op = &configfs_ops; sb->s_time_gran = 1; diff --git a/fs/coredump.c b/fs/coredump.c index 9ea87e9fdccf..47c32c3bfa1d 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -32,6 +32,9 @@ #include <linux/pipe_fs_i.h> #include <linux/oom.h> #include <linux/compat.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/path.h> #include <linux/timekeeping.h> #include <asm/uaccess.h> @@ -649,6 +652,8 @@ void do_coredump(const siginfo_t *siginfo) } } else { struct inode *inode; + int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW | + O_LARGEFILE | O_EXCL; if (cprm.limit < binfmt->min_coredump) goto fail_unlock; @@ -687,10 +692,27 @@ void do_coredump(const siginfo_t *siginfo) * what matters is that at least one of the two processes * writes its coredump successfully, not which one. */ - cprm.file = filp_open(cn.corename, - O_CREAT | 2 | O_NOFOLLOW | - O_LARGEFILE | O_EXCL, - 0600); + if (need_suid_safe) { + /* + * Using user namespaces, normal user tasks can change + * their current->fs->root to point to arbitrary + * directories. 
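An easy-to-miss detail in the do_coredump() hunk above: the old call passed the bare constant 2 where open_flags now spells out O_RDWR. On Linux the two are identical, which a trivial standalone check confirms:

#include <fcntl.h>
#include <assert.h>

int main(void)
{
	assert(O_RDWR == 2);	/* the magic number the old filp_open() used */
	assert((O_CREAT | 2) == (O_CREAT | O_RDWR));
	return 0;
}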
Since the intention of the "only dump + * with a fully qualified path" rule is to control where + * coredumps may be placed using root privileges, + * current->fs->root must not be used. Instead, use the + * root directory of init_task. + */ + struct path root; + + task_lock(&init_task); + get_fs_root(init_task.fs, &root); + task_unlock(&init_task); + cprm.file = file_open_root(root.dentry, root.mnt, + cn.corename, open_flags, 0600); + path_put(&root); + } else { + cprm.file = filp_open(cn.corename, open_flags, 0600); + } if (IS_ERR(cprm.file)) goto fail_unlock; diff --git a/fs/cramfs/README b/fs/cramfs/README index 445d1c2d7646..9d4e7ea311f4 100644 --- a/fs/cramfs/README +++ b/fs/cramfs/README @@ -86,26 +86,26 @@ Block Size (Block size in cramfs refers to the size of input data that is compressed at a time. It's intended to be somewhere around -PAGE_CACHE_SIZE for cramfs_readpage's convenience.) +PAGE_SIZE for cramfs_readpage's convenience.) The superblock ought to indicate the block size that the fs was written for, since comments in <linux/pagemap.h> indicate that -PAGE_CACHE_SIZE may grow in future (if I interpret the comment +PAGE_SIZE may grow in future (if I interpret the comment correctly). -Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that -for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in +Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that +for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in turn is defined as PAGE_SIZE (which can be as large as 32KB on arm). This discrepancy is a bug, though it's not clear which should be changed. -One option is to change mkcramfs to take its PAGE_CACHE_SIZE from +One option is to change mkcramfs to take its PAGE_SIZE from <asm/page.h>. Personally I don't like this option, but it does require the least amount of change: just change `#define -PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage +PAGE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage is that the generated cramfs cannot always be shared between different kernels, not even necessarily kernels of the same architecture if -PAGE_CACHE_SIZE is subject to change between kernel versions +PAGE_SIZE is subject to change between kernel versions (currently possible with arm and ia64). The remaining options try to make cramfs more sharable. @@ -126,22 +126,22 @@ size. The options are: 1. Always 4096 bytes. 2. Writer chooses blocksize; kernel adapts but rejects blocksize > - PAGE_CACHE_SIZE. + PAGE_SIZE. 3. Writer chooses blocksize; kernel adapts even to blocksize > - PAGE_CACHE_SIZE. + PAGE_SIZE. It's easy enough to change the kernel to use a smaller value than -PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks. +PAGE_SIZE: just make cramfs_readpage read multiple blocks. -The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE +The cost of option 1 is that kernels with a larger PAGE_SIZE value don't get as good compression as they can. The cost of option 2 relative to option 1 is that the code uses variables instead of #define'd constants. The gain is that people -with kernels having larger PAGE_CACHE_SIZE can make use of that if +with kernels having larger PAGE_SIZE can make use of that if they don't mind their cramfs being inaccessible to kernels with -smaller PAGE_CACHE_SIZE values. +smaller PAGE_SIZE values. Option 3 is easy to implement if we don't mind being CPU-inefficient: e.g. 
get readpage to decompress to a buffer of size MAX_BLKSIZE (which diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index b862bc219cd7..3a32ddf98095 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -137,7 +137,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb, * page cache and dentry tree anyway.. * * This also acts as a way to guarantee contiguous areas of up to - * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to + * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to * worry about end-of-buffer issues even when decompressing a full * page cache. */ @@ -152,7 +152,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb, */ #define BLKS_PER_BUF_SHIFT (2) #define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT) -#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_CACHE_SIZE) +#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_SIZE) static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE]; static unsigned buffer_blocknr[READ_BUFFERS]; @@ -173,8 +173,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i if (!len) return NULL; - blocknr = offset >> PAGE_CACHE_SHIFT; - offset &= PAGE_CACHE_SIZE - 1; + blocknr = offset >> PAGE_SHIFT; + offset &= PAGE_SIZE - 1; /* Check if an existing buffer already has the data.. */ for (i = 0; i < READ_BUFFERS; i++) { @@ -184,14 +184,14 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i continue; if (blocknr < buffer_blocknr[i]) continue; - blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT; + blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT; blk_offset += offset; if (blk_offset + len > BUFFER_SIZE) continue; return read_buffers[i] + blk_offset; } - devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT; + devsize = mapping->host->i_size >> PAGE_SHIFT; /* Ok, read in BLKS_PER_BUF pages completely first. 
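cramfs_read() above keeps READ_BUFFERS decompression windows of BLKS_PER_BUF pages each, and a request only hits the cache when its whole byte span fits inside one window. A minimal sketch of that hit test, assuming 4 KiB pages (the sk_* constants mirror the ones defined in this file):

#include <assert.h>

#define SK_BLKS_PER_BUF	4			/* 1 << BLKS_PER_BUF_SHIFT */
#define SK_BUFFER_SIZE	(SK_BLKS_PER_BUF * 4096)

/* Hit iff bytes [blocknr * 4096 + offset, +len) lie inside the window. */
static int sk_buffer_hit(unsigned int buf_blocknr, unsigned int blocknr,
			 unsigned int offset, unsigned int len)
{
	unsigned int blk_offset;

	if (blocknr < buf_blocknr)
		return 0;
	blk_offset = ((blocknr - buf_blocknr) << 12) + offset;
	return blk_offset + len <= SK_BUFFER_SIZE;
}

int main(void)
{
	assert(sk_buffer_hit(8, 8, 0, SK_BUFFER_SIZE));	/* exactly fills it */
	assert(!sk_buffer_hit(8, 11, 4000, 200));	/* spills past the end */
	assert(!sk_buffer_hit(8, 7, 0, 16));		/* starts before it */
	return 0;
}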
*/ for (i = 0; i < BLKS_PER_BUF; i++) { @@ -213,7 +213,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i wait_on_page_locked(page); if (!PageUptodate(page)) { /* asynchronous error */ - page_cache_release(page); + put_page(page); pages[i] = NULL; } } @@ -229,12 +229,12 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i struct page *page = pages[i]; if (page) { - memcpy(data, kmap(page), PAGE_CACHE_SIZE); + memcpy(data, kmap(page), PAGE_SIZE); kunmap(page); - page_cache_release(page); + put_page(page); } else - memset(data, 0, PAGE_CACHE_SIZE); - data += PAGE_CACHE_SIZE; + memset(data, 0, PAGE_SIZE); + data += PAGE_SIZE; } return read_buffers[buffer] + offset; } @@ -353,7 +353,7 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf) u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = CRAMFS_MAGIC; - buf->f_bsize = PAGE_CACHE_SIZE; + buf->f_bsize = PAGE_SIZE; buf->f_blocks = CRAMFS_SB(sb)->blocks; buf->f_bfree = 0; buf->f_bavail = 0; @@ -496,7 +496,7 @@ static int cramfs_readpage(struct file *file, struct page *page) int bytes_filled; void *pgdata; - maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; bytes_filled = 0; pgdata = kmap(page); @@ -516,14 +516,14 @@ static int cramfs_readpage(struct file *file, struct page *page) if (compr_len == 0) ; /* hole */ - else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) { + else if (unlikely(compr_len > (PAGE_SIZE << 1))) { pr_err("bad compressed blocksize %u\n", compr_len); goto err; } else { mutex_lock(&read_mutex); bytes_filled = cramfs_uncompress_block(pgdata, - PAGE_CACHE_SIZE, + PAGE_SIZE, cramfs_read(sb, start_offset, compr_len), compr_len); mutex_unlock(&read_mutex); @@ -532,7 +532,7 @@ static int cramfs_readpage(struct file *file, struct page *page) } } - memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled); + memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled); flush_dcache_page(page); kunmap(page); SetPageUptodate(page); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index aed9cccca505..7f5804537d30 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -170,15 +170,15 @@ static int do_page_crypto(struct inode *inode, fscrypt_complete, &ecr); BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index)); - memcpy(xts_tweak, &inode->i_ino, sizeof(index)); + memcpy(xts_tweak, &index, sizeof(index)); memset(&xts_tweak[sizeof(index)], 0, FS_XTS_TWEAK_SIZE - sizeof(index)); sg_init_table(&dst, 1); - sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); + sg_set_page(&dst, dest_page, PAGE_SIZE, 0); sg_init_table(&src, 1); - sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); - skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, + sg_set_page(&src, src_page, PAGE_SIZE, 0); + skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, xts_tweak); if (rw == FS_DECRYPT) res = crypto_skcipher_decrypt(req); @@ -287,7 +287,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk, struct bio *bio; int ret, err = 0; - BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); + BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE); ctx = fscrypt_get_ctx(inode); if (IS_ERR(ctx)) @@ -323,7 +323,7 @@ static int dax_load_hole(struct address_space *mapping, struct page *page, size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; if (vmf->pgoff >= size) { unlock_page(page); - page_cache_release(page); + put_page(page); return VM_FAULT_SIGBUS; } @@ -351,7 +351,7 @@ static int 
copy_user_bh(struct page *to, struct inode *inode, } #define NO_SECTOR -1 -#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT)) +#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT)) static int dax_radix_entry(struct address_space *mapping, pgoff_t index, sector_t sector, bool pmd_entry, bool dirty) @@ -506,8 +506,8 @@ int dax_writeback_mapping_range(struct address_space *mapping, if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) return 0; - start_index = wbc->range_start >> PAGE_CACHE_SHIFT; - end_index = wbc->range_end >> PAGE_CACHE_SHIFT; + start_index = wbc->range_start >> PAGE_SHIFT; + end_index = wbc->range_end >> PAGE_SHIFT; pmd_index = DAX_PMD_INDEX(start_index); rcu_read_lock(); @@ -642,12 +642,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, page = find_get_page(mapping, vmf->pgoff); if (page) { if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { - page_cache_release(page); + put_page(page); return VM_FAULT_RETRY; } if (unlikely(page->mapping != mapping)) { unlock_page(page); - page_cache_release(page); + put_page(page); goto repeat; } size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; @@ -711,10 +711,10 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, if (page) { unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT, - PAGE_CACHE_SIZE, 0); + PAGE_SIZE, 0); delete_from_page_cache(page); unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; } @@ -747,7 +747,7 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, unlock_page: if (page) { unlock_page(page); - page_cache_release(page); + put_page(page); } goto out; } @@ -1094,7 +1094,7 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite); * you are truncating a file, the helper function dax_truncate_page() may be * more convenient. * - * We work in terms of PAGE_CACHE_SIZE here for commonality with + * We work in terms of PAGE_SIZE here for commonality with * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem * took care of disposing of the unnecessary blocks. Even if the filesystem * block size is smaller than PAGE_SIZE, we have to zero the rest of the page @@ -1104,18 +1104,18 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length, get_block_t get_block) { struct buffer_head bh; - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + pgoff_t index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE-1); int err; /* Block boundary? Nothing to do */ if (!length) return 0; - BUG_ON((offset + length) > PAGE_CACHE_SIZE); + BUG_ON((offset + length) > PAGE_SIZE); memset(&bh, 0, sizeof(bh)); bh.b_bdev = inode->i_sb->s_bdev; - bh.b_size = PAGE_CACHE_SIZE; + bh.b_size = PAGE_SIZE; err = get_block(inode, index, &bh, 0); if (err < 0) return err; @@ -1123,7 +1123,7 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length, struct block_device *bdev = bh.b_bdev; struct blk_dax_ctl dax = { .sector = to_sector(&bh, inode), - .size = PAGE_CACHE_SIZE, + .size = PAGE_SIZE, }; if (dax_map_atomic(bdev, &dax) < 0) @@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range); * Similar to block_truncate_page(), this function can be called by a * filesystem when it is truncating a DAX file to handle the partial page. 
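The partial-page length dax_truncate_page() computes below is PAGE_ALIGN(from) - from; a worked example, assuming 4 KiB pages:

#include <assert.h>

#define SK_PAGE_SIZE	4096UL
#define SK_PAGE_ALIGN(x) (((x) + SK_PAGE_SIZE - 1) & ~(SK_PAGE_SIZE - 1))

int main(void)
{
	unsigned long from = 5000;	/* new file size, mid-page */
	unsigned long length = SK_PAGE_ALIGN(from) - from;

	assert(SK_PAGE_ALIGN(from) == 8192);
	assert(length == 3192);		/* tail of the last page to zero */

	/* A page-aligned size needs no zeroing at all. */
	assert(SK_PAGE_ALIGN(8192) - 8192 == 0);
	return 0;
}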
* - * We work in terms of PAGE_CACHE_SIZE here for commonality with + * We work in terms of PAGE_SIZE here for commonality with * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem * took care of disposing of the unnecessary blocks. Even if the filesystem * block size is smaller than PAGE_SIZE, we have to zero the rest of the page @@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range); */ int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block) { - unsigned length = PAGE_CACHE_ALIGN(from) - from; + unsigned length = PAGE_ALIGN(from) - from; return dax_zero_page_range(inode, from, length, get_block); } EXPORT_SYMBOL_GPL(dax_truncate_page); diff --git a/fs/direct-io.c b/fs/direct-io.c index 476f1ecbd1f0..472037732daf 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -172,7 +172,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) */ if (dio->page_errors == 0) dio->page_errors = ret; - page_cache_get(page); + get_page(page); dio->pages[0] = page; sdio->head = 0; sdio->tail = 1; @@ -424,7 +424,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio) { while (sdio->head < sdio->tail) - page_cache_release(dio->pages[sdio->head++]); + put_page(dio->pages[sdio->head++]); } /* @@ -487,7 +487,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio) if (dio->rw == READ && !PageCompound(page) && dio->should_dirty) set_page_dirty_lock(page); - page_cache_release(page); + put_page(page); } err = bio->bi_error; bio_put(bio); @@ -696,7 +696,7 @@ static inline int dio_bio_add_page(struct dio_submit *sdio) */ if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE) sdio->pages_in_io--; - page_cache_get(sdio->cur_page); + get_page(sdio->cur_page); sdio->final_block_in_bio = sdio->cur_page_block + (sdio->cur_page_len >> sdio->blkbits); ret = 0; @@ -810,13 +810,13 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, */ if (sdio->cur_page) { ret = dio_send_cur_page(dio, sdio, map_bh); - page_cache_release(sdio->cur_page); + put_page(sdio->cur_page); sdio->cur_page = NULL; if (ret) return ret; } - page_cache_get(page); /* It is in dio */ + get_page(page); /* It is in dio */ sdio->cur_page = page; sdio->cur_page_offset = offset; sdio->cur_page_len = len; @@ -830,7 +830,7 @@ out: if (sdio->boundary) { ret = dio_send_cur_page(dio, sdio, map_bh); dio_bio_submit(dio, sdio); - page_cache_release(sdio->cur_page); + put_page(sdio->cur_page); sdio->cur_page = NULL; } return ret; @@ -947,7 +947,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, ret = get_more_blocks(dio, sdio, map_bh); if (ret) { - page_cache_release(page); + put_page(page); goto out; } if (!buffer_mapped(map_bh)) @@ -988,7 +988,7 @@ do_holes: /* AKPM: eargh, -ENOTBLK is a hack */ if (dio->rw & WRITE) { - page_cache_release(page); + put_page(page); return -ENOTBLK; } @@ -1001,7 +1001,7 @@ do_holes: if (sdio->block_in_file >= i_size_aligned >> blkbits) { /* We hit eof */ - page_cache_release(page); + put_page(page); goto out; } zero_user(page, from, 1 << blkbits); @@ -1041,7 +1041,7 @@ do_holes: sdio->next_block_for_io, map_bh); if (ret) { - page_cache_release(page); + put_page(page); goto out; } sdio->next_block_for_io += this_chunk_blocks; @@ -1057,7 +1057,7 @@ next_block: } /* Drop the ref which was taken in get_user_pages() */ - page_cache_release(page); + put_page(page); } out: return ret; @@ -1281,7 +1281,7 
@@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, ret2 = dio_send_cur_page(dio, &sdio, &map_bh); if (retval == 0) retval = ret2; - page_cache_release(sdio.cur_page); + put_page(sdio.cur_page); sdio.cur_page = NULL; } if (sdio.bio) diff --git a/fs/dlm/config.c b/fs/dlm/config.c index 519112168a9e..1669f6291c95 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c @@ -343,13 +343,12 @@ static struct config_group *make_cluster(struct config_group *g, struct dlm_cluster *cl = NULL; struct dlm_spaces *sps = NULL; struct dlm_comms *cms = NULL; - void *gps = NULL; cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS); sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS); cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS); - if (!cl || !gps || !sps || !cms) + if (!cl || !sps || !cms) goto fail; config_group_init_type_name(&cl->group, name, &cluster_type); diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 00640e70ed7a..1ab012a27d9f 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -640,7 +640,7 @@ static int receive_from_sock(struct connection *con) con->rx_page = alloc_page(GFP_ATOMIC); if (con->rx_page == NULL) goto out_resched; - cbuf_init(&con->cb, PAGE_CACHE_SIZE); + cbuf_init(&con->cb, PAGE_SIZE); } /* @@ -657,7 +657,7 @@ static int receive_from_sock(struct connection *con) * buffer and the start of the currently used section (cb.base) */ if (cbuf_data(&con->cb) >= con->cb.base) { - iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb); + iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb); iov[1].iov_len = con->cb.base; iov[1].iov_base = page_address(con->rx_page); nvec = 2; @@ -675,7 +675,7 @@ static int receive_from_sock(struct connection *con) ret = dlm_process_incoming_buffer(con->nodeid, page_address(con->rx_page), con->cb.base, con->cb.len, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (ret == -EBADMSG) { log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d", page_address(con->rx_page), con->cb.base, @@ -1416,7 +1416,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc) spin_lock(&con->writequeue_lock); e = list_entry(con->writequeue.prev, struct writequeue_entry, list); if ((&e->list == &con->writequeue) || - (PAGE_CACHE_SIZE - e->end < len)) { + (PAGE_SIZE - e->end < len)) { e = NULL; } else { offset = e->end; diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 64026e53722a..d09cb4cdd09f 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -286,7 +286,7 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg, pg = virt_to_page(addr); offset = offset_in_page(addr); sg_set_page(&sg[i], pg, 0, offset); - remainder_of_page = PAGE_CACHE_SIZE - offset; + remainder_of_page = PAGE_SIZE - offset; if (size >= remainder_of_page) { sg[i].length = remainder_of_page; addr += remainder_of_page; @@ -400,7 +400,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat, struct page *page) { return ecryptfs_lower_header_size(crypt_stat) + - ((loff_t)page->index << PAGE_CACHE_SHIFT); + ((loff_t)page->index << PAGE_SHIFT); } /** @@ -428,7 +428,7 @@ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat, size_t extent_size = crypt_stat->extent_size; int rc; - extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size)); + extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size)); rc = ecryptfs_derive_iv(extent_iv, crypt_stat, (extent_base + extent_offset)); if (rc) { @@ -498,7 +498,7 @@ int ecryptfs_encrypt_page(struct page *page) } for (extent_offset = 0; - extent_offset 
< (PAGE_CACHE_SIZE / crypt_stat->extent_size); + extent_offset < (PAGE_SIZE / crypt_stat->extent_size); extent_offset++) { rc = crypt_extent(crypt_stat, enc_extent_page, page, extent_offset, ENCRYPT); @@ -512,7 +512,7 @@ int ecryptfs_encrypt_page(struct page *page) lower_offset = lower_offset_for_page(crypt_stat, page); enc_extent_virt = kmap(enc_extent_page); rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset, - PAGE_CACHE_SIZE); + PAGE_SIZE); kunmap(enc_extent_page); if (rc < 0) { ecryptfs_printk(KERN_ERR, @@ -560,7 +560,7 @@ int ecryptfs_decrypt_page(struct page *page) lower_offset = lower_offset_for_page(crypt_stat, page); page_virt = kmap(page); - rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE, + rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE, ecryptfs_inode); kunmap(page); if (rc < 0) { @@ -571,7 +571,7 @@ int ecryptfs_decrypt_page(struct page *page) } for (extent_offset = 0; - extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); + extent_offset < (PAGE_SIZE / crypt_stat->extent_size); extent_offset++) { rc = crypt_extent(crypt_stat, page, page, extent_offset, DECRYPT); @@ -659,11 +659,11 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat) if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; else { - if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) + if (PAGE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; else - crypt_stat->metadata_size = PAGE_CACHE_SIZE; + crypt_stat->metadata_size = PAGE_SIZE; } } @@ -1442,7 +1442,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) ECRYPTFS_VALIDATE_HEADER_SIZE); if (rc) { /* metadata is not in the file header, so try xattrs */ - memset(page_virt, 0, PAGE_CACHE_SIZE); + memset(page_virt, 0, PAGE_SIZE); rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); if (rc) { printk(KERN_DEBUG "Valid eCryptfs headers not found in " @@ -1475,7 +1475,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) } out: if (page_virt) { - memset(page_virt, 0, PAGE_CACHE_SIZE); + memset(page_virt, 0, PAGE_SIZE); kmem_cache_free(ecryptfs_header_cache, page_virt); } return rc; diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 121114e9a464..224b49e71aa4 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -763,10 +763,10 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia, } else { /* ia->ia_size < i_size_read(inode) */ /* We're chopping off all the pages down to the page * in which ia->ia_size is located. Fill in the end of - * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to - * PAGE_CACHE_SIZE with zeros. */ - size_t num_zeros = (PAGE_CACHE_SIZE - - (ia->ia_size & ~PAGE_CACHE_MASK)); + * that page from (ia->ia_size & ~PAGE_MASK) to + * PAGE_SIZE with zeros. 
*/ + size_t num_zeros = (PAGE_SIZE + - (ia->ia_size & ~PAGE_MASK)); if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { truncate_setsize(inode, ia->ia_size); diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index c5c84dfb5b3e..3cf1546dca82 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -635,8 +635,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, if (!s) { printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc " "[%zd] bytes of kernel memory\n", __func__, sizeof(*s)); - rc = -ENOMEM; - goto out; + return -ENOMEM; } (*packet_size) = 0; rc = ecryptfs_find_auth_tok_for_sig( @@ -922,8 +921,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, if (!s) { printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc " "[%zd] bytes of kernel memory\n", __func__, sizeof(*s)); - rc = -ENOMEM; - goto out; + return -ENOMEM; } if (max_packet_size < ECRYPTFS_TAG_70_MIN_METADATA_SIZE) { printk(KERN_WARNING "%s: max_packet_size is [%zd]; it must be " @@ -1800,7 +1798,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat, * added to our &auth_tok_list */ next_packet_is_auth_tok_packet = 1; while (next_packet_is_auth_tok_packet) { - size_t max_packet_size = ((PAGE_CACHE_SIZE - 8) - i); + size_t max_packet_size = ((PAGE_SIZE - 8) - i); switch (src[i]) { case ECRYPTFS_TAG_3_PACKET_TYPE: diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 8b0b4a73116d..1698132d0e57 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -695,12 +695,12 @@ static struct ecryptfs_cache_info { { .cache = &ecryptfs_header_cache, .name = "ecryptfs_headers", - .size = PAGE_CACHE_SIZE, + .size = PAGE_SIZE, }, { .cache = &ecryptfs_xattr_cache, .name = "ecryptfs_xattr_cache", - .size = PAGE_CACHE_SIZE, + .size = PAGE_SIZE, }, { .cache = &ecryptfs_key_record_cache, @@ -818,7 +818,7 @@ static int __init ecryptfs_init(void) { int rc; - if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) { + if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_SIZE) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is " "larger than the host's page size, and so " @@ -826,7 +826,7 @@ static int __init ecryptfs_init(void) "default eCryptfs extent size is [%u] bytes; " "the page size is [%lu] bytes.\n", ECRYPTFS_DEFAULT_EXTENT_SIZE, - (unsigned long)PAGE_CACHE_SIZE); + (unsigned long)PAGE_SIZE); goto out; } rc = ecryptfs_init_kmem_caches(); diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 1f5865263b3e..e6b1d80952b9 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -122,7 +122,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, struct ecryptfs_crypt_stat *crypt_stat) { loff_t extent_num_in_page = 0; - loff_t num_extents_per_page = (PAGE_CACHE_SIZE + loff_t num_extents_per_page = (PAGE_SIZE / crypt_stat->extent_size); int rc = 0; @@ -138,7 +138,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, char *page_virt; page_virt = kmap_atomic(page); - memset(page_virt, 0, PAGE_CACHE_SIZE); + memset(page_virt, 0, PAGE_SIZE); /* TODO: Support more than one header extent */ if (view_extent_num == 0) { size_t written; @@ -164,8 +164,8 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, - crypt_stat->metadata_size); rc = ecryptfs_read_lower_page_segment( - page, (lower_offset >> PAGE_CACHE_SHIFT), - (lower_offset & ~PAGE_CACHE_MASK), + page, (lower_offset >> PAGE_SHIFT), + (lower_offset & ~PAGE_MASK), crypt_stat->extent_size, page->mapping->host); if (rc) { printk(KERN_ERR "%s: Error attempting to read " @@ -198,7
+198,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page) if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { rc = ecryptfs_read_lower_page_segment(page, page->index, 0, - PAGE_CACHE_SIZE, + PAGE_SIZE, page->mapping->host); } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) { if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) { @@ -215,7 +215,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page) } else { rc = ecryptfs_read_lower_page_segment( - page, page->index, 0, PAGE_CACHE_SIZE, + page, page->index, 0, PAGE_SIZE, page->mapping->host); if (rc) { printk(KERN_ERR "Error reading page; rc = " @@ -250,12 +250,12 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to) struct inode *inode = page->mapping->host; int end_byte_in_page; - if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index) + if ((i_size_read(inode) / PAGE_SIZE) != page->index) goto out; - end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; + end_byte_in_page = i_size_read(inode) % PAGE_SIZE; if (to > end_byte_in_page) end_byte_in_page = to; - zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE); + zero_user_segment(page, end_byte_in_page, PAGE_SIZE); out: return 0; } @@ -279,7 +279,7 @@ static int ecryptfs_write_begin(struct file *file, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; loff_t prev_page_end_size; int rc = 0; @@ -289,14 +289,14 @@ static int ecryptfs_write_begin(struct file *file, return -ENOMEM; *pagep = page; - prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT); + prev_page_end_size = ((loff_t)index << PAGE_SHIFT); if (!PageUptodate(page)) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(mapping->host)->crypt_stat; if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { rc = ecryptfs_read_lower_page_segment( - page, index, 0, PAGE_CACHE_SIZE, mapping->host); + page, index, 0, PAGE_SIZE, mapping->host); if (rc) { printk(KERN_ERR "%s: Error attempting to read " "lower page segment; rc = [%d]\n", @@ -322,7 +322,7 @@ static int ecryptfs_write_begin(struct file *file, SetPageUptodate(page); } else { rc = ecryptfs_read_lower_page_segment( - page, index, 0, PAGE_CACHE_SIZE, + page, index, 0, PAGE_SIZE, mapping->host); if (rc) { printk(KERN_ERR "%s: Error reading " @@ -336,9 +336,9 @@ static int ecryptfs_write_begin(struct file *file, } else { if (prev_page_end_size >= i_size_read(page->mapping->host)) { - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); SetPageUptodate(page); - } else if (len < PAGE_CACHE_SIZE) { + } else if (len < PAGE_SIZE) { rc = ecryptfs_decrypt_page(page); if (rc) { printk(KERN_ERR "%s: Error decrypting " @@ -371,11 +371,11 @@ static int ecryptfs_write_begin(struct file *file, * of page? Zero it out. 
*/ if ((i_size_read(mapping->host) == prev_page_end_size) && (pos != 0)) - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); out: if (unlikely(rc)) { unlock_page(page); - page_cache_release(page); + put_page(page); *pagep = NULL; } return rc; @@ -437,7 +437,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode) } inode_lock(lower_inode); size = lower_inode->i_op->getxattr(lower_dentry, ECRYPTFS_XATTR_NAME, - xattr_virt, PAGE_CACHE_SIZE); + xattr_virt, PAGE_SIZE); if (size < 0) size = 8; put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt); @@ -479,8 +479,8 @@ static int ecryptfs_write_end(struct file *file, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + pgoff_t index = pos >> PAGE_SHIFT; + unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + copied; struct inode *ecryptfs_inode = mapping->host; struct ecryptfs_crypt_stat *crypt_stat = @@ -500,7 +500,7 @@ static int ecryptfs_write_end(struct file *file, goto out; } if (!PageUptodate(page)) { - if (copied < PAGE_CACHE_SIZE) { + if (copied < PAGE_SIZE) { rc = 0; goto out; } @@ -533,7 +533,7 @@ static int ecryptfs_write_end(struct file *file, rc = copied; out: unlock_page(page); - page_cache_release(page); + put_page(page); return rc; } diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index 09fe622274e4..158a3a39f82d 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c @@ -74,7 +74,7 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode, loff_t offset; int rc; - offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT) + offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT) + offset_in_page); virt = kmap(page_for_lower); rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size); @@ -123,9 +123,9 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, else pos = offset; while (pos < (offset + size)) { - pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT); - size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK); - size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page); + pgoff_t ecryptfs_page_idx = (pos >> PAGE_SHIFT); + size_t start_offset_in_page = (pos & ~PAGE_MASK); + size_t num_bytes = (PAGE_SIZE - start_offset_in_page); loff_t total_remaining_bytes = ((offset + size) - pos); if (fatal_signal_pending(current)) { @@ -165,7 +165,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, * Fill in zero values to the end of the page */ memset(((char *)ecryptfs_page_virt + start_offset_in_page), 0, - PAGE_CACHE_SIZE - start_offset_in_page); + PAGE_SIZE - start_offset_in_page); } /* pos >= offset, we are now writing the data request */ @@ -186,7 +186,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, ecryptfs_page, start_offset_in_page, data_offset); - page_cache_release(ecryptfs_page); + put_page(ecryptfs_page); if (rc) { printk(KERN_ERR "%s: Error encrypting " "page; rc = [%d]\n", __func__, rc); @@ -262,7 +262,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs, loff_t offset; int rc; - offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page); + offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page); virt = kmap(page_for_ecryptfs); rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode); if (rc > 0) diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 
dd029d13ea61..553c5d2db4a4 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -197,8 +197,8 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) efivarfs_sb = sb; sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = EFIVARFS_MAGIC; sb->s_op = &efivarfs_ops; sb->s_d_op = &efivarfs_d_ops; diff --git a/fs/eventfd.c b/fs/eventfd.c index ed70cf9fdc7b..1231cd1999d8 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -121,8 +121,46 @@ static unsigned int eventfd_poll(struct file *file, poll_table *wait) u64 count; poll_wait(file, &ctx->wqh, wait); - smp_rmb(); - count = ctx->count; + + /* + * All writes to ctx->count occur within ctx->wqh.lock. This read + * can be done outside ctx->wqh.lock because we know that poll_wait + * takes that lock (through add_wait_queue) if our caller will sleep. + * + * The read _can_ therefore seep into add_wait_queue's critical + * section, but cannot move above it! add_wait_queue's spin_lock acts + * as an acquire barrier and ensures that the read be ordered properly + * against the writes. The following CAN happen and is safe: + * + * poll write + * ----------------- ------------ + * lock ctx->wqh.lock (in poll_wait) + * count = ctx->count + * __add_wait_queue + * unlock ctx->wqh.lock + * lock ctx->wqh.lock + * ctx->count += n + * if (waitqueue_active) + * wake_up_locked_poll + * unlock ctx->wqh.lock + * eventfd_poll returns 0 + * + * but the following, which would miss a wakeup, cannot happen: + * + * poll write + * ----------------- ------------ + * count = ctx->count (INVALID!) + * lock ctx->wqh.lock + * ctx->count += n + * **waitqueue_active is false** + * **no wake_up_locked_poll!** + * unlock ctx->wqh.lock + * lock ctx->wqh.lock (in poll_wait) + * __add_wait_queue + * unlock ctx->wqh.lock + * eventfd_poll returns 0 + */ + count = READ_ONCE(ctx->count); if (count > 0) events |= POLLIN; diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c index e5bb2abf77f9..547b93cbea63 100644 --- a/fs/exofs/dir.c +++ b/fs/exofs/dir.c @@ -41,16 +41,16 @@ static inline unsigned exofs_chunk_size(struct inode *inode) static inline void exofs_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr) { loff_t last_byte = inode->i_size; - last_byte -= page_nr << PAGE_CACHE_SHIFT; - if (last_byte > PAGE_CACHE_SIZE) - last_byte = PAGE_CACHE_SIZE; + last_byte -= page_nr << PAGE_SHIFT; + if (last_byte > PAGE_SIZE) + last_byte = PAGE_SIZE; return last_byte; } @@ -85,13 +85,13 @@ static void exofs_check_page(struct page *page) unsigned chunk_size = exofs_chunk_size(dir); char *kaddr = page_address(page); unsigned offs, rec_len; - unsigned limit = PAGE_CACHE_SIZE; + unsigned limit = PAGE_SIZE; struct exofs_dir_entry *p; char *error; /* if the page is the last one in the directory */ - if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { - limit = dir->i_size & ~PAGE_CACHE_MASK; + if ((dir->i_size >> PAGE_SHIFT) == page->index) { - limit = dir->i_size & ~PAGE_MASK; if (limit & (chunk_size - 1)) goto Ebadsize; if (!limit) @@ -138,7 +138,7 @@ bad_entry: EXOFS_ERR( "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - " "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n", - dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
_LLU(le64_to_cpu(p->inode_no)), rec_len, p->name_len); goto fail; @@ -147,7 +147,7 @@ Eend: EXOFS_ERR("ERROR [exofs_check_page]: " "entry in directory(0x%lx) spans the page boundary" "offset=%lu, inode=0x%llx\n", - dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, (page->index<<PAGE_SHIFT)+offs, _LLU(le64_to_cpu(p->inode_no))); fail: SetPageChecked(page); @@ -237,8 +237,8 @@ exofs_readdir(struct file *file, struct dir_context *ctx) { loff_t pos = ctx->pos; struct inode *inode = file_inode(file); - unsigned int offset = pos & ~PAGE_CACHE_MASK; - unsigned long n = pos >> PAGE_CACHE_SHIFT; + unsigned int offset = pos & ~PAGE_MASK; + unsigned long n = pos >> PAGE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(exofs_chunk_size(inode)-1); int need_revalidate = (file->f_version != inode->i_version); @@ -254,7 +254,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx) if (IS_ERR(page)) { EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_SIZE - offset; return PTR_ERR(page); } kaddr = page_address(page); @@ -262,7 +262,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx) if (offset) { offset = exofs_validate_entry(kaddr, offset, chunk_mask); - ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; + ctx->pos = (n<<PAGE_SHIFT) + offset; } file->f_version = inode->i_version; need_revalidate = 0; @@ -449,7 +449,7 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode) kaddr = page_address(page); dir_end = kaddr + exofs_last_byte(dir, n); de = (struct exofs_dir_entry *)kaddr; - kaddr += PAGE_CACHE_SIZE - reclen; + kaddr += PAGE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { name_len = 0; @@ -602,7 +602,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent) kunmap_atomic(kaddr); err = exofs_commit_chunk(page, 0, chunk_size); fail: - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 9eaf595aeaf8..49e1bd00b4ec 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -317,7 +317,7 @@ static int read_exec(struct page_collect *pcol) if (!pcol->ios) { int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true, - pcol->pg_first << PAGE_CACHE_SHIFT, + pcol->pg_first << PAGE_SHIFT, pcol->length, &pcol->ios); if (ret) @@ -383,7 +383,7 @@ static int readpage_strip(void *data, struct page *page) struct inode *inode = pcol->inode; struct exofs_i_info *oi = exofs_i(inode); loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; size_t len; int ret; @@ -397,9 +397,9 @@ static int readpage_strip(void *data, struct page *page) pcol->that_locked_page = page; if (page->index < end_index) - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; else if (page->index == end_index) - len = i_size & ~PAGE_CACHE_MASK; + len = i_size & ~PAGE_MASK; else len = 0; @@ -442,8 +442,8 @@ try_again: goto fail; } - if (len != PAGE_CACHE_SIZE) - zero_user(page, len, PAGE_CACHE_SIZE - len); + if (len != PAGE_SIZE) + zero_user(page, len, PAGE_SIZE - len); EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n", inode->i_ino, page->index, len); @@ -609,7 +609,7 @@ static void __r4w_put_page(void *priv, struct page *page) if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) { EXOFS_DBGMSG2("index=0x%lx\n", page->index); - page_cache_release(page); + put_page(page); return; } EXOFS_DBGMSG2("that_locked_page index=0x%lx\n", @@ -633,7 +633,7 
@@ static int write_exec(struct page_collect *pcol) BUG_ON(pcol->ios); ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false, - pcol->pg_first << PAGE_CACHE_SHIFT, + pcol->pg_first << PAGE_SHIFT, pcol->length, &pcol->ios); if (unlikely(ret)) goto err; @@ -696,7 +696,7 @@ static int writepage_strip(struct page *page, struct inode *inode = pcol->inode; struct exofs_i_info *oi = exofs_i(inode); loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; size_t len; int ret; @@ -708,9 +708,9 @@ static int writepage_strip(struct page *page, if (page->index < end_index) /* in this case, the page is within the limits of the file */ - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; else { - len = i_size & ~PAGE_CACHE_MASK; + len = i_size & ~PAGE_MASK; if (page->index > end_index || !len) { /* in this case, the page is outside the limits @@ -790,10 +790,10 @@ static int exofs_writepages(struct address_space *mapping, long start, end, expected_pages; int ret; - start = wbc->range_start >> PAGE_CACHE_SHIFT; + start = wbc->range_start >> PAGE_SHIFT; end = (wbc->range_end == LLONG_MAX) ? start + mapping->nrpages : - wbc->range_end >> PAGE_CACHE_SHIFT; + wbc->range_end >> PAGE_SHIFT; if (start || end) expected_pages = end - start + 1; @@ -881,15 +881,15 @@ int exofs_write_begin(struct file *file, struct address_space *mapping, } /* read modify write */ - if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) { + if (!PageUptodate(page) && (len != PAGE_SIZE)) { loff_t i_size = i_size_read(mapping->host); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; size_t rlen; if (page->index < end_index) - rlen = PAGE_CACHE_SIZE; + rlen = PAGE_SIZE; else if (page->index == end_index) - rlen = i_size & ~PAGE_CACHE_MASK; + rlen = i_size & ~PAGE_MASK; else rlen = 0; diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index c20d77df2679..622a686bb08b 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c @@ -292,11 +292,11 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: return err; } diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 0c6638b40f21..7ff6fcfa685d 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -37,7 +37,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen) { unsigned len = le16_to_cpu(dlen); -#if (PAGE_CACHE_SIZE >= 65536) +#if (PAGE_SIZE >= 65536) if (len == EXT2_MAX_REC_LEN) return 1 << 16; #endif @@ -46,7 +46,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen) static inline __le16 ext2_rec_len_to_disk(unsigned len) { -#if (PAGE_CACHE_SIZE >= 65536) +#if (PAGE_SIZE >= 65536) if (len == (1 << 16)) return cpu_to_le16(EXT2_MAX_REC_LEN); else @@ -67,7 +67,7 @@ static inline unsigned ext2_chunk_size(struct inode *inode) static inline void ext2_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } /* @@ -79,9 +79,9 @@ ext2_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = inode->i_size; - last_byte -= page_nr << PAGE_CACHE_SHIFT; - if (last_byte > PAGE_CACHE_SIZE) - last_byte = PAGE_CACHE_SIZE; + last_byte -= page_nr << PAGE_SHIFT; + if (last_byte > PAGE_SIZE) + last_byte = PAGE_SIZE; return last_byte; } @@ -118,12 +118,12 @@ static void ext2_check_page(struct page *page, int quiet) char *kaddr = 
page_address(page); u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count); unsigned offs, rec_len; - unsigned limit = PAGE_CACHE_SIZE; + unsigned limit = PAGE_SIZE; ext2_dirent *p; char *error; - if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { - limit = dir->i_size & ~PAGE_CACHE_MASK; + if ((dir->i_size >> PAGE_SHIFT) == page->index) { + limit = dir->i_size & ~PAGE_MASK; if (limit & (chunk_size - 1)) goto Ebadsize; if (!limit) @@ -176,7 +176,7 @@ bad_entry: if (!quiet) ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - " "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", - dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, (unsigned long) le32_to_cpu(p->inode), rec_len, p->name_len); goto fail; @@ -186,7 +186,7 @@ Eend: ext2_error(sb, "ext2_check_page", "entry in directory #%lu spans the page boundary" "offset=%lu, inode=%lu", - dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, (page->index<<PAGE_SHIFT)+offs, (unsigned long) le32_to_cpu(p->inode)); } fail: @@ -287,8 +287,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx) loff_t pos = ctx->pos; struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - unsigned int offset = pos & ~PAGE_CACHE_MASK; - unsigned long n = pos >> PAGE_CACHE_SHIFT; + unsigned int offset = pos & ~PAGE_MASK; + unsigned long n = pos >> PAGE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(ext2_chunk_size(inode)-1); unsigned char *types = NULL; @@ -309,14 +309,14 @@ ext2_readdir(struct file *file, struct dir_context *ctx) ext2_error(sb, __func__, "bad page in #%lu", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_SIZE - offset; return PTR_ERR(page); } kaddr = page_address(page); if (unlikely(need_revalidate)) { if (offset) { offset = ext2_validate_entry(kaddr, offset, chunk_mask); - ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; + ctx->pos = (n<<PAGE_SHIFT) + offset; } file->f_version = inode->i_version; need_revalidate = 0; @@ -406,7 +406,7 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir, if (++n >= npages) n = 0; /* next page is past the blocks we've got */ - if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) { + if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { ext2_error(dir->i_sb, __func__, "dir %lu size %lld exceeds block count %llu", dir->i_ino, dir->i_size, @@ -511,7 +511,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode) kaddr = page_address(page); dir_end = kaddr + ext2_last_byte(dir, n); de = (ext2_dirent *)kaddr; - kaddr += PAGE_CACHE_SIZE - reclen; + kaddr += PAGE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { /* We hit i_size */ @@ -655,7 +655,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent) kunmap_atomic(kaddr); err = ext2_commit_chunk(page, 0, chunk_size); fail: - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 7a2be8f7f3c3..d34843925b23 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -398,7 +398,7 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0); else { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } inode_dec_link_count(old_dir); } @@ -408,11 +408,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + 
put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: return err; } diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c index ea69ce4f6d8f..db9ae6e18154 100644 --- a/fs/ext4/crypto.c +++ b/fs/ext4/crypto.c @@ -285,10 +285,10 @@ static int ext4_page_crypto(struct inode *inode, EXT4_XTS_TWEAK_SIZE - sizeof(index)); sg_init_table(&dst, 1); - sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); + sg_set_page(&dst, dest_page, PAGE_SIZE, 0); sg_init_table(&src, 1); - sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); - skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, + sg_set_page(&src, src_page, PAGE_SIZE, 0); + skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, xts_tweak); if (rw == EXT4_DECRYPT) res = crypto_skcipher_decrypt(req); @@ -400,7 +400,7 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, (unsigned long) inode->i_ino, lblk, len); #endif - BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); + BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE); ctx = ext4_get_crypto_ctx(inode, GFP_NOFS); if (IS_ERR(ctx)) diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index ebfcb8999db2..4173bfe21114 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -160,13 +160,13 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) err = ext4_map_blocks(NULL, inode, &map, 0); if (err > 0) { pgoff_t index = map.m_pblk >> - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); if (!ra_has_index(&file->f_ra, index)) page_cache_sync_readahead( sb->s_bdev->bd_inode->i_mapping, &file->f_ra, file, index, 1); - file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; + file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT; bh = ext4_bread(NULL, inode, map.m_lblk, 0); if (IS_ERR(bh)) { err = PTR_ERR(bh); @@ -290,7 +290,7 @@ errout: static inline int is_32bit_api(void) { #ifdef CONFIG_COMPAT - return is_compat_task(); + return in_compat_syscall(); #else return (BITS_PER_LONG == 32); #endif diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 6a857af0e6b1..349afebe21ee 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1984,7 +1984,7 @@ ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize) { unsigned len = le16_to_cpu(dlen); -#if (PAGE_CACHE_SIZE >= 65536) +#if (PAGE_SIZE >= 65536) if (len == EXT4_MAX_REC_LEN || len == 0) return blocksize; return (len & 65532) | ((len & 3) << 16); @@ -1997,7 +1997,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize) { if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3)) BUG(); -#if (PAGE_CACHE_SIZE >= 65536) +#if (PAGE_SIZE >= 65536) if (len < 65536) return cpu_to_le16(len); if (len == blocksize) { diff --git a/fs/ext4/file.c b/fs/ext4/file.c index edba9fb9bb0d..fa2208bae2e1 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -432,8 +432,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, lastoff = startoff; endoff = (loff_t)end_blk << blkbits; - index = startoff >> PAGE_CACHE_SHIFT; - end = endoff >> PAGE_CACHE_SHIFT; + index = startoff >> PAGE_SHIFT; + end = endoff >> PAGE_SHIFT; pagevec_init(&pvec, 0); do { diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 7cbdd3752ba5..7bc6c855cc18 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -482,7 +482,7 @@ static int ext4_read_inline_page(struct inode *inode, struct page *page) ret = ext4_read_inline_data(inode, kaddr, len, &iloc); flush_dcache_page(page); kunmap_atomic(kaddr); - zero_user_segment(page, len, PAGE_CACHE_SIZE); + zero_user_segment(page, len, PAGE_SIZE); 
SetPageUptodate(page); brelse(iloc.bh); @@ -507,7 +507,7 @@ int ext4_readpage_inline(struct inode *inode, struct page *page) if (!page->index) ret = ext4_read_inline_page(inode, page); else if (!PageUptodate(page)) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); } @@ -595,7 +595,7 @@ retry: if (ret) { unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; ext4_orphan_add(handle, inode); up_write(&EXT4_I(inode)->xattr_sem); @@ -621,7 +621,7 @@ retry: out: if (page) { unlock_page(page); - page_cache_release(page); + put_page(page); } if (sem_held) up_write(&EXT4_I(inode)->xattr_sem); @@ -690,7 +690,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping, if (!ext4_has_inline_data(inode)) { ret = 0; unlock_page(page); - page_cache_release(page); + put_page(page); goto out_up_read; } @@ -815,7 +815,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping, if (ret) { up_read(&EXT4_I(inode)->xattr_sem); unlock_page(page); - page_cache_release(page); + put_page(page); ext4_truncate_failed_write(inode); return ret; } @@ -829,7 +829,7 @@ out: up_read(&EXT4_I(inode)->xattr_sem); if (page) { unlock_page(page); - page_cache_release(page); + put_page(page); } return ret; } @@ -919,7 +919,7 @@ retry_journal: out_release_page: up_read(&EXT4_I(inode)->xattr_sem); unlock_page(page); - page_cache_release(page); + put_page(page); out_journal: ext4_journal_stop(handle); out: @@ -947,7 +947,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, i_size_changed = 1; } unlock_page(page); - page_cache_release(page); + put_page(page); /* * Don't mark the inode dirty under page lock. First, it unnecessarily diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 8710174090f5..981a1fc30eaa 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1055,7 +1055,7 @@ int do_journal_get_write_access(handle_t *handle, static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + len; struct inode *inode = page->mapping->host; unsigned block_start, block_end; @@ -1067,15 +1067,15 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, bool decrypt = false; BUG_ON(!PageLocked(page)); - BUG_ON(from > PAGE_CACHE_SIZE); - BUG_ON(to > PAGE_CACHE_SIZE); + BUG_ON(from > PAGE_SIZE); + BUG_ON(to > PAGE_SIZE); BUG_ON(from > to); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); head = page_buffers(page); bbits = ilog2(blocksize); - block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + block = (sector_t)page->index << (PAGE_SHIFT - bbits); for (bh = head, block_start = 0; bh != head || !block_start; block++, block_start = block_end, bh = bh->b_this_page) { @@ -1157,8 +1157,8 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, * we allocate blocks but write fails for some reason */ needed_blocks = ext4_writepage_trans_blocks(inode) + 1; - index = pos >> PAGE_CACHE_SHIFT; - from = pos & (PAGE_CACHE_SIZE - 1); + index = pos >> PAGE_SHIFT; + from = pos & (PAGE_SIZE - 1); to = from + len; if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { @@ -1186,7 +1186,7 @@ retry_grab: retry_journal: handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks); if (IS_ERR(handle)) { - page_cache_release(page); + put_page(page); return PTR_ERR(handle); } @@ 
-1194,7 +1194,7 @@ retry_journal: if (page->mapping != mapping) { /* The page got truncated from under us */ unlock_page(page); - page_cache_release(page); + put_page(page); ext4_journal_stop(handle); goto retry_grab; } @@ -1250,7 +1250,7 @@ retry_journal: if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry_journal; - page_cache_release(page); + put_page(page); return ret; } *pagep = page; @@ -1293,7 +1293,7 @@ static int ext4_write_end(struct file *file, ret = ext4_jbd2_file_inode(handle, inode); if (ret) { unlock_page(page); - page_cache_release(page); + put_page(page); goto errout; } } @@ -1313,7 +1313,7 @@ static int ext4_write_end(struct file *file, */ i_size_changed = ext4_update_inode_size(inode, pos + copied); unlock_page(page); - page_cache_release(page); + put_page(page); if (old_size < pos) pagecache_isize_extended(inode, old_size, pos); @@ -1397,7 +1397,7 @@ static int ext4_journalled_write_end(struct file *file, int size_changed = 0; trace_ext4_journalled_write_end(inode, pos, len, copied); - from = pos & (PAGE_CACHE_SIZE - 1); + from = pos & (PAGE_SIZE - 1); to = from + len; BUG_ON(!ext4_handle_valid(handle)); @@ -1421,7 +1421,7 @@ static int ext4_journalled_write_end(struct file *file, ext4_set_inode_state(inode, EXT4_STATE_JDATA); EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; unlock_page(page); - page_cache_release(page); + put_page(page); if (old_size < pos) pagecache_isize_extended(inode, old_size, pos); @@ -1535,7 +1535,7 @@ static void ext4_da_page_release_reservation(struct page *page, int num_clusters; ext4_fsblk_t lblk; - BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); + BUG_ON(stop > PAGE_SIZE || stop < length); head = page_buffers(page); bh = head; @@ -1551,7 +1551,7 @@ static void ext4_da_page_release_reservation(struct page *page, clear_buffer_delay(bh); } else if (contiguous_blks) { lblk = page->index << - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; ext4_es_remove_extent(inode, lblk, contiguous_blks); @@ -1561,7 +1561,7 @@ static void ext4_da_page_release_reservation(struct page *page, } while ((bh = bh->b_this_page) != head); if (contiguous_blks) { - lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + lblk = page->index << (PAGE_SHIFT - inode->i_blkbits); lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; ext4_es_remove_extent(inode, lblk, contiguous_blks); } @@ -1570,7 +1570,7 @@ static void ext4_da_page_release_reservation(struct page *page, * need to release the reserved space for that cluster. 
*/ num_clusters = EXT4_NUM_B2C(sbi, to_release); while (num_clusters > 0) { - lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) + + lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) + ((num_clusters - 1) << sbi->s_cluster_bits); if (sbi->s_cluster_ratio == 1 || !ext4_find_delalloc_cluster(inode, lblk)) @@ -1617,8 +1617,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd, end = mpd->next_page - 1; if (invalidate) { ext4_lblk_t start, last; - start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); + start = index << (PAGE_SHIFT - inode->i_blkbits); + last = end << (PAGE_SHIFT - inode->i_blkbits); ext4_es_remove_extent(inode, start, last - start + 1); } @@ -1634,7 +1634,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd, BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); if (invalidate) { - block_invalidatepage(page, 0, PAGE_CACHE_SIZE); + block_invalidatepage(page, 0, PAGE_SIZE); ClearPageUptodate(page); } unlock_page(page); @@ -2005,10 +2005,10 @@ static int ext4_writepage(struct page *page, trace_ext4_writepage(page); size = i_size_read(inode); - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; + if (page->index == size >> PAGE_SHIFT) + len = size & ~PAGE_MASK; else - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; page_bufs = page_buffers(page); /* @@ -2032,7 +2032,7 @@ static int ext4_writepage(struct page *page, ext4_bh_delay_or_unwritten)) { redirty_page_for_writepage(wbc, page); if ((current->flags & PF_MEMALLOC) || - (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) { + (inode->i_sb->s_blocksize == PAGE_SIZE)) { /* * For memory cleaning there's no point in writing only * some buffers. So just bail out. Warn if we came here @@ -2074,10 +2074,10 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) int err; BUG_ON(page->index != mpd->first_page); - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; + if (page->index == size >> PAGE_SHIFT) + len = size & ~PAGE_MASK; else - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; clear_page_dirty_for_io(page); err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); if (!err) @@ -2211,7 +2211,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) int nr_pages, i; struct inode *inode = mpd->inode; struct buffer_head *head, *bh; - int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits; + int bpp_bits = PAGE_SHIFT - inode->i_blkbits; pgoff_t start, end; ext4_lblk_t lblk; sector_t pblock; @@ -2272,7 +2272,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) * supports blocksize < pagesize as we will try to * convert potentially unmapped parts of inode. */ - mpd->io_submit.io_end->size += PAGE_CACHE_SIZE; + mpd->io_submit.io_end->size += PAGE_SIZE; /* Page fully mapped - let IO run! */ err = mpage_submit_page(mpd, page); if (err < 0) { @@ -2424,7 +2424,7 @@ update_disksize: * Update on-disk size after IO is submitted. Races with * truncate are avoided by checking i_size under i_data_sem. 
*/ - disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT; + disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT; if (disksize > EXT4_I(inode)->i_disksize) { int err2; loff_t i_size; @@ -2560,7 +2560,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) mpd->next_page = page->index + 1; /* Add all dirty buffers to mpd */ lblk = ((ext4_lblk_t)page->index) << - (PAGE_CACHE_SHIFT - blkbits); + (PAGE_SHIFT - blkbits); head = page_buffers(page); err = mpage_process_page_bufs(mpd, head, head, lblk); if (err <= 0) @@ -2645,7 +2645,7 @@ static int ext4_writepages(struct address_space *mapping, * We may need to convert up to one extent per block in * the page and we may dirty the inode. */ - rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits); + rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits); } /* @@ -2676,8 +2676,8 @@ static int ext4_writepages(struct address_space *mapping, mpd.first_page = writeback_index; mpd.last_page = -1; } else { - mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT; - mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT; + mpd.first_page = wbc->range_start >> PAGE_SHIFT; + mpd.last_page = wbc->range_end >> PAGE_SHIFT; } mpd.inode = inode; @@ -2836,7 +2836,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; handle_t *handle; - index = pos >> PAGE_CACHE_SHIFT; + index = pos >> PAGE_SHIFT; if (ext4_nonda_switch(inode->i_sb)) { *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; @@ -2879,7 +2879,7 @@ retry_journal: handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, ext4_da_write_credits(inode, pos, len)); if (IS_ERR(handle)) { - page_cache_release(page); + put_page(page); return PTR_ERR(handle); } @@ -2887,7 +2887,7 @@ retry_journal: if (page->mapping != mapping) { /* The page got truncated from under us */ unlock_page(page); - page_cache_release(page); + put_page(page); ext4_journal_stop(handle); goto retry_grab; } @@ -2915,7 +2915,7 @@ retry_journal: ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry_journal; - page_cache_release(page); + put_page(page); return ret; } @@ -2963,7 +2963,7 @@ static int ext4_da_write_end(struct file *file, len, copied, page, fsdata); trace_ext4_da_write_end(inode, pos, len, copied); - start = pos & (PAGE_CACHE_SIZE - 1); + start = pos & (PAGE_SIZE - 1); end = start + copied - 1; /* @@ -3185,7 +3185,7 @@ static int __ext4_journalled_invalidatepage(struct page *page, /* * If it's a full truncate we just forget about the pending dirtying */ - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0 && length == PAGE_SIZE) ClearPageChecked(page); return jbd2_journal_invalidatepage(journal, page, offset, length); @@ -3554,8 +3554,8 @@ void ext4_set_aops(struct inode *inode) static int __ext4_block_zero_page_range(handle_t *handle, struct address_space *mapping, loff_t from, loff_t length) { - ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + ext4_fsblk_t index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize, pos; ext4_lblk_t iblock; struct inode *inode = mapping->host; @@ -3563,14 +3563,14 @@ static int __ext4_block_zero_page_range(handle_t *handle, struct page *page; int err = 0; - page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, + page = find_or_create_page(mapping, from >> PAGE_SHIFT, mapping_gfp_constraint(mapping, ~__GFP_FS)); if (!page) return -ENOMEM; blocksize = inode->i_sb->s_blocksize; - iblock = index << (PAGE_CACHE_SHIFT - 
inode->i_sb->s_blocksize_bits); + iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); @@ -3612,7 +3612,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, ext4_encrypted_inode(inode)) { /* We expect the key to be set. */ BUG_ON(!ext4_has_encryption_key(inode)); - BUG_ON(blocksize != PAGE_CACHE_SIZE); + BUG_ON(blocksize != PAGE_SIZE); WARN_ON_ONCE(ext4_decrypt(page)); } } @@ -3636,7 +3636,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, unlock: unlock_page(page); - page_cache_release(page); + put_page(page); return err; } @@ -3651,7 +3651,7 @@ static int ext4_block_zero_page_range(handle_t *handle, struct address_space *mapping, loff_t from, loff_t length) { struct inode *inode = mapping->host; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize = inode->i_sb->s_blocksize; unsigned max = blocksize - (offset & (blocksize - 1)); @@ -3676,7 +3676,7 @@ static int ext4_block_zero_page_range(handle_t *handle, static int ext4_block_truncate_page(handle_t *handle, struct address_space *mapping, loff_t from) { - unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned offset = from & (PAGE_SIZE-1); unsigned length; unsigned blocksize; struct inode *inode = mapping->host; @@ -3814,7 +3814,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) */ if (offset + length > inode->i_size) { length = inode->i_size + - PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - + PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) - offset; } @@ -4889,23 +4889,23 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode) tid_t commit_tid = 0; int ret; - offset = inode->i_size & (PAGE_CACHE_SIZE - 1); + offset = inode->i_size & (PAGE_SIZE - 1); /* * All buffers in the last page remain valid? Then there's nothing to - * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE == + * do. We do the check mainly to optimize the common PAGE_SIZE == * blocksize case */ - if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits)) + if (offset > PAGE_SIZE - (1 << inode->i_blkbits)) return; while (1) { page = find_lock_page(inode->i_mapping, - inode->i_size >> PAGE_CACHE_SHIFT); + inode->i_size >> PAGE_SHIFT); if (!page) return; ret = __ext4_journalled_invalidatepage(page, offset, - PAGE_CACHE_SIZE - offset); + PAGE_SIZE - offset); unlock_page(page); - page_cache_release(page); + put_page(page); if (ret != -EBUSY) return; commit_tid = 0; @@ -5544,10 +5544,10 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) goto out; } - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; + if (page->index == size >> PAGE_SHIFT) + len = size & ~PAGE_MASK; else - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; /* * Return if we have all the buffers mapped. 
This avoids the need to do * journal_start/journal_stop which can block and take a long time @@ -5578,7 +5578,7 @@ retry_alloc: ret = block_page_mkwrite(vma, vmf, get_block); if (!ret && ext4_should_journal_data(inode)) { if (ext4_walk_page_buffers(handle, page_buffers(page), 0, - PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { + PAGE_SIZE, NULL, do_journal_get_write_access)) { unlock_page(page); ret = VM_FAULT_SIGBUS; ext4_journal_stop(handle); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 50e05df28f66..eeeade76012e 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -119,7 +119,7 @@ MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc"); * * * one block each for bitmap and buddy information. So for each group we - * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE / + * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE / * blocksize) blocks. So it can have information regarding groups_per_page * which is blocks_per_page/2 * @@ -807,7 +807,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b) * * one block each for bitmap and buddy information. * So for each group we take up 2 blocks. A page can - * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. + * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. * So it can have information regarding groups_per_page which * is blocks_per_page/2 * @@ -839,7 +839,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) sb = inode->i_sb; ngroups = ext4_get_groups_count(sb); blocksize = 1 << inode->i_blkbits; - blocks_per_page = PAGE_CACHE_SIZE / blocksize; + blocks_per_page = PAGE_SIZE / blocksize; groups_per_page = blocks_per_page >> 1; if (groups_per_page == 0) @@ -993,7 +993,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb, e4b->bd_buddy_page = NULL; e4b->bd_bitmap_page = NULL; - blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; + blocks_per_page = PAGE_SIZE / sb->s_blocksize; /* * the buddy cache inode stores the block bitmap * and buddy information in consecutive blocks. @@ -1028,11 +1028,11 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_page) { unlock_page(e4b->bd_bitmap_page); - page_cache_release(e4b->bd_bitmap_page); + put_page(e4b->bd_bitmap_page); } if (e4b->bd_buddy_page) { unlock_page(e4b->bd_buddy_page); - page_cache_release(e4b->bd_buddy_page); + put_page(e4b->bd_buddy_page); } } @@ -1125,7 +1125,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, might_sleep(); mb_debug(1, "load group %u\n", group); - blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; + blocks_per_page = PAGE_SIZE / sb->s_blocksize; grp = ext4_get_group_info(sb, group); e4b->bd_blkbits = sb->s_blocksize_bits; @@ -1167,7 +1167,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, * is yet to initialize the same. So * wait for it to initialize. 
*/ - page_cache_release(page); + put_page(page); page = find_or_create_page(inode->i_mapping, pnum, gfp); if (page) { BUG_ON(page->mapping != inode->i_mapping); @@ -1203,7 +1203,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); if (page == NULL || !PageUptodate(page)) { if (page) - page_cache_release(page); + put_page(page); page = find_or_create_page(inode->i_mapping, pnum, gfp); if (page) { BUG_ON(page->mapping != inode->i_mapping); @@ -1238,11 +1238,11 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, err: if (page) - page_cache_release(page); + put_page(page); if (e4b->bd_bitmap_page) - page_cache_release(e4b->bd_bitmap_page); + put_page(e4b->bd_bitmap_page); if (e4b->bd_buddy_page) - page_cache_release(e4b->bd_buddy_page); + put_page(e4b->bd_buddy_page); e4b->bd_buddy = NULL; e4b->bd_bitmap = NULL; return ret; @@ -1257,9 +1257,9 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_page) - page_cache_release(e4b->bd_bitmap_page); + put_page(e4b->bd_bitmap_page); if (e4b->bd_buddy_page) - page_cache_release(e4b->bd_buddy_page); + put_page(e4b->bd_buddy_page); } @@ -2833,8 +2833,8 @@ static void ext4_free_data_callback(struct super_block *sb, /* No more items in the per group rb tree * balance refcounts from ext4_mb_free_metadata() */ - page_cache_release(e4b.bd_buddy_page); - page_cache_release(e4b.bd_bitmap_page); + put_page(e4b.bd_buddy_page); + put_page(e4b.bd_bitmap_page); } ext4_unlock_group(sb, entry->efd_group); kmem_cache_free(ext4_free_data_cachep, entry); @@ -4385,9 +4385,9 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac) ext4_mb_put_pa(ac, ac->ac_sb, pa); } if (ac->ac_bitmap_page) - page_cache_release(ac->ac_bitmap_page); + put_page(ac->ac_bitmap_page); if (ac->ac_buddy_page) - page_cache_release(ac->ac_buddy_page); + put_page(ac->ac_buddy_page); if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) mutex_unlock(&ac->ac_lg->lg_mutex); ext4_mb_collect_stats(ac); @@ -4599,8 +4599,8 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, * otherwise we'll refresh it from * on-disk bitmap and lose not-yet-available * blocks */ - page_cache_get(e4b->bd_buddy_page); - page_cache_get(e4b->bd_bitmap_page); + get_page(e4b->bd_buddy_page); + get_page(e4b->bd_bitmap_page); } while (*n) { parent = *n; diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 796ff0eafd3c..325cef48b39a 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -156,7 +156,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2, page[1] = grab_cache_page_write_begin(mapping[1], index2, fl); if (!page[1]) { unlock_page(page[0]); - page_cache_release(page[0]); + put_page(page[0]); return -ENOMEM; } /* @@ -192,7 +192,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to) create_empty_buffers(page, blocksize, 0); head = page_buffers(page); - block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits); for (bh = head, block_start = 0; bh != head || !block_start; block++, block_start = block_end, bh = bh->b_this_page) { block_end = block_start + blocksize; @@ -268,7 +268,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, int i, err2, jblocks, retries = 0; int replaced_count = 0; int from = data_offset_in_page << orig_inode->i_blkbits; 
- int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; + int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits; struct super_block *sb = orig_inode->i_sb; struct buffer_head *bh = NULL; @@ -404,9 +404,9 @@ data_copy: unlock_pages: unlock_page(pagep[0]); - page_cache_release(pagep[0]); + put_page(pagep[0]); unlock_page(pagep[1]); - page_cache_release(pagep[1]); + put_page(pagep[1]); stop_journal: ext4_journal_stop(handle); if (*err == -ENOSPC && @@ -561,7 +561,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, struct inode *orig_inode = file_inode(o_filp); struct inode *donor_inode = file_inode(d_filp); struct ext4_ext_path *path = NULL; - int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; + int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits; ext4_lblk_t o_end, o_start = orig_blk; ext4_lblk_t d_start = donor_blk; int ret; @@ -655,9 +655,9 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, if (o_end - o_start < cur_len) cur_len = o_end - o_start; - orig_page_index = o_start >> (PAGE_CACHE_SHIFT - + orig_page_index = o_start >> (PAGE_SHIFT - orig_inode->i_blkbits); - donor_page_index = d_start >> (PAGE_CACHE_SHIFT - + donor_page_index = d_start >> (PAGE_SHIFT - donor_inode->i_blkbits); offset_in_page = o_start % blocks_per_page; if (cur_len > blocks_per_page- offset_in_page) diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index b39d9c7e5952..e4fc8ea45d78 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -433,8 +433,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io, * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ - if (len < PAGE_CACHE_SIZE) - zero_user_segment(page, len, PAGE_CACHE_SIZE); + if (len < PAGE_SIZE) + zero_user_segment(page, len, PAGE_SIZE); /* * In the first loop we prepare and mark buffers to submit. We have to * mark all buffers in the page before submitting so that diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index bc7642f57dc8..dc54a4b60eba 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -23,7 +23,7 @@ * * then this code just gives up and calls the buffer_head-based read function. * It does handle a page which has holes at the end - that is a common case: - * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. + * the end-of-file on blocksize < PAGE_SIZE setups. 
* */ @@ -140,7 +140,7 @@ int ext4_mpage_readpages(struct address_space *mapping, struct inode *inode = mapping->host; const unsigned blkbits = inode->i_blkbits; - const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; + const unsigned blocks_per_page = PAGE_SIZE >> blkbits; const unsigned blocksize = 1 << blkbits; sector_t block_in_file; sector_t last_block; @@ -173,7 +173,7 @@ int ext4_mpage_readpages(struct address_space *mapping, if (page_has_buffers(page)) goto confused; - block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); + block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); last_block = block_in_file + nr_pages * blocks_per_page; last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; if (last_block > last_block_in_file) @@ -217,7 +217,7 @@ int ext4_mpage_readpages(struct address_space *mapping, set_error_page: SetPageError(page); zero_user_segment(page, 0, - PAGE_CACHE_SIZE); + PAGE_SIZE); unlock_page(page); goto next_page; } @@ -250,7 +250,7 @@ int ext4_mpage_readpages(struct address_space *mapping, } if (first_hole != blocks_per_page) { zero_user_segment(page, first_hole << blkbits, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (first_hole == 0) { SetPageUptodate(page); unlock_page(page); @@ -319,7 +319,7 @@ int ext4_mpage_readpages(struct address_space *mapping, unlock_page(page); next_page: if (pages) - page_cache_release(page); + put_page(page); } BUG_ON(pages && !list_empty(pages)); if (bio) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index df213616dc39..304c712dbe12 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1785,7 +1785,7 @@ static int parse_options(char *options, struct super_block *sb, int blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); - if (blocksize < PAGE_CACHE_SIZE) { + if (blocksize < PAGE_SIZE) { ext4_msg(sb, KERN_ERR, "can't mount with " "dioread_nolock if block size != PAGE_SIZE"); return 0; @@ -3809,7 +3809,7 @@ no_journal: } if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) && - (blocksize != PAGE_CACHE_SIZE)) { + (blocksize != PAGE_SIZE)) { ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs encryption"); goto failed_mount_wq; diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c index 6f7ee30a89ce..75ed5c2f0c16 100644 --- a/fs/ext4/symlink.c +++ b/fs/ext4/symlink.c @@ -80,12 +80,12 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry, if (res <= plen) paddr[res] = '\0'; if (cpage) - page_cache_release(cpage); + put_page(cpage); set_delayed_call(done, kfree_link, paddr); return paddr; errout: if (cpage) - page_cache_release(cpage); + put_page(cpage); kfree(paddr); return ERR_PTR(res); } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index e5c762b37239..53fec0872e60 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -223,7 +223,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) /* Allocate a new bio */ bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw)); - if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { bio_put(bio); return -EFAULT; } @@ -265,8 +265,8 @@ alloc_new: bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; - if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) < - PAGE_CACHE_SIZE) { + if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < + PAGE_SIZE) { __submit_merged_bio(io); goto alloc_new; } @@ -406,7 +406,7 @@ got_it: * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata. 
*/ if (dn.data_blkaddr == NEW_ADDR) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); unlock_page(page); return page; @@ -517,7 +517,7 @@ struct page *get_new_data_page(struct inode *inode, goto got_it; if (dn.data_blkaddr == NEW_ADDR) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); } else { f2fs_put_page(page, 1); @@ -530,8 +530,8 @@ struct page *get_new_data_page(struct inode *inode, } got_it: if (new_i_size && i_size_read(inode) < - ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) { - i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)); + ((loff_t)(index + 1) << PAGE_SHIFT)) { + i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT)); /* Only the directory inode sets new_i_size */ set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR); } @@ -570,9 +570,9 @@ alloc: /* update i_size */ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) + dn->ofs_in_node; - if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT)) + if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT)) i_size_write(dn->inode, - ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT)); + ((loff_t)(fofs + 1) << PAGE_SHIFT)); return 0; } @@ -971,7 +971,7 @@ got_it: goto confused; } } else { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); unlock_page(page); goto next_page; @@ -1021,7 +1021,7 @@ submit_and_realloc: goto next_page; set_error_page: SetPageError(page); - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); unlock_page(page); goto next_page; confused: @@ -1032,7 +1032,7 @@ confused: unlock_page(page); next_page: if (pages) - page_cache_release(page); + put_page(page); } BUG_ON(pages && !list_empty(pages)); if (bio) @@ -1136,7 +1136,7 @@ static int f2fs_write_data_page(struct page *page, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); loff_t i_size = i_size_read(inode); const pgoff_t end_index = ((unsigned long long) i_size) - >> PAGE_CACHE_SHIFT; + >> PAGE_SHIFT; unsigned offset = 0; bool need_balance_fs = false; int err = 0; @@ -1157,11 +1157,11 @@ static int f2fs_write_data_page(struct page *page, * If the offset is out-of-range of file size, * this page does not have to be written to disk. */ - offset = i_size & (PAGE_CACHE_SIZE - 1); + offset = i_size & (PAGE_SIZE - 1); if ((page->index >= end_index + 1) || !offset) goto out; - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); write: if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; @@ -1267,8 +1267,8 @@ next: cycled = 0; end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; cycled = 1; /* ignore range_cyclic tests */ @@ -1448,11 +1448,11 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi, * the block addresses when there is no need to fill the page. 
*/ if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) && - len == PAGE_CACHE_SIZE) + len == PAGE_SIZE) return 0; if (f2fs_has_inline_data(inode) || - (pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { + (pos & PAGE_MASK) >= i_size_read(inode)) { f2fs_lock_op(sbi); locked = true; } @@ -1513,7 +1513,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct page *page = NULL; - pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT; + pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT; bool need_balance = false; block_t blkaddr = NULL_ADDR; int err = 0; @@ -1561,22 +1561,22 @@ repeat: if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr); - if (len == PAGE_CACHE_SIZE) + if (len == PAGE_SIZE) goto out_update; if (PageUptodate(page)) goto out_clear; - if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { - unsigned start = pos & (PAGE_CACHE_SIZE - 1); + if ((pos & PAGE_MASK) >= i_size_read(inode)) { + unsigned start = pos & (PAGE_SIZE - 1); unsigned end = start + len; /* Reading beyond i_size is simple: memset to zero */ - zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); + zero_user_segments(page, 0, start, end, PAGE_SIZE); goto out_update; } if (blkaddr == NEW_ADDR) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); } else { struct f2fs_io_info fio = { .sbi = sbi, @@ -1688,7 +1688,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); if (inode->i_ino >= F2FS_ROOT_INO(sbi) && - (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)) + (offset % PAGE_SIZE || length != PAGE_SIZE)) return; if (PageDirty(page)) { diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 4fb6ef88a34f..f4a61a5ff79f 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -164,7 +164,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi) /* build curseg */ si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE; - si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE; + si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE; /* build dirty segmap */ si->base_mem += sizeof(struct dirty_seglist_info); @@ -201,9 +201,9 @@ get_cache: si->page_mem = 0; npages = NODE_MAPPING(sbi)->nrpages; - si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT; + si->page_mem += (unsigned long long)npages << PAGE_SHIFT; npages = META_MAPPING(sbi)->nrpages; - si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT; + si->page_mem += (unsigned long long)npages << PAGE_SHIFT; } static int stat_show(struct seq_file *s, void *v) diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 80641ad82745..af819571bce7 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -17,8 +17,8 @@ static unsigned long dir_blocks(struct inode *inode) { - return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1)) - >> PAGE_CACHE_SHIFT; + return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1)) + >> PAGE_SHIFT; } static unsigned int dir_buckets(unsigned int level, int dir_level) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index bbe2cd1265d0..7a4558d17f36 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1294,7 +1294,7 @@ static inline void f2fs_put_page(struct page *page, int unlock) f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); unlock_page(page); } - page_cache_release(page); + put_page(page); } static inline void f2fs_put_dnode(struct 
dnode_of_data *dn) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index b41c3579ea9e..443e07705c2a 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -74,11 +74,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, goto mapped; /* page is wholly or partially inside EOF */ - if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) > + if (((loff_t)(page->index + 1) << PAGE_SHIFT) > i_size_read(inode)) { unsigned offset; - offset = i_size_read(inode) & ~PAGE_CACHE_MASK; - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + offset = i_size_read(inode) & ~PAGE_MASK; + zero_user_segment(page, offset, PAGE_SIZE); } set_page_dirty(page); SetPageUptodate(page); @@ -346,11 +346,11 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) goto found; } - pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT); + pgofs = (pgoff_t)(offset >> PAGE_SHIFT); dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence); - for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) { + for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) { set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA); if (err && err != -ENOENT) { @@ -370,7 +370,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) /* find data/hole in dnode block */ for (; dn.ofs_in_node < end_offset; dn.ofs_in_node++, pgofs++, - data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) { + data_ofs = (loff_t)pgofs << PAGE_SHIFT) { block_t blkaddr; blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); @@ -508,8 +508,8 @@ void truncate_data_blocks(struct dnode_of_data *dn) static int truncate_partial_data_page(struct inode *inode, u64 from, bool cache_only) { - unsigned offset = from & (PAGE_CACHE_SIZE - 1); - pgoff_t index = from >> PAGE_CACHE_SHIFT; + unsigned offset = from & (PAGE_SIZE - 1); + pgoff_t index = from >> PAGE_SHIFT; struct address_space *mapping = inode->i_mapping; struct page *page; @@ -529,7 +529,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from, return 0; truncate_out: f2fs_wait_on_page_writeback(page, DATA, true); - zero_user(page, offset, PAGE_CACHE_SIZE - offset); + zero_user(page, offset, PAGE_SIZE - offset); if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode)) set_page_dirty(page); @@ -799,11 +799,11 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) if (ret) return ret; - pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; - pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; + pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; - off_start = offset & (PAGE_CACHE_SIZE - 1); - off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); + off_start = offset & (PAGE_SIZE - 1); + off_end = (offset + len) & (PAGE_SIZE - 1); if (pg_start == pg_end) { ret = fill_zero(inode, pg_start, off_start, @@ -813,7 +813,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) } else { if (off_start) { ret = fill_zero(inode, pg_start++, off_start, - PAGE_CACHE_SIZE - off_start); + PAGE_SIZE - off_start); if (ret) return ret; } @@ -830,8 +830,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) f2fs_balance_fs(sbi, true); - blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT; - blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT; + blk_start = (loff_t)pg_start << PAGE_SHIFT; + blk_end = (loff_t)pg_end << PAGE_SHIFT; truncate_inode_pages_range(mapping, blk_start, blk_end 
- 1); @@ -954,8 +954,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) if (ret) return ret; - pg_start = offset >> PAGE_CACHE_SHIFT; - pg_end = (offset + len) >> PAGE_CACHE_SHIFT; + pg_start = offset >> PAGE_SHIFT; + pg_end = (offset + len) >> PAGE_SHIFT; /* write out all dirty pages from offset */ ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); @@ -1006,11 +1006,11 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, truncate_pagecache_range(inode, offset, offset + len - 1); - pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; - pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; + pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; - off_start = offset & (PAGE_CACHE_SIZE - 1); - off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); + off_start = offset & (PAGE_SIZE - 1); + off_end = (offset + len) & (PAGE_SIZE - 1); if (pg_start == pg_end) { ret = fill_zero(inode, pg_start, off_start, @@ -1024,12 +1024,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, } else { if (off_start) { ret = fill_zero(inode, pg_start++, off_start, - PAGE_CACHE_SIZE - off_start); + PAGE_SIZE - off_start); if (ret) return ret; new_size = max_t(loff_t, new_size, - (loff_t)pg_start << PAGE_CACHE_SHIFT); + (loff_t)pg_start << PAGE_SHIFT); } for (index = pg_start; index < pg_end; index++) { @@ -1060,7 +1060,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, f2fs_unlock_op(sbi); new_size = max_t(loff_t, new_size, - (loff_t)(index + 1) << PAGE_CACHE_SHIFT); + (loff_t)(index + 1) << PAGE_SHIFT); } if (off_end) { @@ -1117,8 +1117,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) truncate_pagecache(inode, offset); - pg_start = offset >> PAGE_CACHE_SHIFT; - pg_end = (offset + len) >> PAGE_CACHE_SHIFT; + pg_start = offset >> PAGE_SHIFT; + pg_end = (offset + len) >> PAGE_SHIFT; delta = pg_end - pg_start; nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE; @@ -1158,11 +1158,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset, f2fs_balance_fs(sbi, true); - pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; - pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; + pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; - off_start = offset & (PAGE_CACHE_SIZE - 1); - off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); + off_start = offset & (PAGE_SIZE - 1); + off_end = (offset + len) & (PAGE_SIZE - 1); f2fs_lock_op(sbi); @@ -1180,12 +1180,12 @@ noalloc: if (pg_start == pg_end) new_size = offset + len; else if (index == pg_start && off_start) - new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT; + new_size = (loff_t)(index + 1) << PAGE_SHIFT; else if (index == pg_end) - new_size = ((loff_t)index << PAGE_CACHE_SHIFT) + + new_size = ((loff_t)index << PAGE_SHIFT) + off_end; else - new_size += PAGE_CACHE_SIZE; + new_size += PAGE_SIZE; } if (!(mode & FALLOC_FL_KEEP_SIZE) && @@ -1652,8 +1652,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, if (need_inplace_update(inode)) return -EINVAL; - pg_start = range->start >> PAGE_CACHE_SHIFT; - pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT; + pg_start = range->start >> PAGE_SHIFT; + pg_end = (range->start + range->len) >> PAGE_SHIFT; f2fs_balance_fs(sbi, true); @@ -1770,7 +1770,7 @@ clear_out: out: 
inode_unlock(inode); if (!err) - range->len = (u64)total << PAGE_CACHE_SHIFT; + range->len = (u64)total << PAGE_SHIFT; return err; } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 358214e9f707..a2fbe6f427d3 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -51,7 +51,7 @@ void read_inline_data(struct page *page, struct page *ipage) f2fs_bug_on(F2FS_P_SB(page), page->index); - zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); + zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE); /* Copy the whole inline data block */ src_addr = inline_data_addr(ipage); @@ -93,7 +93,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page) } if (page->index) - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); else read_inline_data(page, ipage); @@ -375,7 +375,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, goto out; f2fs_wait_on_page_writeback(page, DATA, true); - zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); + zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE); dentry_blk = kmap_atomic(page); @@ -405,8 +405,8 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, stat_dec_inline_dir(dir); clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); - if (i_size_read(dir) < PAGE_CACHE_SIZE) { - i_size_write(dir, PAGE_CACHE_SIZE); + if (i_size_read(dir) < PAGE_SIZE) { + i_size_write(dir, PAGE_SIZE); set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); } diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 7876f1052101..013e57932d61 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -1027,12 +1027,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry, goto errout; } - /* this is broken symlink case */ - if (unlikely(cstr.name[0] == 0)) { - res = -ENOENT; - goto errout; - } - if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) { /* Symlink data on the disk is corrupted */ res = -EIO; @@ -1046,17 +1040,23 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry, if (res < 0) goto errout; + /* this is broken symlink case */ + if (unlikely(pstr.name[0] == 0)) { + res = -ENOENT; + goto errout; + } + paddr = pstr.name; /* Null-terminate the name */ paddr[res] = '\0'; - page_cache_release(cpage); + put_page(cpage); set_delayed_call(done, kfree_link, paddr); return paddr; errout: fscrypt_fname_free_buffer(&pstr); - page_cache_release(cpage); + put_page(cpage); return ERR_PTR(res); } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 118321bd1a7f..1a33de9d84b1 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -46,11 +46,11 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) */ if (type == FREE_NIDS) { mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == NAT_ENTRIES) { mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == DIRTY_DENTS) { if (sbi->sb->s_bdi->wb.dirty_exceeded) @@ -62,13 +62,13 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) for (i = 0; i <= UPDATE_INO; i++) mem_size += (sbi->im[i].ino_num * - sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT; + sizeof(struct ino_entry)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else if (type == EXTENT_CACHE) { mem_size = (atomic_read(&sbi->total_ext_tree) * sizeof(struct extent_tree) + 
atomic_read(&sbi->total_ext_node) * - sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT; + sizeof(struct extent_node)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else { if (!sbi->sb->s_bdi->wb.dirty_exceeded) @@ -121,7 +121,7 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) src_addr = page_address(src_page); dst_addr = page_address(dst_page); - memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); + memcpy(dst_addr, src_addr, PAGE_SIZE); set_page_dirty(dst_page); f2fs_put_page(src_page, 1); diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 0b30cd2aeebd..011942f94d64 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -591,7 +591,7 @@ out: /* truncate meta pages to be used by the recovery */ truncate_inode_pages_range(META_MAPPING(sbi), - (loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1); + (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1); if (err) { truncate_inode_pages_final(NODE_MAPPING(sbi)); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 6f16b39f0b52..540669d6978e 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -885,12 +885,12 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) } } - sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE - + sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE; if (valid_sum_count <= sum_in_page) return 1; else if ((valid_sum_count - sum_in_page) <= - (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) + (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) return 2; return 3; } @@ -909,9 +909,9 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr) void *dst = page_address(page); if (src) - memcpy(dst, src, PAGE_CACHE_SIZE); + memcpy(dst, src, PAGE_SIZE); else - memset(dst, 0, PAGE_CACHE_SIZE); + memset(dst, 0, PAGE_SIZE); set_page_dirty(page); f2fs_put_page(page, 1); } @@ -1596,7 +1596,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi) s = (struct f2fs_summary *)(kaddr + offset); seg_i->sum_blk->entries[j] = *s; offset += SUMMARY_SIZE; - if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - + if (offset + SUMMARY_SIZE <= PAGE_SIZE - SUM_FOOTER_SIZE) continue; @@ -1757,7 +1757,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) *summary = seg_i->sum_blk->entries[j]; written_size += SUMMARY_SIZE; - if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE - + if (written_size + SUMMARY_SIZE <= PAGE_SIZE - SUM_FOOTER_SIZE) continue; @@ -1844,7 +1844,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, src_addr = page_address(src_page); dst_addr = page_address(dst_page); - memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); + memcpy(dst_addr, src_addr, PAGE_SIZE); set_page_dirty(dst_page); f2fs_put_page(src_page, 1); @@ -2171,7 +2171,7 @@ static int build_curseg(struct f2fs_sb_info *sbi) for (i = 0; i < NR_CURSEG_TYPE; i++) { mutex_init(&array[i].curseg_mutex); - array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!array[i].sum_blk) return -ENOMEM; init_rwsem(&array[i].journal_rwsem); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 15bb81f8dac2..006f87d69921 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -984,9 +984,25 @@ static loff_t max_file_blocks(void) return result; } +static int __f2fs_commit_super(struct buffer_head *bh, + struct f2fs_super_block *super) +{ + lock_buffer(bh); + if (super) + memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super)); + set_buffer_uptodate(bh); + 
set_buffer_dirty(bh); + unlock_buffer(bh); + + /* it's rare case, we can do fua all the time */ + return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA); +} + static inline bool sanity_check_area_boundary(struct super_block *sb, - struct f2fs_super_block *raw_super) + struct buffer_head *bh) { + struct f2fs_super_block *raw_super = (struct f2fs_super_block *) + (bh->b_data + F2FS_SUPER_OFFSET); u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr); u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr); @@ -1000,6 +1016,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb, u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main); u32 segment_count = le32_to_cpu(raw_super->segment_count); u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); + u64 main_end_blkaddr = main_blkaddr + + (segment_count_main << log_blocks_per_seg); + u64 seg_end_blkaddr = segment0_blkaddr + + (segment_count << log_blocks_per_seg); if (segment0_blkaddr != cp_blkaddr) { f2fs_msg(sb, KERN_INFO, @@ -1044,22 +1064,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb, return true; } - if (main_blkaddr + (segment_count_main << log_blocks_per_seg) != - segment0_blkaddr + (segment_count << log_blocks_per_seg)) { + if (main_end_blkaddr > seg_end_blkaddr) { f2fs_msg(sb, KERN_INFO, - "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)", + "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)", main_blkaddr, - segment0_blkaddr + (segment_count << log_blocks_per_seg), + segment0_blkaddr + + (segment_count << log_blocks_per_seg), segment_count_main << log_blocks_per_seg); return true; + } else if (main_end_blkaddr < seg_end_blkaddr) { + int err = 0; + char *res; + + /* fix in-memory information all the time */ + raw_super->segment_count = cpu_to_le32((main_end_blkaddr - + segment0_blkaddr) >> log_blocks_per_seg); + + if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) { + res = "internally"; + } else { + err = __f2fs_commit_super(bh, NULL); + res = err ? 
"failed" : "done"; + } + f2fs_msg(sb, KERN_INFO, + "Fix alignment : %s, start(%u) end(%u) block(%u)", + res, main_blkaddr, + segment0_blkaddr + + (segment_count << log_blocks_per_seg), + segment_count_main << log_blocks_per_seg); + if (err) + return true; } - return false; } static int sanity_check_raw_super(struct super_block *sb, - struct f2fs_super_block *raw_super) + struct buffer_head *bh) { + struct f2fs_super_block *raw_super = (struct f2fs_super_block *) + (bh->b_data + F2FS_SUPER_OFFSET); unsigned int blocksize; if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { @@ -1070,10 +1113,10 @@ static int sanity_check_raw_super(struct super_block *sb, } /* Currently, support only 4KB page cache size */ - if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) { + if (F2FS_BLKSIZE != PAGE_SIZE) { f2fs_msg(sb, KERN_INFO, "Invalid page_cache_size (%lu), supports only 4KB\n", - PAGE_CACHE_SIZE); + PAGE_SIZE); return 1; } @@ -1126,7 +1169,7 @@ static int sanity_check_raw_super(struct super_block *sb, } /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ - if (sanity_check_area_boundary(sb, raw_super)) + if (sanity_check_area_boundary(sb, bh)) return 1; return 0; @@ -1202,7 +1245,7 @@ static int read_raw_super_block(struct super_block *sb, { int block; struct buffer_head *bh; - struct f2fs_super_block *super, *buf; + struct f2fs_super_block *super; int err = 0; super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL); @@ -1218,11 +1261,8 @@ static int read_raw_super_block(struct super_block *sb, continue; } - buf = (struct f2fs_super_block *) - (bh->b_data + F2FS_SUPER_OFFSET); - /* sanity checking of raw super */ - if (sanity_check_raw_super(sb, buf)) { + if (sanity_check_raw_super(sb, bh)) { f2fs_msg(sb, KERN_ERR, "Can't find valid F2FS filesystem in %dth superblock", block + 1); @@ -1232,7 +1272,8 @@ static int read_raw_super_block(struct super_block *sb, } if (!*raw_super) { - memcpy(super, buf, sizeof(*super)); + memcpy(super, bh->b_data + F2FS_SUPER_OFFSET, + sizeof(*super)); *valid_super_block = block; *raw_super = super; } @@ -1252,42 +1293,29 @@ static int read_raw_super_block(struct super_block *sb, return err; } -static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block) +int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) { - struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi); struct buffer_head *bh; int err; - bh = sb_getblk(sbi->sb, block); + /* write back-up superblock first */ + bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1); if (!bh) return -EIO; - - lock_buffer(bh); - memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super)); - set_buffer_uptodate(bh); - set_buffer_dirty(bh); - unlock_buffer(bh); - - /* it's rare case, we can do fua all the time */ - err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA); + err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); brelse(bh); - return err; -} - -int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) -{ - int err; - - /* write back-up superblock first */ - err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 
0 : 1); - /* if we are in recovery path, skip writing valid superblock */ if (recover || err) return err; /* write current valid superblock */ - return __f2fs_commit_super(sbi, sbi->valid_super_block); + bh = sb_getblk(sbi->sb, sbi->valid_super_block); + if (!bh) + return -EIO; + err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); + brelse(bh); + return err; } static int f2fs_fill_super(struct super_block *sb, void *data, int silent) @@ -1442,7 +1470,7 @@ try_onemore: seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); if (__exist_node_summaries(sbi)) sbi->kbytes_written = - le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written); + le64_to_cpu(seg_i->journal->info.kbytes_written); build_gc_manager(sbi); diff --git a/fs/fat/Kconfig b/fs/fat/Kconfig index 182f9ffe2b51..3ff1772f612e 100644 --- a/fs/fat/Kconfig +++ b/fs/fat/Kconfig @@ -93,8 +93,24 @@ config FAT_DEFAULT_IOCHARSET that most of your FAT filesystems use, and can be overridden with the "iocharset" mount option for FAT filesystems. Note that "utf8" is not recommended for FAT filesystems. - If unsure, you shouldn't set "utf8" here. + If unsure, you shouldn't set "utf8" here - select the next option + instead if you would like to use UTF-8 encoded file names by default. See <file:Documentation/filesystems/vfat.txt> for more information. Enable any character sets you need in File Systems/Native Language Support. + +config FAT_DEFAULT_UTF8 + bool "Enable FAT UTF-8 option by default" + depends on VFAT_FS + default n + help + Set this if you would like to have "utf8" mount option set + by default when mounting FAT filesystems. + + Even if you say Y here can always disable UTF-8 for + particular mount by adding "utf8=0" to mount options. + + Say Y if you use UTF-8 encoding for file names, N otherwise. + + See <file:Documentation/filesystems/vfat.txt> for more information. 
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index a5599052116c..226281068a46 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1127,7 +1127,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, } opts->name_check = 'n'; opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0; - opts->utf8 = opts->unicode_xlate = 0; + opts->unicode_xlate = 0; opts->numtail = 1; opts->usefree = opts->nocase = 0; opts->tz_set = 0; @@ -1135,6 +1135,8 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, opts->errors = FAT_ERRORS_RO; *debug = 0; + opts->utf8 = IS_ENABLED(CONFIG_FAT_DEFAULT_UTF8) && is_vfat; + if (!options) goto out; diff --git a/fs/fhandle.c b/fs/fhandle.c index d59712dfa3e7..ca3c3dd01789 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c @@ -228,7 +228,7 @@ long do_handle_open(int mountdirfd, path_put(&path); return fd; } - file = file_open_root(path.dentry, path.mnt, "", open_flag); + file = file_open_root(path.dentry, path.mnt, "", open_flag, 0); if (IS_ERR(file)) { put_unused_fd(fd); retval = PTR_ERR(file); diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c index cb84f0fcc72a..bfc780c682fb 100644 --- a/fs/freevxfs/vxfs_immed.c +++ b/fs/freevxfs/vxfs_immed.c @@ -66,11 +66,11 @@ static int vxfs_immed_readpage(struct file *fp, struct page *pp) { struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host); - u_int64_t offset = (u_int64_t)pp->index << PAGE_CACHE_SHIFT; + u_int64_t offset = (u_int64_t)pp->index << PAGE_SHIFT; caddr_t kaddr; kaddr = kmap(pp); - memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE); + memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_SIZE); kunmap(pp); flush_dcache_page(pp); diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c index 1cff72df0389..a49e0cfbb686 100644 --- a/fs/freevxfs/vxfs_lookup.c +++ b/fs/freevxfs/vxfs_lookup.c @@ -45,7 +45,7 @@ /* * Number of VxFS blocks per page. 
*/ -#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_CACHE_SIZE / (sbp)->s_blocksize)) +#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_SIZE / (sbp)->s_blocksize)) static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int); @@ -175,7 +175,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp) if (de) { ino = de->d_ino; kunmap(pp); - page_cache_release(pp); + put_page(pp); } return (ino); @@ -255,8 +255,8 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx) nblocks = dir_blocks(ip); pblocks = VXFS_BLOCK_PER_PAGE(sbp); - page = pos >> PAGE_CACHE_SHIFT; - offset = pos & ~PAGE_CACHE_MASK; + page = pos >> PAGE_SHIFT; + offset = pos & ~PAGE_MASK; block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks; for (; page < npages; page++, block = 0) { @@ -289,7 +289,7 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx) continue; offset = (char *)de - kaddr; - ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; + ctx->pos = ((page << PAGE_SHIFT) | offset) + 2; if (!dir_emit(ctx, de->d_name, de->d_namelen, de->d_ino, DT_UNKNOWN)) { vxfs_put_page(pp); @@ -301,6 +301,6 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx) vxfs_put_page(pp); offset = 0; } - ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; + ctx->pos = ((page << PAGE_SHIFT) | offset) + 2; return 0; } diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c index 5d318c44f855..e806694d4145 100644 --- a/fs/freevxfs/vxfs_subr.c +++ b/fs/freevxfs/vxfs_subr.c @@ -50,7 +50,7 @@ inline void vxfs_put_page(struct page *pp) { kunmap(pp); - page_cache_release(pp); + put_page(pp); } /** diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 5c46ed9f3e14..592cea54cea0 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -33,7 +33,7 @@ /* * 4MB minimal write chunk size */ -#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10)) +#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10)) struct wb_completion { atomic_t cnt; @@ -281,13 +281,15 @@ locked_inode_to_wb_and_lock_list(struct inode *inode) wb_get(wb); spin_unlock(&inode->i_lock); spin_lock(&wb->list_lock); - wb_put(wb); /* not gonna deref it anymore */ /* i_wb may have changed inbetween, can't use inode_to_wb() */ - if (likely(wb == inode->i_wb)) - return wb; /* @inode already has ref */ + if (likely(wb == inode->i_wb)) { + wb_put(wb); /* @inode already has ref */ + return wb; + } spin_unlock(&wb->list_lock); + wb_put(wb); cpu_relax(); spin_lock(&inode->i_lock); } @@ -1337,10 +1339,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode() * and does more profound writeback list handling in writeback_sb_inodes(). */ -static int -writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, - struct writeback_control *wbc) +static int writeback_single_inode(struct inode *inode, + struct writeback_control *wbc) { + struct bdi_writeback *wb; int ret = 0; spin_lock(&inode->i_lock); @@ -1378,7 +1380,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, ret = __writeback_single_inode(inode, wbc); wbc_detach_inode(wbc); - spin_lock(&wb->list_lock); + + wb = inode_to_wb_and_lock_list(inode); spin_lock(&inode->i_lock); /* * If inode is clean, remove it from writeback lists. 
Otherwise don't @@ -1453,6 +1456,7 @@ static long writeback_sb_inodes(struct super_block *sb, while (!list_empty(&wb->b_io)) { struct inode *inode = wb_inode(wb->b_io.prev); + struct bdi_writeback *tmp_wb; if (inode->i_sb != sb) { if (work->sb) { @@ -1543,15 +1547,23 @@ static long writeback_sb_inodes(struct super_block *sb, cond_resched(); } - - spin_lock(&wb->list_lock); + /* + * Requeue @inode if still dirty. Be careful as @inode may + * have been switched to another wb in the meantime. + */ + tmp_wb = inode_to_wb_and_lock_list(inode); spin_lock(&inode->i_lock); if (!(inode->i_state & I_DIRTY_ALL)) wrote++; - requeue_inode(inode, wb, &wbc); + requeue_inode(inode, tmp_wb, &wbc); inode_sync_complete(inode); spin_unlock(&inode->i_lock); + if (unlikely(tmp_wb != wb)) { + spin_unlock(&tmp_wb->list_lock); + spin_lock(&wb->list_lock); + } + /* * bail out to wb_writeback() often enough to check * background threshold and other termination conditions. @@ -2338,7 +2350,6 @@ EXPORT_SYMBOL(sync_inodes_sb); */ int write_inode_now(struct inode *inode, int sync) { - struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; struct writeback_control wbc = { .nr_to_write = LONG_MAX, .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE, @@ -2350,7 +2361,7 @@ int write_inode_now(struct inode *inode, int sync) wbc.nr_to_write = 0; might_sleep(); - return writeback_single_inode(inode, wb, &wbc); + return writeback_single_inode(inode, &wbc); } EXPORT_SYMBOL(write_inode_now); @@ -2367,7 +2378,7 @@ EXPORT_SYMBOL(write_inode_now); */ int sync_inode(struct inode *inode, struct writeback_control *wbc) { - return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc); + return writeback_single_inode(inode, wbc); } EXPORT_SYMBOL(sync_inode); diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 6b35fc4860a0..3078b679fcd1 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -113,7 +113,7 @@ try_again: wake_up_bit(&cookie->flags, 0); if (xpage) - page_cache_release(xpage); + put_page(xpage); __fscache_uncache_page(cookie, page); return true; @@ -164,7 +164,7 @@ static void fscache_end_page_write(struct fscache_object *object, } spin_unlock(&object->lock); if (xpage) - page_cache_release(xpage); + put_page(xpage); } /* @@ -884,7 +884,7 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) spin_unlock(&cookie->stores_lock); for (i = n - 1; i >= 0; i--) - page_cache_release(results[i]); + put_page(results[i]); } _leave(""); @@ -982,7 +982,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, radix_tree_tag_set(&cookie->stores, page->index, FSCACHE_COOKIE_PENDING_TAG); - page_cache_get(page); + get_page(page); /* we only want one writer at a time, but we do need to queue new * writers after exclusive ops */ @@ -1026,7 +1026,7 @@ submit_failed: radix_tree_delete(&cookie->stores, page->index); spin_unlock(&cookie->stores_lock); wake_cookie = __fscache_unuse_cookie(cookie); - page_cache_release(page); + put_page(page); ret = -ENOBUFS; goto nobufs; diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ebb5e37455a0..cbece1221417 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -897,7 +897,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) return err; } - page_cache_get(newpage); + get_page(newpage); if (!(buf->flags & PIPE_BUF_FLAG_LRU)) lru_cache_add_file(newpage); @@ -912,12 +912,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) if (err) { unlock_page(newpage); - page_cache_release(newpage); + put_page(newpage); return err; } 
unlock_page(oldpage); - page_cache_release(oldpage); + put_page(oldpage); cs->len = 0; return 0; @@ -951,7 +951,7 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, fuse_copy_finish(cs); buf = cs->pipebufs; - page_cache_get(page); + get_page(page); buf->page = page; buf->offset = offset; buf->len = count; @@ -1435,7 +1435,7 @@ out_unlock: out: for (; page_nr < cs.nr_segs; page_nr++) - page_cache_release(bufs[page_nr].page); + put_page(bufs[page_nr].page); kfree(bufs); return ret; @@ -1632,8 +1632,8 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, goto out_up_killsb; mapping = inode->i_mapping; - index = outarg.offset >> PAGE_CACHE_SHIFT; - offset = outarg.offset & ~PAGE_CACHE_MASK; + index = outarg.offset >> PAGE_SHIFT; + offset = outarg.offset & ~PAGE_MASK; file_size = i_size_read(inode); end = outarg.offset + outarg.size; if (end > file_size) { @@ -1652,13 +1652,13 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, if (!page) goto out_iput; - this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset); + this_num = min_t(unsigned, num, PAGE_SIZE - offset); err = fuse_copy_page(cs, &page, offset, this_num, 0); if (!err && offset == 0 && - (this_num == PAGE_CACHE_SIZE || file_size == end)) + (this_num == PAGE_SIZE || file_size == end)) SetPageUptodate(page); unlock_page(page); - page_cache_release(page); + put_page(page); if (err) goto out_iput; @@ -1697,7 +1697,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, size_t total_len = 0; int num_pages; - offset = outarg->offset & ~PAGE_CACHE_MASK; + offset = outarg->offset & ~PAGE_MASK; file_size = i_size_read(inode); num = outarg->size; @@ -1720,7 +1720,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, req->page_descs[0].offset = offset; req->end = fuse_retrieve_end; - index = outarg->offset >> PAGE_CACHE_SHIFT; + index = outarg->offset >> PAGE_SHIFT; while (num && req->num_pages < num_pages) { struct page *page; @@ -1730,7 +1730,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, if (!page) break; - this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset); + this_num = min_t(unsigned, num, PAGE_SIZE - offset); req->pages[req->num_pages] = page; req->page_descs[req->num_pages].length = this_num; req->num_pages++; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 9dde38f12c07..719924d6c706 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -348,7 +348,7 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from, pgoff_t curr_index; BUG_ON(req->inode != inode); - curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT; + curr_index = req->misc.write.in.offset >> PAGE_SHIFT; if (idx_from < curr_index + req->num_pages && curr_index <= idx_to) { found = true; @@ -683,11 +683,11 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode, * present there. 
*/ int i; - int start_idx = num_read >> PAGE_CACHE_SHIFT; - size_t off = num_read & (PAGE_CACHE_SIZE - 1); + int start_idx = num_read >> PAGE_SHIFT; + size_t off = num_read & (PAGE_SIZE - 1); for (i = start_idx; i < req->num_pages; i++) { - zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE); + zero_user_segment(req->pages[i], off, PAGE_SIZE); off = 0; } } else { @@ -704,7 +704,7 @@ static int fuse_do_readpage(struct file *file, struct page *page) struct fuse_req *req; size_t num_read; loff_t pos = page_offset(page); - size_t count = PAGE_CACHE_SIZE; + size_t count = PAGE_SIZE; u64 attr_ver; int err; @@ -789,7 +789,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) else SetPageError(page); unlock_page(page); - page_cache_release(page); + put_page(page); } if (req->ff) fuse_file_put(req->ff, false); @@ -800,7 +800,7 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file) struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fc; loff_t pos = page_offset(req->pages[0]); - size_t count = req->num_pages << PAGE_CACHE_SHIFT; + size_t count = req->num_pages << PAGE_SHIFT; req->out.argpages = 1; req->out.page_zeroing = 1; @@ -836,7 +836,7 @@ static int fuse_readpages_fill(void *_data, struct page *page) if (req->num_pages && (req->num_pages == FUSE_MAX_PAGES_PER_REQ || - (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || + (req->num_pages + 1) * PAGE_SIZE > fc->max_read || req->pages[req->num_pages - 1]->index + 1 != page->index)) { int nr_alloc = min_t(unsigned, data->nr_pages, FUSE_MAX_PAGES_PER_REQ); @@ -858,7 +858,7 @@ static int fuse_readpages_fill(void *_data, struct page *page) return -EIO; } - page_cache_get(page); + get_page(page); req->pages[req->num_pages] = page; req->page_descs[req->num_pages].length = PAGE_SIZE; req->num_pages++; @@ -1003,17 +1003,17 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, for (i = 0; i < req->num_pages; i++) { struct page *page = req->pages[i]; - if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE) + if (!req->out.h.error && !offset && count >= PAGE_SIZE) SetPageUptodate(page); - if (count > PAGE_CACHE_SIZE - offset) - count -= PAGE_CACHE_SIZE - offset; + if (count > PAGE_SIZE - offset) + count -= PAGE_SIZE - offset; else count = 0; offset = 0; unlock_page(page); - page_cache_release(page); + put_page(page); } return res; @@ -1024,7 +1024,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, struct iov_iter *ii, loff_t pos) { struct fuse_conn *fc = get_fuse_conn(mapping->host); - unsigned offset = pos & (PAGE_CACHE_SIZE - 1); + unsigned offset = pos & (PAGE_SIZE - 1); size_t count = 0; int err; @@ -1034,8 +1034,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, do { size_t tmp; struct page *page; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset, + pgoff_t index = pos >> PAGE_SHIFT; + size_t bytes = min_t(size_t, PAGE_SIZE - offset, iov_iter_count(ii)); bytes = min_t(size_t, bytes, fc->max_write - count); @@ -1059,7 +1059,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, iov_iter_advance(ii, tmp); if (!tmp) { unlock_page(page); - page_cache_release(page); + put_page(page); bytes = min(bytes, iov_iter_single_seg_count(ii)); goto again; } @@ -1072,7 +1072,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, count += tmp; pos += tmp; offset += tmp; - if (offset == PAGE_CACHE_SIZE) + if (offset == PAGE_SIZE) offset = 0; if (!fc->big_writes) @@ -1086,8 
+1086,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, static inline unsigned fuse_wr_pages(loff_t pos, size_t len) { return min_t(unsigned, - ((pos + len - 1) >> PAGE_CACHE_SHIFT) - - (pos >> PAGE_CACHE_SHIFT) + 1, + ((pos + len - 1) >> PAGE_SHIFT) - + (pos >> PAGE_SHIFT) + 1, FUSE_MAX_PAGES_PER_REQ); } @@ -1205,8 +1205,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) goto out; invalidate_mapping_pages(file->f_mapping, - pos >> PAGE_CACHE_SHIFT, - endbyte >> PAGE_CACHE_SHIFT); + pos >> PAGE_SHIFT, + endbyte >> PAGE_SHIFT); written += written_buffered; iocb->ki_pos = pos + written_buffered; @@ -1315,8 +1315,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, size_t nmax = write ? fc->max_write : fc->max_read; loff_t pos = *ppos; size_t count = iov_iter_count(iter); - pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT; - pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT; + pgoff_t idx_from = pos >> PAGE_SHIFT; + pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT; ssize_t res = 0; struct fuse_req *req; int err = 0; @@ -1466,7 +1466,7 @@ __acquires(fc->lock) { struct fuse_inode *fi = get_fuse_inode(req->inode); struct fuse_write_in *inarg = &req->misc.write.in; - __u64 data_size = req->num_pages * PAGE_CACHE_SIZE; + __u64 data_size = req->num_pages * PAGE_SIZE; if (!fc->connected) goto out_free; @@ -1727,7 +1727,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req, list_del(&new_req->writepages_entry); list_for_each_entry(old_req, &fi->writepages, writepages_entry) { BUG_ON(old_req->inode != new_req->inode); - curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT; + curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT; if (curr_index <= page->index && page->index < curr_index + old_req->num_pages) { found = true; @@ -1742,7 +1742,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req, new_req->num_pages = 1; for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) { BUG_ON(tmp->inode != new_req->inode); - curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT; + curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT; if (tmp->num_pages == 1 && curr_index == page->index) { old_req = tmp; @@ -1799,7 +1799,7 @@ static int fuse_writepages_fill(struct page *page, if (req && req->num_pages && (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ || - (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write || + (req->num_pages + 1) * PAGE_SIZE > fc->max_write || data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) { fuse_writepages_send(data); data->req = NULL; @@ -1924,7 +1924,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct fuse_conn *fc = get_fuse_conn(file_inode(file)); struct page *page; loff_t fsize; @@ -1938,15 +1938,15 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, fuse_wait_on_page_writeback(mapping->host, page->index); - if (PageUptodate(page) || len == PAGE_CACHE_SIZE) + if (PageUptodate(page) || len == PAGE_SIZE) goto success; /* * Check if the start this page comes after the end of file, in which * case the readpage can be optimized away. 
*/ fsize = i_size_read(mapping->host); - if (fsize <= (pos & PAGE_CACHE_MASK)) { - size_t off = pos & ~PAGE_CACHE_MASK; + if (fsize <= (pos & PAGE_MASK)) { + size_t off = pos & ~PAGE_MASK; if (off) zero_user_segment(page, 0, off); goto success; @@ -1960,7 +1960,7 @@ success: cleanup: unlock_page(page); - page_cache_release(page); + put_page(page); error: return err; } @@ -1973,16 +1973,16 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, if (!PageUptodate(page)) { /* Zero any unwritten bytes at the end of the page */ - size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK; + size_t endoff = (pos + copied) & ~PAGE_MASK; if (endoff) - zero_user_segment(page, endoff, PAGE_CACHE_SIZE); + zero_user_segment(page, endoff, PAGE_SIZE); SetPageUptodate(page); } fuse_write_update_size(inode, pos + copied); set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 4d69d5c0bedc..1ce67668a8e1 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -339,11 +339,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, fuse_invalidate_attr(inode); if (offset >= 0) { - pg_start = offset >> PAGE_CACHE_SHIFT; + pg_start = offset >> PAGE_SHIFT; if (len <= 0) pg_end = -1; else - pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; + pg_end = (offset + len - 1) >> PAGE_SHIFT; invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); } @@ -864,7 +864,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) process_init_limits(fc, arg); if (arg->minor >= 6) { - ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; + ra_pages = arg->max_readahead / PAGE_SIZE; if (arg->flags & FUSE_ASYNC_READ) fc->async_read = 1; if (!(arg->flags & FUSE_POSIX_LOCKS)) @@ -901,7 +901,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) if (arg->time_gran && arg->time_gran <= 1000000000) fc->sb->s_time_gran = arg->time_gran; } else { - ra_pages = fc->max_read / PAGE_CACHE_SIZE; + ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; fc->no_flock = 1; } @@ -922,7 +922,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) arg->major = FUSE_KERNEL_VERSION; arg->minor = FUSE_KERNEL_MINOR_VERSION; - arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; + arg->max_readahead = fc->bdi.ra_pages * PAGE_SIZE; arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | @@ -955,7 +955,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) int err; fc->bdi.name = "fuse"; - fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; + fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; /* fuse does it's own writeback accounting */ fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT; @@ -1053,8 +1053,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) goto err; #endif } else { - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; } sb->s_magic = FUSE_SUPER_MAGIC; sb->s_op = &fuse_super_operations; diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index aa016e4b8bec..1bbbee945f46 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -101,7 +101,7 @@ static int gfs2_writepage_common(struct page *page, struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = 
GFS2_SB(inode); loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; unsigned offset; if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) @@ -109,9 +109,9 @@ static int gfs2_writepage_common(struct page *page, if (current->journal_info) goto redirty; /* Is the page fully outside i_size? (truncate in progress) */ - offset = i_size & (PAGE_CACHE_SIZE-1); + offset = i_size & (PAGE_SIZE-1); if (page->index > end_index || (page->index == end_index && !offset)) { - page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); goto out; } return 1; @@ -238,7 +238,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping, { struct inode *inode = mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); - unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); + unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize); int i; int ret; @@ -366,8 +366,8 @@ static int gfs2_write_cache_jdata(struct address_space *mapping, cycled = 0; end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; cycled = 1; /* ignore range_cyclic tests */ @@ -458,7 +458,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) * so we need to supply one here. It doesn't happen often. */ if (unlikely(page->index)) { - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); SetPageUptodate(page); return 0; } @@ -471,7 +471,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode))) dsize = (dibh->b_size - sizeof(struct gfs2_dinode)); memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); - memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); + memset(kaddr + dsize, 0, PAGE_SIZE - dsize); kunmap_atomic(kaddr); flush_dcache_page(page); brelse(dibh); @@ -560,8 +560,8 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, unsigned size) { struct address_space *mapping = ip->i_inode.i_mapping; - unsigned long index = *pos / PAGE_CACHE_SIZE; - unsigned offset = *pos & (PAGE_CACHE_SIZE - 1); + unsigned long index = *pos / PAGE_SIZE; + unsigned offset = *pos & (PAGE_SIZE - 1); unsigned copied = 0; unsigned amt; struct page *page; @@ -569,15 +569,15 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, do { amt = size - copied; - if (offset + size > PAGE_CACHE_SIZE) - amt = PAGE_CACHE_SIZE - offset; + if (offset + size > PAGE_SIZE) + amt = PAGE_SIZE - offset; page = read_cache_page(mapping, index, __gfs2_readpage, NULL); if (IS_ERR(page)) return PTR_ERR(page); p = kmap_atomic(page); memcpy(buf + copied, p + offset, amt); kunmap_atomic(p); - page_cache_release(page); + put_page(page); copied += amt; index++; offset = 0; @@ -651,8 +651,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, unsigned requested = 0; int alloc_required; int error = 0; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + pgoff_t index = pos >> PAGE_SHIFT; + unsigned from = pos & (PAGE_SIZE - 1); struct page *page; gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); @@ -697,7 +697,7 @@ static int gfs2_write_begin(struct file *file, struct 
address_space *mapping, rblocks += gfs2_rg_blocks(ip, requested); error = gfs2_trans_begin(sdp, rblocks, - PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); + PAGE_SIZE/sdp->sd_sb.sb_bsize); if (error) goto out_trans_fail; @@ -727,7 +727,7 @@ out: return 0; unlock_page(page); - page_cache_release(page); + put_page(page); gfs2_trans_end(sdp); if (pos + len > ip->i_inode.i_size) @@ -827,7 +827,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, if (!PageUptodate(page)) SetPageUptodate(page); unlock_page(page); - page_cache_release(page); + put_page(page); if (copied) { if (inode->i_size < to) @@ -877,7 +877,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping, struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct buffer_head *dibh; - unsigned int from = pos & (PAGE_CACHE_SIZE - 1); + unsigned int from = pos & (PAGE_SIZE - 1); unsigned int to = from + len; int ret; struct gfs2_trans *tr = current->journal_info; @@ -888,7 +888,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping, ret = gfs2_meta_inode_buffer(ip, &dibh); if (unlikely(ret)) { unlock_page(page); - page_cache_release(page); + put_page(page); goto failed; } @@ -992,7 +992,7 @@ static void gfs2_invalidatepage(struct page *page, unsigned int offset, { struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); unsigned int stop = offset + length; - int partial_page = (offset || length < PAGE_CACHE_SIZE); + int partial_page = (offset || length < PAGE_SIZE); struct buffer_head *bh, *head; unsigned long pos = 0; @@ -1082,7 +1082,7 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, * the first place, mapping->nr_pages will always be zero. */ if (mapping->nrpages) { - loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1); + loff_t lstart = offset & ~(PAGE_SIZE - 1); loff_t len = iov_iter_count(iter); loff_t end = PAGE_ALIGN(offset + len) - 1; diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 0860f0b5b3f1..24ce1cdd434a 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -75,7 +75,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, dsize = dibh->b_size - sizeof(struct gfs2_dinode); memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); - memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); + memset(kaddr + dsize, 0, PAGE_SIZE - dsize); kunmap(page); SetPageUptodate(page); @@ -98,7 +98,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, if (release) { unlock_page(page); - page_cache_release(page); + put_page(page); } return 0; @@ -932,8 +932,8 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from) { struct inode *inode = mapping->host; struct gfs2_inode *ip = GFS2_I(inode); - unsigned long index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned long index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize, iblock, length, pos; struct buffer_head *bh; struct page *page; @@ -945,7 +945,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from) blocksize = inode->i_sb->s_blocksize; length = blocksize - (offset & (blocksize - 1)); - iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); + iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); @@ -989,7 +989,7 @@ static int gfs2_block_truncate_page(struct address_space 
*mapping, loff_t from) mark_buffer_dirty(bh); unlock: unlock_page(page); - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index c9384f932975..208efc70ad49 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -354,8 +354,8 @@ static int gfs2_allocate_page_backing(struct page *page) { struct inode *inode = page->mapping->host; struct buffer_head bh; - unsigned long size = PAGE_CACHE_SIZE; - u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + unsigned long size = PAGE_SIZE; + u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits); do { bh.b_state = 0; @@ -386,7 +386,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_alloc_parms ap = { .aflags = 0, }; unsigned long last_index; - u64 pos = page->index << PAGE_CACHE_SHIFT; + u64 pos = page->index << PAGE_SHIFT; unsigned int data_blocks, ind_blocks, rblocks; struct gfs2_holder gh; loff_t size; @@ -401,7 +401,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto out; - gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE); + gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq(&gh); @@ -411,7 +411,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); set_bit(GIF_SW_PAGED, &ip->i_flags); - if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) { + if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) { lock_page(page); if (!PageUptodate(page) || page->mapping != inode->i_mapping) { ret = -EAGAIN; @@ -424,7 +424,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto out_unlock; - gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); + gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks); ap.target = data_blocks + ind_blocks; ret = gfs2_quota_lock_check(ip, &ap); if (ret) @@ -447,7 +447,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) lock_page(page); ret = -EINVAL; size = i_size_read(inode); - last_index = (size - 1) >> PAGE_CACHE_SHIFT; + last_index = (size - 1) >> PAGE_SHIFT; /* Check page index against inode size */ if (size == 0 || (page->index > last_index)) goto out_trans_end; @@ -873,7 +873,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t rblocks += data_blocks ? 
data_blocks : 1; error = gfs2_trans_begin(sdp, rblocks, - PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); + PAGE_SIZE/sdp->sd_sb.sb_bsize); if (error) goto out_trans_fail; diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index e137d96f1b17..0448524c11bc 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -124,7 +124,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) if (mapping == NULL) mapping = &sdp->sd_aspace; - shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift; + shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift; index = blkno >> shift; /* convert block to page */ bufnum = blkno - (index << shift); /* block buf index within page */ @@ -154,7 +154,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) map_bh(bh, sdp->sd_vfs, blkno); unlock_page(page); - page_cache_release(page); + put_page(page); return bh; } diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index a39891344259..ce7d69a2fdc0 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -701,7 +701,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index, unsigned to_write = bytes, pg_off = off; int done = 0; - blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift); + blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift); boff = off % bsize; page = find_or_create_page(mapping, index, GFP_NOFS); @@ -753,13 +753,13 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index, flush_dcache_page(page); kunmap_atomic(kaddr); unlock_page(page); - page_cache_release(page); + put_page(page); return 0; unlock_out: unlock_page(page); - page_cache_release(page); + put_page(page); return -EIO; } @@ -773,13 +773,13 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp, nbytes = sizeof(struct gfs2_quota); - pg_beg = loc >> PAGE_CACHE_SHIFT; - pg_off = loc % PAGE_CACHE_SIZE; + pg_beg = loc >> PAGE_SHIFT; + pg_off = loc % PAGE_SIZE; /* If the quota straddles a page boundary, split the write in two */ - if ((pg_off + nbytes) > PAGE_CACHE_SIZE) { + if ((pg_off + nbytes) > PAGE_SIZE) { pg_oflow = 1; - overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE; + overflow = (pg_off + nbytes) - PAGE_SIZE; } ptr = qp; diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 07c0265aa195..99a0bdac8796 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -918,9 +918,8 @@ static int read_rindex_entry(struct gfs2_inode *ip) goto fail; rgd->rd_gl->gl_object = rgd; - rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK; - rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr + - rgd->rd_length) * bsize) - 1; + rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK; + rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1; rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr; rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED); if (rgd->rd_data > sdp->sd_max_rg_data) diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index 221719eac5de..d77d844b668b 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -278,14 +278,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) mapping = tree->inode->i_mapping; off = (loff_t)cnid * tree->node_size; - block = off >> PAGE_CACHE_SHIFT; - node->page_offset = off & ~PAGE_CACHE_MASK; + block = off >> PAGE_SHIFT; + node->page_offset = off & ~PAGE_MASK; for (i = 0; i < tree->pages_per_bnode; i++) { page = read_mapping_page(mapping, block++, NULL); if (IS_ERR(page)) goto fail; if (PageError(page)) { - 
page_cache_release(page); + put_page(page); goto fail; } node->page[i] = page; @@ -401,7 +401,7 @@ void hfs_bnode_free(struct hfs_bnode *node) for (i = 0; i < node->tree->pages_per_bnode; i++) if (node->page[i]) - page_cache_release(node->page[i]); + put_page(node->page[i]); kfree(node); } @@ -429,11 +429,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num) pagep = node->page; memset(kmap(*pagep) + node->page_offset, 0, - min((int)PAGE_CACHE_SIZE, (int)tree->node_size)); + min((int)PAGE_SIZE, (int)tree->node_size)); set_page_dirty(*pagep); kunmap(*pagep); for (i = 1; i < tree->pages_per_bnode; i++) { - memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE); + memset(kmap(*++pagep), 0, PAGE_SIZE); set_page_dirty(*pagep); kunmap(*pagep); } diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c index 1ab19e660e69..37cdd955eceb 100644 --- a/fs/hfs/btree.c +++ b/fs/hfs/btree.c @@ -116,14 +116,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke } tree->node_size_shift = ffs(size) - 1; - tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT; kunmap(page); - page_cache_release(page); + put_page(page); return tree; fail_page: - page_cache_release(page); + put_page(page); free_inode: tree->inode->i_mapping->a_ops = &hfs_aops; iput(tree->inode); @@ -257,9 +257,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) off = off16; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); + pagep = node->page + (off >> PAGE_SHIFT); data = kmap(*pagep); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; idx = 0; for (;;) { @@ -279,7 +279,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) } } } - if (++off >= PAGE_CACHE_SIZE) { + if (++off >= PAGE_SIZE) { kunmap(*pagep); data = kmap(*++pagep); off = 0; @@ -302,9 +302,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) len = hfs_brec_lenoff(node, 0, &off16); off = off16; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); + pagep = node->page + (off >> PAGE_SHIFT); data = kmap(*pagep); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; } } @@ -348,9 +348,9 @@ void hfs_bmap_free(struct hfs_bnode *node) len = hfs_brec_lenoff(node, 0, &off); } off += node->page_offset + nidx / 8; - page = node->page[off >> PAGE_CACHE_SHIFT]; + page = node->page[off >> PAGE_SHIFT]; data = kmap(page); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; m = 1 << (~nidx & 7); byte = data[off]; if (!(byte & m)) { diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 6686bf39a5b5..cb1e5faa2fb7 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -91,8 +91,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask) if (!tree) return 0; - if (tree->node_size >= PAGE_CACHE_SIZE) { - nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT); + if (tree->node_size >= PAGE_SIZE) { + nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT); spin_lock(&tree->hash_lock); node = hfs_bnode_findhash(tree, nidx); if (!node) @@ -105,8 +105,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask) } spin_unlock(&tree->hash_lock); } else { - nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift); - i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift); + nidx = page->index << (PAGE_SHIFT - tree->node_size_shift); + i = 1 << (PAGE_SHIFT - tree->node_size_shift); spin_lock(&tree->hash_lock); do { node = hfs_bnode_findhash(tree, nidx++); diff --git a/fs/hfsplus/bitmap.c 
b/fs/hfsplus/bitmap.c index d2954451519e..c0ae274c0a22 100644 --- a/fs/hfsplus/bitmap.c +++ b/fs/hfsplus/bitmap.c @@ -13,7 +13,7 @@ #include "hfsplus_fs.h" #include "hfsplus_raw.h" -#define PAGE_CACHE_BITS (PAGE_CACHE_SIZE * 8) +#define PAGE_CACHE_BITS (PAGE_SIZE * 8) int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max) diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c index 63924662aaf3..ce014ceb89ef 100644 --- a/fs/hfsplus/bnode.c +++ b/fs/hfsplus/bnode.c @@ -24,16 +24,16 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) int l; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); - off &= ~PAGE_CACHE_MASK; + pagep = node->page + (off >> PAGE_SHIFT); + off &= ~PAGE_MASK; - l = min_t(int, len, PAGE_CACHE_SIZE - off); + l = min_t(int, len, PAGE_SIZE - off); memcpy(buf, kmap(*pagep) + off, l); kunmap(*pagep); while ((len -= l) != 0) { buf += l; - l = min_t(int, len, PAGE_CACHE_SIZE); + l = min_t(int, len, PAGE_SIZE); memcpy(buf, kmap(*++pagep), l); kunmap(*pagep); } @@ -77,17 +77,17 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) int l; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); - off &= ~PAGE_CACHE_MASK; + pagep = node->page + (off >> PAGE_SHIFT); + off &= ~PAGE_MASK; - l = min_t(int, len, PAGE_CACHE_SIZE - off); + l = min_t(int, len, PAGE_SIZE - off); memcpy(kmap(*pagep) + off, buf, l); set_page_dirty(*pagep); kunmap(*pagep); while ((len -= l) != 0) { buf += l; - l = min_t(int, len, PAGE_CACHE_SIZE); + l = min_t(int, len, PAGE_SIZE); memcpy(kmap(*++pagep), buf, l); set_page_dirty(*pagep); kunmap(*pagep); @@ -107,16 +107,16 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) int l; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); - off &= ~PAGE_CACHE_MASK; + pagep = node->page + (off >> PAGE_SHIFT); + off &= ~PAGE_MASK; - l = min_t(int, len, PAGE_CACHE_SIZE - off); + l = min_t(int, len, PAGE_SIZE - off); memset(kmap(*pagep) + off, 0, l); set_page_dirty(*pagep); kunmap(*pagep); while ((len -= l) != 0) { - l = min_t(int, len, PAGE_CACHE_SIZE); + l = min_t(int, len, PAGE_SIZE); memset(kmap(*++pagep), 0, l); set_page_dirty(*pagep); kunmap(*pagep); @@ -136,20 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, tree = src_node->tree; src += src_node->page_offset; dst += dst_node->page_offset; - src_page = src_node->page + (src >> PAGE_CACHE_SHIFT); - src &= ~PAGE_CACHE_MASK; - dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT); - dst &= ~PAGE_CACHE_MASK; + src_page = src_node->page + (src >> PAGE_SHIFT); + src &= ~PAGE_MASK; + dst_page = dst_node->page + (dst >> PAGE_SHIFT); + dst &= ~PAGE_MASK; if (src == dst) { - l = min_t(int, len, PAGE_CACHE_SIZE - src); + l = min_t(int, len, PAGE_SIZE - src); memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l); kunmap(*src_page); set_page_dirty(*dst_page); kunmap(*dst_page); while ((len -= l) != 0) { - l = min_t(int, len, PAGE_CACHE_SIZE); + l = min_t(int, len, PAGE_SIZE); memcpy(kmap(*++dst_page), kmap(*++src_page), l); kunmap(*src_page); set_page_dirty(*dst_page); @@ -161,12 +161,12 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, do { src_ptr = kmap(*src_page) + src; dst_ptr = kmap(*dst_page) + dst; - if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) { - l = PAGE_CACHE_SIZE - src; + if (PAGE_SIZE - src < PAGE_SIZE - dst) { + l = PAGE_SIZE - src; src = 0; dst += l; } else { - l = PAGE_CACHE_SIZE - dst; + l = PAGE_SIZE - dst; src += l; dst = 
0; } @@ -195,11 +195,11 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) dst += node->page_offset; if (dst > src) { src += len - 1; - src_page = node->page + (src >> PAGE_CACHE_SHIFT); - src = (src & ~PAGE_CACHE_MASK) + 1; + src_page = node->page + (src >> PAGE_SHIFT); + src = (src & ~PAGE_MASK) + 1; dst += len - 1; - dst_page = node->page + (dst >> PAGE_CACHE_SHIFT); - dst = (dst & ~PAGE_CACHE_MASK) + 1; + dst_page = node->page + (dst >> PAGE_SHIFT); + dst = (dst & ~PAGE_MASK) + 1; if (src == dst) { while (src < len) { @@ -208,7 +208,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) set_page_dirty(*dst_page); kunmap(*dst_page); len -= src; - src = PAGE_CACHE_SIZE; + src = PAGE_SIZE; src_page--; dst_page--; } @@ -226,32 +226,32 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) dst_ptr = kmap(*dst_page) + dst; if (src < dst) { l = src; - src = PAGE_CACHE_SIZE; + src = PAGE_SIZE; dst -= l; } else { l = dst; src -= l; - dst = PAGE_CACHE_SIZE; + dst = PAGE_SIZE; } l = min(len, l); memmove(dst_ptr - l, src_ptr - l, l); kunmap(*src_page); set_page_dirty(*dst_page); kunmap(*dst_page); - if (dst == PAGE_CACHE_SIZE) + if (dst == PAGE_SIZE) dst_page--; else src_page--; } while ((len -= l)); } } else { - src_page = node->page + (src >> PAGE_CACHE_SHIFT); - src &= ~PAGE_CACHE_MASK; - dst_page = node->page + (dst >> PAGE_CACHE_SHIFT); - dst &= ~PAGE_CACHE_MASK; + src_page = node->page + (src >> PAGE_SHIFT); + src &= ~PAGE_MASK; + dst_page = node->page + (dst >> PAGE_SHIFT); + dst &= ~PAGE_MASK; if (src == dst) { - l = min_t(int, len, PAGE_CACHE_SIZE - src); + l = min_t(int, len, PAGE_SIZE - src); memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l); kunmap(*src_page); @@ -259,7 +259,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) kunmap(*dst_page); while ((len -= l) != 0) { - l = min_t(int, len, PAGE_CACHE_SIZE); + l = min_t(int, len, PAGE_SIZE); memmove(kmap(*++dst_page), kmap(*++src_page), l); kunmap(*src_page); @@ -272,13 +272,13 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) do { src_ptr = kmap(*src_page) + src; dst_ptr = kmap(*dst_page) + dst; - if (PAGE_CACHE_SIZE - src < - PAGE_CACHE_SIZE - dst) { - l = PAGE_CACHE_SIZE - src; + if (PAGE_SIZE - src < + PAGE_SIZE - dst) { + l = PAGE_SIZE - src; src = 0; dst += l; } else { - l = PAGE_CACHE_SIZE - dst; + l = PAGE_SIZE - dst; src += l; dst = 0; } @@ -444,14 +444,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) mapping = tree->inode->i_mapping; off = (loff_t)cnid << tree->node_size_shift; - block = off >> PAGE_CACHE_SHIFT; - node->page_offset = off & ~PAGE_CACHE_MASK; + block = off >> PAGE_SHIFT; + node->page_offset = off & ~PAGE_MASK; for (i = 0; i < tree->pages_per_bnode; block++, i++) { page = read_mapping_page(mapping, block, NULL); if (IS_ERR(page)) goto fail; if (PageError(page)) { - page_cache_release(page); + put_page(page); goto fail; } node->page[i] = page; @@ -569,7 +569,7 @@ void hfs_bnode_free(struct hfs_bnode *node) for (i = 0; i < node->tree->pages_per_bnode; i++) if (node->page[i]) - page_cache_release(node->page[i]); + put_page(node->page[i]); kfree(node); } @@ -597,11 +597,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num) pagep = node->page; memset(kmap(*pagep) + node->page_offset, 0, - min_t(int, PAGE_CACHE_SIZE, tree->node_size)); + min_t(int, PAGE_SIZE, tree->node_size)); set_page_dirty(*pagep); kunmap(*pagep); for (i = 1; i < 
tree->pages_per_bnode; i++) { - memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE); + memset(kmap(*++pagep), 0, PAGE_SIZE); set_page_dirty(*pagep); kunmap(*pagep); } diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c index 3345c7553edc..d9d1a36ba826 100644 --- a/fs/hfsplus/btree.c +++ b/fs/hfsplus/btree.c @@ -236,15 +236,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) tree->node_size_shift = ffs(size) - 1; tree->pages_per_bnode = - (tree->node_size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + (tree->node_size + PAGE_SIZE - 1) >> + PAGE_SHIFT; kunmap(page); - page_cache_release(page); + put_page(page); return tree; fail_page: - page_cache_release(page); + put_page(page); free_inode: tree->inode->i_mapping->a_ops = &hfsplus_aops; iput(tree->inode); @@ -380,9 +380,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) off = off16; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); + pagep = node->page + (off >> PAGE_SHIFT); data = kmap(*pagep); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; idx = 0; for (;;) { @@ -403,7 +403,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) } } } - if (++off >= PAGE_CACHE_SIZE) { + if (++off >= PAGE_SIZE) { kunmap(*pagep); data = kmap(*++pagep); off = 0; @@ -426,9 +426,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) len = hfs_brec_lenoff(node, 0, &off16); off = off16; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); + pagep = node->page + (off >> PAGE_SHIFT); data = kmap(*pagep); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; } } @@ -475,9 +475,9 @@ void hfs_bmap_free(struct hfs_bnode *node) len = hfs_brec_lenoff(node, 0, &off); } off += node->page_offset + nidx / 8; - page = node->page[off >> PAGE_CACHE_SHIFT]; + page = node->page[off >> PAGE_SHIFT]; data = kmap(page); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; m = 1 << (~nidx & 7); byte = data[off]; if (!(byte & m)) { diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 1a6394cdb54e..b28f39865c3a 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -87,9 +87,9 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask) } if (!tree) return 0; - if (tree->node_size >= PAGE_CACHE_SIZE) { + if (tree->node_size >= PAGE_SIZE) { nidx = page->index >> - (tree->node_size_shift - PAGE_CACHE_SHIFT); + (tree->node_size_shift - PAGE_SHIFT); spin_lock(&tree->hash_lock); node = hfs_bnode_findhash(tree, nidx); if (!node) @@ -103,8 +103,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask) spin_unlock(&tree->hash_lock); } else { nidx = page->index << - (PAGE_CACHE_SHIFT - tree->node_size_shift); - i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift); + (PAGE_SHIFT - tree->node_size_shift); + i = 1 << (PAGE_SHIFT - tree->node_size_shift); spin_lock(&tree->hash_lock); do { node = hfs_bnode_findhash(tree, nidx++); diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 5d54490a136d..c35911362ff9 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -438,7 +438,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) err = -EFBIG; last_fs_block = sbi->total_blocks - 1; last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || (last_fs_page > (pgoff_t)(~0ULL))) { diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c index ab01530b4930..70e445ff0cff 100644 --- a/fs/hfsplus/xattr.c +++ b/fs/hfsplus/xattr.c @@ -220,7 +220,7 @@ 
check_attr_tree_state_again: index = 0; written = 0; - for (; written < node_size; index++, written += PAGE_CACHE_SIZE) { + for (; written < node_size; index++, written += PAGE_SIZE) { void *kaddr; page = read_mapping_page(mapping, index, NULL); @@ -231,11 +231,11 @@ check_attr_tree_state_again: kaddr = kmap_atomic(page); memcpy(kaddr, buf + written, - min_t(size_t, PAGE_CACHE_SIZE, node_size - written)); + min_t(size_t, PAGE_SIZE, node_size - written)); kunmap_atomic(kaddr); set_page_dirty(page); - page_cache_release(page); + put_page(page); } hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY); diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index d1abbee281d1..7016653f3e41 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -410,12 +410,12 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc) struct inode *inode = mapping->host; char *buffer; loff_t base = page_offset(page); - int count = PAGE_CACHE_SIZE; - int end_index = inode->i_size >> PAGE_CACHE_SHIFT; + int count = PAGE_SIZE; + int end_index = inode->i_size >> PAGE_SHIFT; int err; if (page->index >= end_index) - count = inode->i_size & (PAGE_CACHE_SIZE-1); + count = inode->i_size & (PAGE_SIZE-1); buffer = kmap(page); @@ -447,7 +447,7 @@ static int hostfs_readpage(struct file *file, struct page *page) buffer = kmap(page); bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (bytes_read < 0) { ClearPageUptodate(page); SetPageError(page); @@ -455,7 +455,7 @@ static int hostfs_readpage(struct file *file, struct page *page) goto out; } - memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read); + memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read); ClearPageError(page); SetPageUptodate(page); @@ -471,7 +471,7 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; *pagep = grab_cache_page_write_begin(mapping, index, flags); if (!*pagep) @@ -485,14 +485,14 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping, { struct inode *inode = mapping->host; void *buffer; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); int err; buffer = kmap(page); err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied); kunmap(page); - if (!PageUptodate(page) && err == PAGE_CACHE_SIZE) + if (!PageUptodate(page) && err == PAGE_SIZE) SetPageUptodate(page); /* @@ -502,7 +502,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping, if (err > 0 && (pos > inode->i_size)) inode->i_size = pos; unlock_page(page); - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index e1f465a389d5..4ea71eba40a5 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -213,12 +213,12 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset, int i, chunksize; /* Find which 4k chunk and offset within that chunk */ - i = offset >> PAGE_CACHE_SHIFT; - offset = offset & ~PAGE_CACHE_MASK; + i = offset >> PAGE_SHIFT; + offset = offset & ~PAGE_MASK; while (size) { size_t n; - chunksize = PAGE_CACHE_SIZE; + chunksize = PAGE_SIZE; if (offset) chunksize -= offset; if (chunksize > size) @@ -237,7 +237,7 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset, /* * Support for read() - Find the page
attached to f_mapping and copy out the * data. It's *very* similar to do_generic_mapping_read(), we can't use that - * since it has PAGE_CACHE_SIZE assumptions. + * since it has PAGE_SIZE assumptions. */ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) { @@ -285,7 +285,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) * We have the page, copy it to user space buffer. */ copied = hugetlbfs_read_actor(page, offset, to, nr); - page_cache_release(page); + put_page(page); } offset += copied; retval += copied; diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c index f311bf084015..2e4e834d1a98 100644 --- a/fs/isofs/compress.c +++ b/fs/isofs/compress.c @@ -26,7 +26,7 @@ #include "zisofs.h" /* This should probably be global. */ -static char zisofs_sink_page[PAGE_CACHE_SIZE]; +static char zisofs_sink_page[PAGE_SIZE]; /* * This contains the zlib memory allocation and the mutex for the @@ -70,11 +70,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, for ( i = 0 ; i < pcount ; i++ ) { if (!pages[i]) continue; - memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE); + memset(page_address(pages[i]), 0, PAGE_SIZE); flush_dcache_page(pages[i]); SetPageUptodate(pages[i]); } - return ((loff_t)pcount) << PAGE_CACHE_SHIFT; + return ((loff_t)pcount) << PAGE_SHIFT; } /* Because zlib is not thread-safe, do all the I/O at the top. */ @@ -121,11 +121,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, if (pages[curpage]) { stream.next_out = page_address(pages[curpage]) + poffset; - stream.avail_out = PAGE_CACHE_SIZE - poffset; + stream.avail_out = PAGE_SIZE - poffset; poffset = 0; } else { stream.next_out = (void *)&zisofs_sink_page; - stream.avail_out = PAGE_CACHE_SIZE; + stream.avail_out = PAGE_SIZE; } } if (!stream.avail_in) { @@ -220,14 +220,14 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount, * pages with the data we have anyway...
*/ start_off = page_offset(pages[full_page]); - end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size); + end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size); cstart_block = start_off >> zisofs_block_shift; cend_block = (end_off + (1 << zisofs_block_shift) - 1) >> zisofs_block_shift; - WARN_ON(start_off - (full_page << PAGE_CACHE_SHIFT) != - ((cstart_block << zisofs_block_shift) & PAGE_CACHE_MASK)); + WARN_ON(start_off - (full_page << PAGE_SHIFT) != + ((cstart_block << zisofs_block_shift) & PAGE_MASK)); /* Find the pointer to this specific chunk */ /* Note: we're not using isonum_731() here because the data is known aligned */ @@ -260,10 +260,10 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount, ret = zisofs_uncompress_block(inode, block_start, block_end, pcount, pages, poffset, &err); poffset += ret; - pages += poffset >> PAGE_CACHE_SHIFT; - pcount -= poffset >> PAGE_CACHE_SHIFT; - full_page -= poffset >> PAGE_CACHE_SHIFT; - poffset &= ~PAGE_CACHE_MASK; + pages += poffset >> PAGE_SHIFT; + pcount -= poffset >> PAGE_SHIFT; + full_page -= poffset >> PAGE_SHIFT; + poffset &= ~PAGE_MASK; if (err) { brelse(bh); @@ -282,7 +282,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount, if (poffset && *pages) { memset(page_address(*pages) + poffset, 0, - PAGE_CACHE_SIZE - poffset); + PAGE_SIZE - poffset); flush_dcache_page(*pages); SetPageUptodate(*pages); } @@ -302,12 +302,12 @@ static int zisofs_readpage(struct file *file, struct page *page) int i, pcount, full_page; unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1]; unsigned int zisofs_pages_per_cblock = - PAGE_CACHE_SHIFT <= zisofs_block_shift ? - (1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0; + PAGE_SHIFT <= zisofs_block_shift ? + (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0; struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)]; pgoff_t index = page->index, end_index; - end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; /* * If this page is wholly outside i_size we just return zero; * do_generic_file_read() will handle this for us @@ -318,7 +318,7 @@ static int zisofs_readpage(struct file *file, struct page *page) return 0; } - if (PAGE_CACHE_SHIFT <= zisofs_block_shift) { + if (PAGE_SHIFT <= zisofs_block_shift) { /* We have already been given one page, this is the one we must do. */ full_page = index & (zisofs_pages_per_cblock - 1); @@ -351,7 +351,7 @@ static int zisofs_readpage(struct file *file, struct page *page) kunmap(pages[i]); unlock_page(pages[i]); if (i != full_page) - page_cache_release(pages[i]); + put_page(pages[i]); } } diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index bcd2d41b318a..131dedc920d8 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -1021,7 +1021,7 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock, * the page with useless information without generating any * I/O errors. 
*/ - if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) { + if (b_off > ((inode->i_size + PAGE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) { printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n", __func__, b_off, (unsigned long long)inode->i_size); diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 517f2de784cf..2ad98d6e19f4 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -81,11 +81,11 @@ static void release_buffer_page(struct buffer_head *bh) if (!trylock_page(page)) goto nope; - page_cache_get(page); + get_page(page); __brelse(bh); try_to_free_buffers(page); unlock_page(page); - page_cache_release(page); + put_page(page); return; nope: diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index de73a9516a54..435f0b26ac20 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -2221,7 +2221,7 @@ void jbd2_journal_ack_err(journal_t *journal) int jbd2_journal_blocks_per_page(struct inode *inode) { - return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); + return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); } /* diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 01e4652d88f6..67c103867bf8 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2263,7 +2263,7 @@ int jbd2_journal_invalidatepage(journal_t *journal, struct buffer_head *head, *bh, *next; unsigned int stop = offset + length; unsigned int curr_off = 0; - int partial_page = (offset || length < PAGE_CACHE_SIZE); + int partial_page = (offset || length < PAGE_SIZE); int may_free = 1; int ret = 0; @@ -2272,7 +2272,7 @@ int jbd2_journal_invalidatepage(journal_t *journal, if (!page_has_buffers(page)) return 0; - BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); + BUG_ON(stop > PAGE_SIZE || stop < length); /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c index 1090eb64b90d..9d26b1b9fc01 100644 --- a/fs/jffs2/debug.c +++ b/fs/jffs2/debug.c @@ -95,15 +95,15 @@ __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f) rather than mucking around with actually reading the node and checking the compression type, which is the real way to tell a hole node. */ - if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) - && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { + if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag) + && frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) { JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", ref_offset(fn->raw)); bitched = 1; } - if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) - && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { + if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag) + && frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) { JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. 
Tell dwmw2.\n", ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); bitched = 1; diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index cad86bac3453..0e62dec3effc 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -87,14 +87,15 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) int ret; jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", - __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT); + __func__, inode->i_ino, pg->index << PAGE_SHIFT); BUG_ON(!PageLocked(pg)); pg_buf = kmap(pg); /* FIXME: Can kmap fail? */ - ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE); + ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT, + PAGE_SIZE); if (ret) { ClearPageUptodate(pg); @@ -137,8 +138,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, struct page *pg; struct inode *inode = mapping->host; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - uint32_t pageofs = index << PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; + uint32_t pageofs = index << PAGE_SHIFT; int ret = 0; pg = grab_cache_page_write_begin(mapping, index, flags); @@ -230,7 +231,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, out_page: unlock_page(pg); - page_cache_release(pg); + put_page(pg); return ret; } @@ -245,14 +246,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_raw_inode *ri; - unsigned start = pos & (PAGE_CACHE_SIZE - 1); + unsigned start = pos & (PAGE_SIZE - 1); unsigned end = start + copied; unsigned aligned_start = start & ~3; int ret = 0; uint32_t writtenlen = 0; jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", - __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT, + __func__, inode->i_ino, pg->index << PAGE_SHIFT, start, end, pg->flags); /* We need to avoid deadlock with page_cache_read() in @@ -261,7 +262,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, to re-lock it. */ BUG_ON(!PageUptodate(pg)); - if (end == PAGE_CACHE_SIZE) { + if (end == PAGE_SIZE) { /* When writing out the end of a page, write out the _whole_ page. This helps to reduce the number of nodes in files which have many short writes, like @@ -275,7 +276,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, jffs2_dbg(1, "%s(): Allocation of raw inode failed\n", __func__); unlock_page(pg); - page_cache_release(pg); + put_page(pg); return -ENOMEM; } @@ -292,7 +293,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, kmap(pg); ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start, - (pg->index << PAGE_CACHE_SHIFT) + aligned_start, + (pg->index << PAGE_SHIFT) + aligned_start, end - aligned_start, &writtenlen); kunmap(pg); @@ -329,6 +330,6 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, jffs2_dbg(1, "%s() returning %d\n", __func__, writtenlen > 0 ? writtenlen : ret); unlock_page(pg); - page_cache_release(pg); + put_page(pg); return writtenlen > 0 ?
writtenlen : ret; } diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index bead25ae8fe4..ae2ebb26b446 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -586,8 +586,8 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) goto out_root; sb->s_maxbytes = 0xFFFFFFFF; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = JFFS2_SUPER_MAGIC; if (!(sb->s_flags & MS_RDONLY)) jffs2_start_garbage_collect_thread(c); @@ -685,7 +685,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, struct inode *inode = OFNI_EDONI_2SFFJ(f); struct page *pg; - pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, + pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT, (void *)jffs2_do_readpage_unlock, inode); if (IS_ERR(pg)) return (void *)pg; @@ -701,7 +701,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c, struct page *pg = (void *)*priv; kunmap(pg); - page_cache_release(pg); + put_page(pg); } static int jffs2_flash_setup(struct jffs2_sb_info *c) { diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c index 95d5880a63ee..9ed0f26cf023 100644 --- a/fs/jffs2/gc.c +++ b/fs/jffs2/gc.c @@ -134,37 +134,59 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) if (mutex_lock_interruptible(&c->alloc_sem)) return -EINTR; + for (;;) { + /* We can't start doing GC until we've finished checking + the node CRCs etc. */ + int bucket, want_ino; + spin_lock(&c->erase_completion_lock); if (!c->unchecked_size) break; - - /* We can't start doing GC yet. We haven't finished checking - the node CRCs etc. Do it now. */ - - /* checked_ino is protected by the alloc_sem */ - if (c->checked_ino > c->highest_ino && xattr) { - pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n", - c->unchecked_size); - jffs2_dbg_dump_block_lists_nolock(c); - spin_unlock(&c->erase_completion_lock); - mutex_unlock(&c->alloc_sem); - return -ENOSPC; - } - spin_unlock(&c->erase_completion_lock); if (!xattr) xattr = jffs2_verify_xattr(c); spin_lock(&c->inocache_lock); + /* Instead of doing the inodes in numeric order, doing a lookup + * in the hash for each possible number, just walk the hash + * buckets of *existing* inodes. This means that we process + * them out-of-order, but it can be a lot faster if there's + * a sparse inode# space. Which there often is. */ + want_ino = c->check_ino; + for (bucket = c->check_ino % c->inocache_hashsize ; bucket < c->inocache_hashsize; bucket++) { + for (ic = c->inocache_list[bucket]; ic; ic = ic->next) { + if (ic->ino < want_ino) + continue; + + if (ic->state != INO_STATE_CHECKEDABSENT && + ic->state != INO_STATE_PRESENT) + goto got_next; /* with inocache_lock held */ + + jffs2_dbg(1, "Skipping ino #%u already checked\n", + ic->ino); + } + want_ino = 0; + } - ic = jffs2_get_ino_cache(c, c->checked_ino++); + /* Point c->check_ino past the end of the last bucket. */ + c->check_ino = ((c->highest_ino + c->inocache_hashsize + 1) & + ~c->inocache_hashsize) - 1; - if (!ic) { - spin_unlock(&c->inocache_lock); - continue; - } + spin_unlock(&c->inocache_lock); + + pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n", + c->unchecked_size); + jffs2_dbg_dump_block_lists_nolock(c); + mutex_unlock(&c->alloc_sem); + return -ENOSPC; + + got_next: + /* For next time round the loop, we want c->check_ino to indicate * the *next* one we want to check.
And since we're walking the + * buckets rather than doing it sequentially, it's: */ + c->check_ino = ic->ino + c->inocache_hashsize; if (!ic->pino_nlink) { jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n", @@ -176,8 +198,6 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) switch(ic->state) { case INO_STATE_CHECKEDABSENT: case INO_STATE_PRESENT: - jffs2_dbg(1, "Skipping ino #%u already checked\n", - ic->ino); spin_unlock(&c->inocache_lock); continue; @@ -196,7 +216,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) ic->ino); /* We need to come back again for the _same_ inode. We've made no progress in this case, but that should be OK */ - c->checked_ino--; + c->check_ino = ic->ino; mutex_unlock(&c->alloc_sem); sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); @@ -532,7 +552,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era goto upnout; } /* We found a datanode. Do the GC */ - if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) { + if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) { /* It crosses a page boundary. Therefore, it must be a hole. */ ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end); } else { @@ -1172,8 +1192,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era struct jffs2_node_frag *frag; uint32_t min, max; - min = start & ~(PAGE_CACHE_SIZE-1); - max = min + PAGE_CACHE_SIZE; + min = start & ~(PAGE_SIZE-1); + max = min + PAGE_SIZE; frag = jffs2_lookup_node_frag(&f->fragtree, start); @@ -1331,7 +1351,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset); datalen = end - offset; - writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1)); + writebuf = pg_ptr + (offset & (PAGE_SIZE -1)); comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen); diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h index 046fee8b6e9b..778275f48a87 100644 --- a/fs/jffs2/jffs2_fs_sb.h +++ b/fs/jffs2/jffs2_fs_sb.h @@ -49,7 +49,7 @@ struct jffs2_sb_info { struct mtd_info *mtd; uint32_t highest_ino; - uint32_t checked_ino; + uint32_t check_ino; /* *NEXT* inode to be checked */ unsigned int flags; diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 9a5449bc3afb..b86c78d178c6 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c @@ -90,7 +90,7 @@ uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, /* If the last fragment starts at the RAM page boundary, it is * REF_PRISTINE irrespective of its size. */ - if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { + if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) { dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n", frag->ofs, frag->ofs + frag->size); frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; @@ -237,7 +237,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r If so, both 'this' and the new node get marked REF_NORMAL so the GC can take a look. 
*/ - if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { + if (lastend && (lastend-1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) { if (this->node) mark_ref_normal(this->node->raw); mark_ref_normal(newfrag->node->raw); @@ -382,7 +382,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in /* If we now share a page with other nodes, mark either previous or next node REF_NORMAL, as appropriate. */ - if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { + if (newfrag->ofs & (PAGE_SIZE-1)) { struct jffs2_node_frag *prev = frag_prev(newfrag); mark_ref_normal(fn->raw); @@ -391,7 +391,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in mark_ref_normal(prev->node->raw); } - if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { + if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) { struct jffs2_node_frag *next = frag_next(newfrag); if (next) { diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index b6bd4affd9ad..cda0774c2c9c 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c @@ -846,8 +846,8 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c) return 1; if (c->unchecked_size) { - jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n", - c->unchecked_size, c->checked_ino); + jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, check_ino #%d\n", + c->unchecked_size, c->check_ino); return 1; } diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index 5a3da3f52908..b25d28a21212 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c @@ -1183,22 +1183,20 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c) int jffs2_nand_flash_setup(struct jffs2_sb_info *c) { - struct nand_ecclayout *oinfo = c->mtd->ecclayout; - if (!c->mtd->oobsize) return 0; /* Cleanmarker is out-of-band, so inline size zero */ c->cleanmarker_size = 0; - if (!oinfo || oinfo->oobavail == 0) { + if (c->mtd->oobavail == 0) { pr_err("inconsistent device description\n"); return -EINVAL; } jffs2_dbg(1, "using OOB on NAND\n"); - c->oobavail = oinfo->oobavail; + c->oobavail = c->mtd->oobavail; /* Initialise write buffer */ init_rwsem(&c->wbuf_sem); diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c index b634de4c8101..7fb187ab2682 100644 --- a/fs/jffs2/write.c +++ b/fs/jffs2/write.c @@ -172,8 +172,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 beginning of a page and runs to the end of the file, or if it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. 
*/ - if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || - ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && + if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) || + ( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) && (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) { flash_ofs |= REF_PRISTINE; } else { @@ -366,7 +366,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, break; } mutex_lock(&f->sem); - datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1))); + datalen = min_t(uint32_t, writelen, + PAGE_SIZE - (offset & (PAGE_SIZE-1))); cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen); comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen); diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index a3eb316b1ac3..b60e015cc757 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -80,7 +80,7 @@ static inline void lock_metapage(struct metapage *mp) static struct kmem_cache *metapage_cache; static mempool_t *metapage_mempool; -#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE) +#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE) #if MPS_PER_PAGE > 1 @@ -316,7 +316,7 @@ static void last_write_complete(struct page *page) struct metapage *mp; unsigned int offset; - for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { + for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { mp = page_to_mp(page, offset); if (mp && test_bit(META_io, &mp->flag)) { if (mp->lsn) @@ -366,12 +366,12 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) int bad_blocks = 0; page_start = (sector_t)page->index << - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); set_page_writeback(page); - for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { + for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { mp = page_to_mp(page, offset); if (!mp || !test_bit(META_dirty, &mp->flag)) @@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) bio = NULL; } else inc_io(page); - xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits; + xlen = (PAGE_SIZE - offset) >> inode->i_blkbits; pblock = metapage_get_blocks(inode, lblock, &xlen); if (!pblock) { printk(KERN_ERR "JFS: metapage_get_blocks failed\n"); @@ -485,7 +485,7 @@ static int metapage_readpage(struct file *fp, struct page *page) struct inode *inode = page->mapping->host; struct bio *bio = NULL; int block_offset; - int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; + int blocks_per_page = PAGE_SIZE >> inode->i_blkbits; sector_t page_start; /* address of page in fs blocks */ sector_t pblock; int xlen; @@ -494,7 +494,7 @@ static int metapage_readpage(struct file *fp, struct page *page) BUG_ON(!PageLocked(page)); page_start = (sector_t)page->index << - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); block_offset = 0; while (block_offset < blocks_per_page) { @@ -542,7 +542,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask) int ret = 1; int offset; - for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { + for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { mp = page_to_mp(page, offset); if (!mp) @@ -568,7 +568,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask) static void metapage_invalidatepage(struct page *page, unsigned int offset, unsigned int length) { - BUG_ON(offset || length < PAGE_CACHE_SIZE); + 
BUG_ON(offset || length < PAGE_SIZE); BUG_ON(PageWriteback(page)); @@ -599,10 +599,10 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock, inode->i_ino, lblock, absolute); l2bsize = inode->i_blkbits; - l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize; + l2BlocksPerPage = PAGE_SHIFT - l2bsize; page_index = lblock >> l2BlocksPerPage; page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize; - if ((page_offset + size) > PAGE_CACHE_SIZE) { + if ((page_offset + size) > PAGE_SIZE) { jfs_err("MetaData crosses page boundary!!"); jfs_err("lblock = %lx, size = %d", lblock, size); dump_stack(); @@ -621,7 +621,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock, mapping = inode->i_mapping; } - if (new && (PSIZE == PAGE_CACHE_SIZE)) { + if (new && (PSIZE == PAGE_SIZE)) { page = grab_cache_page(mapping, page_index); if (!page) { jfs_err("grab_cache_page failed!"); @@ -693,7 +693,7 @@ unlock: void grab_metapage(struct metapage * mp) { jfs_info("grab_metapage: mp = 0x%p", mp); - page_cache_get(mp->page); + get_page(mp->page); lock_page(mp->page); mp->count++; lock_metapage(mp); @@ -706,12 +706,12 @@ void force_metapage(struct metapage *mp) jfs_info("force_metapage: mp = 0x%p", mp); set_bit(META_forcewrite, &mp->flag); clear_bit(META_sync, &mp->flag); - page_cache_get(page); + get_page(page); lock_page(page); set_page_dirty(page); write_one_page(page, 1); clear_bit(META_forcewrite, &mp->flag); - page_cache_release(page); + put_page(page); } void hold_metapage(struct metapage *mp) @@ -726,7 +726,7 @@ void put_metapage(struct metapage *mp) unlock_page(mp->page); return; } - page_cache_get(mp->page); + get_page(mp->page); mp->count++; lock_metapage(mp); unlock_page(mp->page); @@ -746,7 +746,7 @@ void release_metapage(struct metapage * mp) assert(mp->count); if (--mp->count || mp->nohomeok) { unlock_page(page); - page_cache_release(page); + put_page(page); return; } @@ -764,13 +764,13 @@ void release_metapage(struct metapage * mp) drop_metapage(page, mp); unlock_page(page); - page_cache_release(page); + put_page(page); } void __invalidate_metapages(struct inode *ip, s64 addr, int len) { sector_t lblock; - int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits; + int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits; int BlocksPerPage = 1 << l2BlocksPerPage; /* All callers are interested in block device's mapping */ struct address_space *mapping = @@ -788,7 +788,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len) page = find_lock_page(mapping, lblock >> l2BlocksPerPage); if (!page) continue; - for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { + for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { mp = page_to_mp(page, offset); if (!mp) continue; @@ -803,7 +803,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len) remove_from_logsync(mp); } unlock_page(page); - page_cache_release(page); + put_page(page); } } diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h index 337e9e51ac06..a869fb4a20d6 100644 --- a/fs/jfs/jfs_metapage.h +++ b/fs/jfs/jfs_metapage.h @@ -106,7 +106,7 @@ static inline void metapage_nohomeok(struct metapage *mp) lock_page(page); if (!mp->nohomeok++) { mark_metapage_dirty(mp); - page_cache_get(page); + get_page(page); wait_on_page_writeback(page); } unlock_page(page); @@ -128,7 +128,7 @@ static inline void metapage_wait_for_io(struct metapage *mp) static inline void _metapage_homeok(struct metapage *mp) { if (!--mp->nohomeok) - page_cache_release(mp->page); + put_page(mp->page); } static 
inline void metapage_homeok(struct metapage *mp) diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 4f5d85ba8e23..78d599198bf5 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -596,7 +596,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) * Page cache is indexed by long. * I would use MAX_LFS_FILESIZE, but it's only half as big */ - sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, + sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1, (u64)sb->s_maxbytes); #endif sb->s_time_gran = 1; diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index b67dbccdaf88..f73541fbe7af 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -138,8 +138,8 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic) struct dentry *root; info->sb = sb; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = magic; sb->s_op = &kernfs_sops; sb->s_time_gran = 1; diff --git a/fs/libfs.c b/fs/libfs.c index 0ca80b2af420..f3fa82ce9b70 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -25,7 +25,7 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, { struct inode *inode = d_inode(dentry); generic_fillattr(inode, stat); - stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9); + stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9); return 0; } EXPORT_SYMBOL(simple_getattr); @@ -33,7 +33,7 @@ EXPORT_SYMBOL(simple_getattr); int simple_statfs(struct dentry *dentry, struct kstatfs *buf) { buf->f_type = dentry->d_sb->s_magic; - buf->f_bsize = PAGE_CACHE_SIZE; + buf->f_bsize = PAGE_SIZE; buf->f_namelen = NAME_MAX; return 0; } @@ -395,7 +395,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping, struct page *page; pgoff_t index; - index = pos >> PAGE_CACHE_SHIFT; + index = pos >> PAGE_SHIFT; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -403,10 +403,10 @@ int simple_write_begin(struct file *file, struct address_space *mapping, *pagep = page; - if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + if (!PageUptodate(page) && (len != PAGE_SIZE)) { + unsigned from = pos & (PAGE_SIZE - 1); - zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE); + zero_user_segments(page, 0, from, from + len, PAGE_SIZE); } return 0; } @@ -442,7 +442,7 @@ int simple_write_end(struct file *file, struct address_space *mapping, /* zero the stale part of the page if we did a short copy */ if (copied < len) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); zero_user(page, from + copied, len - copied); } @@ -458,7 +458,7 @@ int simple_write_end(struct file *file, struct address_space *mapping, set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } @@ -477,8 +477,8 @@ int simple_fill_super(struct super_block *s, unsigned long magic, struct dentry *dentry; int i; - s->s_blocksize = PAGE_CACHE_SIZE; - s->s_blocksize_bits = PAGE_CACHE_SHIFT; + s->s_blocksize = PAGE_SIZE; + s->s_blocksize_bits = PAGE_SHIFT; s->s_magic = magic; s->s_op = &simple_super_operations; s->s_time_gran = 1; @@ -994,12 +994,12 @@ int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks) { u64 last_fs_block = num_blocks - 1; u64 last_fs_page = - last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits); + last_fs_block >> (PAGE_SHIFT - blocksize_bits); if (unlikely(num_blocks == 0)) return 0; - if 
((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT)) + if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT)) return -EINVAL; if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) || diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index a709d80c8ebc..cc26f8f215f5 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c @@ -64,7 +64,7 @@ static void writeseg_end_io(struct bio *bio) bio_for_each_segment_all(bvec, bio, i) { end_page_writeback(bvec->bv_page); - page_cache_release(bvec->bv_page); + put_page(bvec->bv_page); } bio_put(bio); if (atomic_dec_and_test(&super->s_pending_writes)) diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c index 9c501449450d..b76a62b1978f 100644 --- a/fs/logfs/dev_mtd.c +++ b/fs/logfs/dev_mtd.c @@ -46,9 +46,9 @@ static int logfs_mtd_write(struct super_block *sb, loff_t ofs, size_t len, BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs)); BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift); - BUG_ON(len > PAGE_CACHE_SIZE); - page_start = ofs & PAGE_CACHE_MASK; - page_end = PAGE_CACHE_ALIGN(ofs + len) - 1; + BUG_ON(len > PAGE_SIZE); + page_start = ofs & PAGE_MASK; + page_end = PAGE_ALIGN(ofs + len) - 1; ret = mtd_write(mtd, ofs, len, &retlen, buf); if (ret || (retlen != len)) return -EIO; @@ -82,7 +82,7 @@ static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs, if (!page) continue; memset(page_address(page), 0xFF, PAGE_SIZE); - page_cache_release(page); + put_page(page); } return 0; } @@ -195,7 +195,7 @@ static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, err = logfs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE, page_address(page)); unlock_page(page); - page_cache_release(page); + put_page(page); if (err) return err; } diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 542468e9bfb4..ddbed2be5366 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -183,7 +183,7 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry) if (name->len != be16_to_cpu(dd->namelen) || memcmp(name->name, dd->name, name->len)) { kunmap_atomic(dd); - page_cache_release(page); + put_page(page); continue; } @@ -238,7 +238,7 @@ static int logfs_unlink(struct inode *dir, struct dentry *dentry) return PTR_ERR(page); } index = page->index; - page_cache_release(page); + put_page(page); mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(dir, ta); @@ -316,7 +316,7 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx) be16_to_cpu(dd->namelen), be64_to_cpu(dd->ino), dd->type); kunmap(page); - page_cache_release(page); + put_page(page); if (full) break; } @@ -349,7 +349,7 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry, dd = kmap_atomic(page); ino = be64_to_cpu(dd->ino); kunmap_atomic(dd); - page_cache_release(page); + put_page(page); inode = logfs_iget(dir->i_sb, ino); if (IS_ERR(inode)) @@ -392,7 +392,7 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry, err = logfs_write_buf(dir, page, WF_LOCK); unlock_page(page); - page_cache_release(page); + put_page(page); if (!err) grow_dir(dir, index); return err; @@ -561,7 +561,7 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry, map = kmap_atomic(page); memcpy(dd, map, sizeof(*dd)); kunmap_atomic(map); - page_cache_release(page); + put_page(page); return 0; } diff --git a/fs/logfs/file.c b/fs/logfs/file.c index 61eaeb1b6cac..f01ddfb1a03b 100644 --- a/fs/logfs/file.c +++ b/fs/logfs/file.c @@ -15,21 +15,21 @@ static int logfs_write_begin(struct
file *file, struct address_space *mapping, { struct inode *inode = mapping->host; struct page *page; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; - if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) + if ((len == PAGE_SIZE) || PageUptodate(page)) return 0; - if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { - unsigned start = pos & (PAGE_CACHE_SIZE - 1); + if ((pos & PAGE_MASK) >= i_size_read(inode)) { + unsigned start = pos & (PAGE_SIZE - 1); unsigned end = start + len; /* Reading beyond i_size is simple: memset to zero */ - zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); + zero_user_segments(page, 0, start, end, PAGE_SIZE); return 0; } return logfs_readpage_nolock(page); @@ -41,11 +41,11 @@ static int logfs_write_end(struct file *file, struct address_space *mapping, { struct inode *inode = mapping->host; pgoff_t index = page->index; - unsigned start = pos & (PAGE_CACHE_SIZE - 1); + unsigned start = pos & (PAGE_SIZE - 1); unsigned end = start + copied; int ret = 0; - BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize); + BUG_ON(PAGE_SIZE != inode->i_sb->s_blocksize); BUG_ON(page->index > I3_BLOCKS); if (copied < len) { @@ -61,8 +61,8 @@ static int logfs_write_end(struct file *file, struct address_space *mapping, if (copied == 0) goto out; /* FIXME: do we need to update inode? */ - if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) { - i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end); + if (i_size_read(inode) < (index << PAGE_SHIFT) + end) { + i_size_write(inode, (index << PAGE_SHIFT) + end); mark_inode_dirty_sync(inode); } @@ -75,7 +75,7 @@ static int logfs_write_end(struct file *file, struct address_space *mapping, } out: unlock_page(page); - page_cache_release(page); + put_page(page); return ret ? ret : copied; } @@ -118,7 +118,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; unsigned offset; u64 bix; level_t level; @@ -142,7 +142,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc) return __logfs_writepage(page); /* Is the page fully outside i_size? (truncate in progress) */ - offset = i_size & (PAGE_CACHE_SIZE-1); + offset = i_size & (PAGE_SIZE-1); if (bix > end_index || offset == 0) { unlock_page(page); return 0; /* don't care */ @@ -155,7 +155,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc) * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." 
*/ - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); return __logfs_writepage(page); } diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index 20973c9e52f8..3fb8c6d67303 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c @@ -281,7 +281,7 @@ static struct page *logfs_get_read_page(struct inode *inode, u64 bix, static void logfs_put_read_page(struct page *page) { unlock_page(page); - page_cache_release(page); + put_page(page); } static void logfs_lock_write_page(struct page *page) @@ -323,7 +323,7 @@ repeat: return NULL; err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS); if (unlikely(err)) { - page_cache_release(page); + put_page(page); if (err == -EEXIST) goto repeat; return NULL; @@ -342,7 +342,7 @@ static void logfs_unlock_write_page(struct page *page) static void logfs_put_write_page(struct page *page) { logfs_unlock_write_page(page); - page_cache_release(page); + put_page(page); } static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level, @@ -562,7 +562,7 @@ static void indirect_free_block(struct super_block *sb, if (PagePrivate(page)) { ClearPagePrivate(page); - page_cache_release(page); + put_page(page); set_page_private(page, 0); } __free_block(sb, block); @@ -655,7 +655,7 @@ static void alloc_data_block(struct inode *inode, struct page *page) block->page = page; SetPagePrivate(page); - page_cache_get(page); + get_page(page); set_page_private(page, (unsigned long) block); block->ops = &indirect_block_ops; @@ -709,7 +709,7 @@ static u64 block_get_pointer(struct page *page, int index) static int logfs_read_empty(struct page *page) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); return 0; } @@ -1660,7 +1660,7 @@ static int truncate_data_block(struct inode *inode, struct page *page, if (err) return err; - zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE); + zero_user_segment(page, size - pageofs, PAGE_SIZE); return logfs_segment_write(inode, page, shadow); } @@ -1919,7 +1919,7 @@ static void move_page_to_inode(struct inode *inode, struct page *page) block->page = NULL; if (PagePrivate(page)) { ClearPagePrivate(page); - page_cache_release(page); + put_page(page); set_page_private(page, 0); } } @@ -1940,7 +1940,7 @@ static void move_inode_to_page(struct page *page, struct inode *inode) if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); set_page_private(page, (unsigned long) block); } @@ -1971,7 +1971,7 @@ int logfs_read_inode(struct inode *inode) logfs_disk_to_inode(di, inode); kunmap_atomic(di); move_page_to_inode(inode, page); - page_cache_release(page); + put_page(page); return 0; } diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index d270e4b2ab6b..1efd6055f4b0 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c @@ -90,9 +90,9 @@ int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len, if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); } - page_cache_release(page); + put_page(page); buf += copylen; len -= copylen; @@ -117,9 +117,9 @@ static void pad_partial_page(struct logfs_area *area) memset(page_address(page) + offset, 0xff, len); if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); } - page_cache_release(page); + put_page(page); } } @@ -129,20 +129,20 @@ static void pad_full_pages(struct logfs_area *area) struct logfs_super *super = logfs_super(sb); u64 ofs = dev_ofs(sb, area->a_segno, 
area->a_used_bytes); u32 len = super->s_segsize - area->a_used_bytes; - pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT; - pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT; + pgoff_t index = PAGE_ALIGN(ofs) >> PAGE_SHIFT; + pgoff_t no_indizes = len >> PAGE_SHIFT; struct page *page; while (no_indizes) { page = get_mapping_page(sb, index, 0); BUG_ON(!page); /* FIXME: reserve a pool */ SetPageUptodate(page); - memset(page_address(page), 0xff, PAGE_CACHE_SIZE); + memset(page_address(page), 0xff, PAGE_SIZE); if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); } - page_cache_release(page); + put_page(page); index++; no_indizes--; } @@ -411,7 +411,7 @@ int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf) if (IS_ERR(page)) return PTR_ERR(page); memcpy(buf, page_address(page) + offset, copylen); - page_cache_release(page); + put_page(page); buf += copylen; len -= copylen; @@ -499,7 +499,7 @@ static void move_btree_to_page(struct inode *inode, struct page *page, if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); set_page_private(page, (unsigned long) block); } block->ops = &indirect_block_ops; @@ -554,7 +554,7 @@ void move_page_to_btree(struct page *page) if (PagePrivate(page)) { ClearPagePrivate(page); - page_cache_release(page); + put_page(page); set_page_private(page, 0); } block->ops = &btree_block_ops; @@ -723,9 +723,9 @@ void freeseg(struct super_block *sb, u32 segno) continue; if (PagePrivate(page)) { ClearPagePrivate(page); - page_cache_release(page); + put_page(page); } - page_cache_release(page); + put_page(page); } } diff --git a/fs/logfs/super.c b/fs/logfs/super.c index 54360293bcb5..5751082dba52 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c @@ -48,7 +48,7 @@ void emergency_read_end(struct page *page) if (page == emergency_page) mutex_unlock(&emergency_mutex); else - page_cache_release(page); + put_page(page); } static void dump_segfile(struct super_block *sb) @@ -206,7 +206,7 @@ static int write_one_sb(struct super_block *sb, logfs_set_segment_erased(sb, segno, ec, 0); logfs_write_ds(sb, ds, segno, ec); err = super->s_devops->write_sb(sb, page); - page_cache_release(page); + put_page(page); return err; } @@ -366,24 +366,24 @@ static struct page *find_super_block(struct super_block *sb) return NULL; last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]); if (!last || IS_ERR(last)) { - page_cache_release(first); + put_page(first); return NULL; } if (!logfs_check_ds(page_address(first))) { - page_cache_release(last); + put_page(last); return first; } /* First one didn't work, try the second superblock */ if (!logfs_check_ds(page_address(last))) { - page_cache_release(first); + put_page(first); return last; } /* Neither worked, sorry folks */ - page_cache_release(first); - page_cache_release(last); + put_page(first); + put_page(last); return NULL; } @@ -425,7 +425,7 @@ static int __logfs_read_sb(struct super_block *sb) super->s_data_levels = ds->ds_data_levels; super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels + super->s_data_levels; - page_cache_release(page); + put_page(page); return 0; } diff --git a/fs/minix/dir.c b/fs/minix/dir.c index d19ac258105a..33957c07cd11 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -28,7 +28,7 @@ const struct file_operations minix_dir_operations = { static inline void dir_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } /* @@ -38,10 +38,10 @@ static inline void 
dir_put_page(struct page *page) static unsigned minix_last_byte(struct inode *inode, unsigned long page_nr) { - unsigned last_byte = PAGE_CACHE_SIZE; + unsigned last_byte = PAGE_SIZE; - if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT)) - last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1); + if (page_nr == (inode->i_size >> PAGE_SHIFT)) + last_byte = inode->i_size & (PAGE_SIZE - 1); return last_byte; } @@ -92,8 +92,8 @@ static int minix_readdir(struct file *file, struct dir_context *ctx) if (pos >= inode->i_size) return 0; - offset = pos & ~PAGE_CACHE_MASK; - n = pos >> PAGE_CACHE_SHIFT; + offset = pos & ~PAGE_MASK; + n = pos >> PAGE_SHIFT; for ( ; n < npages; n++, offset = 0) { char *p, *kaddr, *limit; @@ -229,7 +229,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode) lock_page(page); kaddr = (char*)page_address(page); dir_end = kaddr + minix_last_byte(dir, n); - limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize; + limit = kaddr + PAGE_SIZE - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { de = (minix_dirent *)p; de3 = (minix3_dirent *)p; @@ -327,7 +327,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir) } kaddr = kmap_atomic(page); - memset(kaddr, 0, PAGE_CACHE_SIZE); + memset(kaddr, 0, PAGE_SIZE); if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)kaddr; @@ -350,7 +350,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir) err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize); fail: - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/minix/namei.c b/fs/minix/namei.c index a795a11e50c7..2887d1d95ce2 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -243,11 +243,11 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: return err; } diff --git a/fs/mpage.c b/fs/mpage.c index 6bd9fd90964e..eedc644b78d7 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -107,7 +107,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) * don't make any buffers if there is only one buffer on * the page and the page just needs to be set up to date */ - if (inode->i_blkbits == PAGE_CACHE_SHIFT && + if (inode->i_blkbits == PAGE_SHIFT && buffer_uptodate(bh)) { SetPageUptodate(page); return; @@ -145,7 +145,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, { struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; - const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; + const unsigned blocks_per_page = PAGE_SIZE >> blkbits; const unsigned blocksize = 1 << blkbits; sector_t block_in_file; sector_t last_block; @@ -162,7 +162,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, if (page_has_buffers(page)) goto confused; - block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); + block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); last_block = block_in_file + nr_pages * blocks_per_page; last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; if (last_block > last_block_in_file) @@ -249,7 +249,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, } if (first_hole != blocks_per_page) { - zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE); + zero_user_segment(page, first_hole << blkbits, PAGE_SIZE); if (first_hole == 
0) { SetPageUptodate(page); unlock_page(page); @@ -331,7 +331,7 @@ confused: * * then this code just gives up and calls the buffer_head-based read function. * It does handle a page which has holes at the end - that is a common case: - * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. + * the end-of-file on blocksize < PAGE_SIZE setups. * * BH_Boundary explanation: * @@ -380,7 +380,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, &first_logical_block, get_block, gfp); } - page_cache_release(page); + put_page(page); } BUG_ON(!list_empty(pages)); if (bio) @@ -472,7 +472,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; unsigned long end_index; - const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; + const unsigned blocks_per_page = PAGE_SIZE >> blkbits; sector_t last_block; sector_t block_in_file; sector_t blocks[MAX_BUF_PER_PAGE]; @@ -542,7 +542,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, * The page has no buffers: map it to disk */ BUG_ON(!PageUptodate(page)); - block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); + block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); last_block = (i_size - 1) >> blkbits; map_bh.b_page = page; for (page_block = 0; page_block < blocks_per_page; ) { @@ -574,7 +574,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, first_unmapped = page_block; page_is_mapped: - end_index = i_size >> PAGE_CACHE_SHIFT; + end_index = i_size >> PAGE_SHIFT; if (page->index >= end_index) { /* * The page straddles i_size. It must be zeroed out on each @@ -584,11 +584,11 @@ page_is_mapped: * is zeroed when mapped, and writes to that region are not * written out to the file." 
*/ - unsigned offset = i_size & (PAGE_CACHE_SIZE - 1); + unsigned offset = i_size & (PAGE_SIZE - 1); if (page->index > end_index || !offset) goto confused; - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); } /* diff --git a/fs/namei.c b/fs/namei.c index 794f81dce766..1d9ca2d5dff6 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1740,15 +1740,17 @@ static int walk_component(struct nameidata *nd, int flags) nd->flags); if (IS_ERR(path.dentry)) return PTR_ERR(path.dentry); - if (unlikely(d_is_negative(path.dentry))) { - dput(path.dentry); - return -ENOENT; - } + path.mnt = nd->path.mnt; err = follow_managed(&path, nd); if (unlikely(err < 0)) return err; + if (unlikely(d_is_negative(path.dentry))) { + path_to_nameidata(&path, nd); + return -ENOENT; + } + seq = 0; /* we are already out of RCU mode */ inode = d_backing_inode(path.dentry); } diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index b7f8eaeea5d8..bfdad003ee56 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -510,7 +510,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) kunmap(ctl.page); SetPageUptodate(ctl.page); unlock_page(ctl.page); - page_cache_release(ctl.page); + put_page(ctl.page); ctl.page = NULL; } ctl.idx = 0; @@ -520,7 +520,7 @@ invalid_cache: if (ctl.page) { kunmap(ctl.page); unlock_page(ctl.page); - page_cache_release(ctl.page); + put_page(ctl.page); ctl.page = NULL; } ctl.cache = cache; @@ -554,14 +554,14 @@ finished: kunmap(ctl.page); SetPageUptodate(ctl.page); unlock_page(ctl.page); - page_cache_release(ctl.page); + put_page(ctl.page); } if (page) { cache->head = ctl.head; kunmap(page); SetPageUptodate(page); unlock_page(page); - page_cache_release(page); + put_page(page); } out: return result; @@ -649,7 +649,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, kunmap(ctl.page); SetPageUptodate(ctl.page); unlock_page(ctl.page); - page_cache_release(ctl.page); + put_page(ctl.page); } ctl.cache = NULL; ctl.idx -= NCP_DIRCACHE_SIZE; diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h index 5233fbc1747a..17cfb743b5bf 100644 --- a/fs/ncpfs/ncplib_kernel.h +++ b/fs/ncpfs/ncplib_kernel.h @@ -191,7 +191,7 @@ struct ncp_cache_head { int eof; }; -#define NCP_DIRCACHE_SIZE ((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *))) +#define NCP_DIRCACHE_SIZE ((int)(PAGE_SIZE/sizeof(struct dentry *))) union ncp_dir_cache { struct ncp_cache_head head; struct dentry *dentry[NCP_DIRCACHE_SIZE]; diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index ddd0138f410c..17a42e4eb872 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -231,7 +231,7 @@ bl_read_pagelist(struct nfs_pgio_header *header) size_t bytes_left = header->args.count; unsigned int pg_offset = header->args.pgbase, pg_len; struct page **pages = header->args.pages; - int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; + int pg_index = header->args.pgbase >> PAGE_SHIFT; const bool is_dio = (header->dreq != NULL); struct blk_plug plug; int i; @@ -263,13 +263,13 @@ bl_read_pagelist(struct nfs_pgio_header *header) } if (is_dio) { - if (pg_offset + bytes_left > PAGE_CACHE_SIZE) - pg_len = PAGE_CACHE_SIZE - pg_offset; + if (pg_offset + bytes_left > PAGE_SIZE) + pg_len = PAGE_SIZE - pg_offset; else pg_len = bytes_left; } else { BUG_ON(pg_offset != 0); - pg_len = PAGE_CACHE_SIZE; + pg_len = PAGE_SIZE; } if (is_hole(&be)) { @@ -339,9 +339,9 @@ static void bl_write_cleanup(struct work_struct *work) if (likely(!hdr->pnfs_error)) { struct 
pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg); - u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK; + u64 start = hdr->args.offset & (loff_t)PAGE_MASK; u64 end = (hdr->args.offset + hdr->args.count + - PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK; + PAGE_SIZE - 1) & (loff_t)PAGE_MASK; ext_tree_mark_written(bl, start >> SECTOR_SHIFT, (end - start) >> SECTOR_SHIFT); @@ -373,7 +373,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync) loff_t offset = header->args.offset; size_t count = header->args.count; struct page **pages = header->args.pages; - int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; + int pg_index = header->args.pgbase >> PAGE_SHIFT; unsigned int pg_len; struct blk_plug plug; int i; @@ -392,7 +392,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync) blk_start_plug(&plug); /* we always write out the whole page */ - offset = offset & (loff_t)PAGE_CACHE_MASK; + offset = offset & (loff_t)PAGE_MASK; isect = offset >> SECTOR_SHIFT; for (i = pg_index; i < header->page_array.npages; i++) { @@ -408,7 +408,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync) extent_length = be.be_length - (isect - be.be_f_offset); } - pg_len = PAGE_CACHE_SIZE; + pg_len = PAGE_SIZE; bio = do_add_page_to_bio(bio, header->page_array.npages - i, WRITE, isect, pages[i], &map, &be, bl_end_io_write, par, @@ -446,8 +446,8 @@ static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo) kfree(bl); } -static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode, - gfp_t gfp_flags) +static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode, + gfp_t gfp_flags, bool is_scsi_layout) { struct pnfs_block_layout *bl; @@ -460,9 +460,22 @@ static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode, bl->bl_ext_ro = RB_ROOT; spin_lock_init(&bl->bl_ext_lock); + bl->bl_scsi_layout = is_scsi_layout; return &bl->bl_layout; } +static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode, + gfp_t gfp_flags) +{ + return __bl_alloc_layout_hdr(inode, gfp_flags, false); +} + +static struct pnfs_layout_hdr *sl_alloc_layout_hdr(struct inode *inode, + gfp_t gfp_flags) +{ + return __bl_alloc_layout_hdr(inode, gfp_flags, true); +} + static void bl_free_lseg(struct pnfs_layout_segment *lseg) { dprintk("%s enter\n", __func__); @@ -743,7 +756,7 @@ bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh) static bool is_aligned_req(struct nfs_pageio_descriptor *pgio, - struct nfs_page *req, unsigned int alignment) + struct nfs_page *req, unsigned int alignment, bool is_write) { /* * Always accept buffered writes, higher layers take care of the @@ -758,7 +771,8 @@ is_aligned_req(struct nfs_pageio_descriptor *pgio, if (IS_ALIGNED(req->wb_bytes, alignment)) return true; - if (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode)) { + if (is_write && + (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) { /* * If the write goes up to the inode size, just write * the full page. 
Data past the inode size is @@ -775,7 +789,7 @@ is_aligned_req(struct nfs_pageio_descriptor *pgio, static void bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { - if (!is_aligned_req(pgio, req, SECTOR_SIZE)) { + if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) { nfs_pageio_reset_read_mds(pgio); return; } @@ -791,7 +805,7 @@ static size_t bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req) { - if (!is_aligned_req(pgio, req, SECTOR_SIZE)) + if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) return 0; return pnfs_generic_pg_test(pgio, prev, req); } @@ -806,7 +820,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx) pgoff_t end; /* Optimize common case that writes from 0 to end of file */ - end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); + end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); if (end != inode->i_mapping->nrpages) { rcu_read_lock(); end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX); @@ -814,9 +828,9 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx) } if (!end) - return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT); + return i_size_read(inode) - (idx << PAGE_SHIFT); else - return (end - idx) << PAGE_CACHE_SHIFT; + return (end - idx) << PAGE_SHIFT; } static void @@ -824,7 +838,7 @@ bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { u64 wb_size; - if (!is_aligned_req(pgio, req, PAGE_SIZE)) { + if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) { nfs_pageio_reset_write_mds(pgio); return; } @@ -846,7 +860,7 @@ static size_t bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req) { - if (!is_aligned_req(pgio, req, PAGE_SIZE)) + if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) return 0; return pnfs_generic_pg_test(pgio, prev, req); } @@ -888,22 +902,53 @@ static struct pnfs_layoutdriver_type blocklayout_type = { .sync = pnfs_generic_sync, }; +static struct pnfs_layoutdriver_type scsilayout_type = { + .id = LAYOUT_SCSI, + .name = "LAYOUT_SCSI", + .owner = THIS_MODULE, + .flags = PNFS_LAYOUTRET_ON_SETATTR | + PNFS_READ_WHOLE_PAGE, + .read_pagelist = bl_read_pagelist, + .write_pagelist = bl_write_pagelist, + .alloc_layout_hdr = sl_alloc_layout_hdr, + .free_layout_hdr = bl_free_layout_hdr, + .alloc_lseg = bl_alloc_lseg, + .free_lseg = bl_free_lseg, + .return_range = bl_return_range, + .prepare_layoutcommit = bl_prepare_layoutcommit, + .cleanup_layoutcommit = bl_cleanup_layoutcommit, + .set_layoutdriver = bl_set_layoutdriver, + .alloc_deviceid_node = bl_alloc_deviceid_node, + .free_deviceid_node = bl_free_deviceid_node, + .pg_read_ops = &bl_pg_read_ops, + .pg_write_ops = &bl_pg_write_ops, + .sync = pnfs_generic_sync, +}; + + static int __init nfs4blocklayout_init(void) { int ret; dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__); - ret = pnfs_register_layoutdriver(&blocklayout_type); + ret = bl_init_pipefs(); if (ret) goto out; - ret = bl_init_pipefs(); + + ret = pnfs_register_layoutdriver(&blocklayout_type); if (ret) - goto out_unregister; + goto out_cleanup_pipe; + + ret = pnfs_register_layoutdriver(&scsilayout_type); + if (ret) + goto out_unregister_block; return 0; -out_unregister: +out_unregister_block: pnfs_unregister_layoutdriver(&blocklayout_type); +out_cleanup_pipe: + bl_cleanup_pipefs(); out: return ret; } @@ -913,8 +958,9 @@ static void __exit nfs4blocklayout_exit(void) dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n", __func__); - bl_cleanup_pipefs(); + 
pnfs_unregister_layoutdriver(&scsilayout_type); pnfs_unregister_layoutdriver(&blocklayout_type); + bl_cleanup_pipefs(); } MODULE_ALIAS("nfs-layouttype4-3"); diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h index c556640dcf3b..18e6fd0b9506 100644 --- a/fs/nfs/blocklayout/blocklayout.h +++ b/fs/nfs/blocklayout/blocklayout.h @@ -40,8 +40,8 @@ #include "../pnfs.h" #include "../netns.h" -#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT) -#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT) +#define PAGE_CACHE_SECTORS (PAGE_SIZE >> SECTOR_SHIFT) +#define PAGE_CACHE_SECTOR_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define SECTOR_SIZE (1 << SECTOR_SHIFT) struct pnfs_block_dev; @@ -55,7 +55,6 @@ struct pnfs_block_dev; */ #define PNFS_BLOCK_UUID_LEN 128 - struct pnfs_block_volume { enum pnfs_block_volume_type type; union { @@ -82,6 +81,13 @@ struct pnfs_block_volume { u32 volumes_count; u32 volumes[PNFS_BLOCK_MAX_DEVICES]; } stripe; + struct { + enum scsi_code_set code_set; + enum scsi_designator_type designator_type; + int designator_len; + u8 designator[256]; + u64 pr_key; + } scsi; }; }; @@ -106,6 +112,9 @@ struct pnfs_block_dev { struct block_device *bdev; u64 disk_offset; + u64 pr_key; + bool pr_registered; + bool (*map)(struct pnfs_block_dev *dev, u64 offset, struct pnfs_block_dev_map *map); }; @@ -131,6 +140,7 @@ struct pnfs_block_layout { struct rb_root bl_ext_rw; struct rb_root bl_ext_ro; spinlock_t bl_ext_lock; /* Protects list manipulation */ + bool bl_scsi_layout; }; static inline struct pnfs_block_layout * @@ -182,6 +192,6 @@ void ext_tree_mark_committed(struct nfs4_layoutcommit_args *arg, int status); dev_t bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b, gfp_t gfp_mask); int __init bl_init_pipefs(void); -void __exit bl_cleanup_pipefs(void); +void bl_cleanup_pipefs(void); #endif /* FS_NFS_NFS4BLOCKLAYOUT_H */ diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c index a861bbdfe577..e5b89675263e 100644 --- a/fs/nfs/blocklayout/dev.c +++ b/fs/nfs/blocklayout/dev.c @@ -1,11 +1,12 @@ /* - * Copyright (c) 2014 Christoph Hellwig. + * Copyright (c) 2014-2016 Christoph Hellwig. 
*/ #include <linux/sunrpc/svc.h> #include <linux/blkdev.h> #include <linux/nfs4.h> #include <linux/nfs_fs.h> #include <linux/nfs_xdr.h> +#include <linux/pr.h> #include "blocklayout.h" @@ -21,6 +22,17 @@ bl_free_device(struct pnfs_block_dev *dev) bl_free_device(&dev->children[i]); kfree(dev->children); } else { + if (dev->pr_registered) { + const struct pr_ops *ops = + dev->bdev->bd_disk->fops->pr_ops; + int error; + + error = ops->pr_register(dev->bdev, dev->pr_key, 0, + false); + if (error) + pr_err("failed to unregister PR key.\n"); + } + if (dev->bdev) blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE); } @@ -113,6 +125,24 @@ nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b) for (i = 0; i < b->stripe.volumes_count; i++) b->stripe.volumes[i] = be32_to_cpup(p++); break; + case PNFS_BLOCK_VOLUME_SCSI: + p = xdr_inline_decode(xdr, 4 + 4 + 4); + if (!p) + return -EIO; + b->scsi.code_set = be32_to_cpup(p++); + b->scsi.designator_type = be32_to_cpup(p++); + b->scsi.designator_len = be32_to_cpup(p++); + p = xdr_inline_decode(xdr, b->scsi.designator_len); + if (!p) + return -EIO; + if (b->scsi.designator_len > 256) + return -EIO; + memcpy(&b->scsi.designator, p, b->scsi.designator_len); + p = xdr_inline_decode(xdr, 8); + if (!p) + return -EIO; + p = xdr_decode_hyper(p, &b->scsi.pr_key); + break; default: dprintk("unknown volume type!\n"); return -EIO; @@ -216,6 +246,116 @@ bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d, return 0; } +static bool +bl_validate_designator(struct pnfs_block_volume *v) +{ + switch (v->scsi.designator_type) { + case PS_DESIGNATOR_EUI64: + if (v->scsi.code_set != PS_CODE_SET_BINARY) + return false; + + if (v->scsi.designator_len != 8 && + v->scsi.designator_len != 10 && + v->scsi.designator_len != 16) + return false; + + return true; + case PS_DESIGNATOR_NAA: + if (v->scsi.code_set != PS_CODE_SET_BINARY) + return false; + + if (v->scsi.designator_len != 8 && + v->scsi.designator_len != 16) + return false; + + return true; + case PS_DESIGNATOR_T10: + case PS_DESIGNATOR_NAME: + pr_err("pNFS: unsupported designator " + "(code set %d, type %d, len %d.\n", + v->scsi.code_set, + v->scsi.designator_type, + v->scsi.designator_len); + return false; + default: + pr_err("pNFS: invalid designator " + "(code set %d, type %d, len %d.\n", + v->scsi.code_set, + v->scsi.designator_type, + v->scsi.designator_len); + return false; + } +} + +static int +bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d, + struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) +{ + struct pnfs_block_volume *v = &volumes[idx]; + const struct pr_ops *ops; + const char *devname; + int error; + + if (!bl_validate_designator(v)) + return -EINVAL; + + switch (v->scsi.designator_len) { + case 8: + devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%8phN", + v->scsi.designator); + break; + case 12: + devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%12phN", + v->scsi.designator); + break; + case 16: + devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%16phN", + v->scsi.designator); + break; + default: + return -EINVAL; + } + + d->bdev = blkdev_get_by_path(devname, FMODE_READ, NULL); + if (IS_ERR(d->bdev)) { + pr_warn("pNFS: failed to open device %s (%ld)\n", + devname, PTR_ERR(d->bdev)); + kfree(devname); + return PTR_ERR(d->bdev); + } + + kfree(devname); + + d->len = i_size_read(d->bdev->bd_inode); + d->map = bl_map_simple; + d->pr_key = v->scsi.pr_key; + + pr_info("pNFS: using block device %s (reservation key 0x%llx)\n", + 
d->bdev->bd_disk->disk_name, d->pr_key); + + ops = d->bdev->bd_disk->fops->pr_ops; + if (!ops) { + pr_err("pNFS: block device %s does not support reservations.", + d->bdev->bd_disk->disk_name); + error = -EINVAL; + goto out_blkdev_put; + } + + error = ops->pr_register(d->bdev, 0, d->pr_key, true); + if (error) { + pr_err("pNFS: failed to register key for block device %s.", + d->bdev->bd_disk->disk_name); + goto out_blkdev_put; + } + + d->pr_registered = true; + return 0; + +out_blkdev_put: + blkdev_put(d->bdev, FMODE_READ); + return error; +} + static int bl_parse_slice(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) @@ -303,6 +443,8 @@ bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d, return bl_parse_concat(server, d, volumes, idx, gfp_mask); case PNFS_BLOCK_VOLUME_STRIPE: return bl_parse_stripe(server, d, volumes, idx, gfp_mask); + case PNFS_BLOCK_VOLUME_SCSI: + return bl_parse_scsi(server, d, volumes, idx, gfp_mask); default: dprintk("unsupported volume type: %d\n", volumes[idx].type); return -EIO; diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c index 35ab51c04814..720b3ff55fa9 100644 --- a/fs/nfs/blocklayout/extent_tree.c +++ b/fs/nfs/blocklayout/extent_tree.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Christoph Hellwig. + * Copyright (c) 2014-2016 Christoph Hellwig. */ #include <linux/vmalloc.h> @@ -462,10 +462,12 @@ out: return err; } -static size_t ext_tree_layoutupdate_size(size_t count) +static size_t ext_tree_layoutupdate_size(struct pnfs_block_layout *bl, size_t count) { - return sizeof(__be32) /* number of entries */ + - PNFS_BLOCK_EXTENT_SIZE * count; + if (bl->bl_scsi_layout) + return sizeof(__be32) + PNFS_SCSI_RANGE_SIZE * count; + else + return sizeof(__be32) + PNFS_BLOCK_EXTENT_SIZE * count; } static void ext_tree_free_commitdata(struct nfs4_layoutcommit_args *arg, @@ -483,6 +485,23 @@ static void ext_tree_free_commitdata(struct nfs4_layoutcommit_args *arg, } } +static __be32 *encode_block_extent(struct pnfs_block_extent *be, __be32 *p) +{ + p = xdr_encode_opaque_fixed(p, be->be_device->deviceid.data, + NFS4_DEVICEID4_SIZE); + p = xdr_encode_hyper(p, be->be_f_offset << SECTOR_SHIFT); + p = xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT); + p = xdr_encode_hyper(p, 0LL); + *p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA); + return p; +} + +static __be32 *encode_scsi_range(struct pnfs_block_extent *be, __be32 *p) +{ + p = xdr_encode_hyper(p, be->be_f_offset << SECTOR_SHIFT); + return xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT); +} + static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p, size_t buffer_size, size_t *count) { @@ -496,19 +515,16 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p, continue; (*count)++; - if (ext_tree_layoutupdate_size(*count) > buffer_size) { + if (ext_tree_layoutupdate_size(bl, *count) > buffer_size) { /* keep counting.. 
*/ ret = -ENOSPC; continue; } - p = xdr_encode_opaque_fixed(p, be->be_device->deviceid.data, - NFS4_DEVICEID4_SIZE); - p = xdr_encode_hyper(p, be->be_f_offset << SECTOR_SHIFT); - p = xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT); - p = xdr_encode_hyper(p, 0LL); - *p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA); - + if (bl->bl_scsi_layout) + p = encode_scsi_range(be, p); + else + p = encode_block_extent(be, p); be->be_tag = EXTENT_COMMITTING; } spin_unlock(&bl->bl_ext_lock); @@ -537,7 +553,7 @@ retry: if (unlikely(ret)) { ext_tree_free_commitdata(arg, buffer_size); - buffer_size = ext_tree_layoutupdate_size(count); + buffer_size = ext_tree_layoutupdate_size(bl, count); count = 0; arg->layoutupdate_pages = @@ -556,7 +572,7 @@ retry: } *start_p = cpu_to_be32(count); - arg->layoutupdate_len = ext_tree_layoutupdate_size(count); + arg->layoutupdate_len = ext_tree_layoutupdate_size(bl, count); if (unlikely(arg->layoutupdate_pages != &arg->layoutupdate_page)) { void *p = start_p, *end = p + arg->layoutupdate_len; diff --git a/fs/nfs/blocklayout/rpc_pipefs.c b/fs/nfs/blocklayout/rpc_pipefs.c index dbe5839cdeba..9fb067a6f7e0 100644 --- a/fs/nfs/blocklayout/rpc_pipefs.c +++ b/fs/nfs/blocklayout/rpc_pipefs.c @@ -281,7 +281,7 @@ out: return ret; } -void __exit bl_cleanup_pipefs(void) +void bl_cleanup_pipefs(void) { rpc_pipefs_notifier_unregister(&nfs4blocklayout_block); unregister_pernet_subsys(&nfs4blocklayout_net_ops); diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index ff8195bd75ea..5fe1cecbf9f0 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -37,10 +37,11 @@ enum nfs4_callback_opnum { OP_CB_ILLEGAL = 10044, }; +struct nfs4_slot; struct cb_process_state { __be32 drc_status; struct nfs_client *clp; - u32 slotid; + struct nfs4_slot *slot; u32 minorversion; struct net *net; }; diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index f0939d097406..618ced381a14 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -354,47 +354,38 @@ out: * a single outstanding callback request at a time. */ static __be32 -validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) +validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot, + const struct cb_sequenceargs * args) { - struct nfs4_slot *slot; - - dprintk("%s enter. slotid %u seqid %u\n", - __func__, args->csa_slotid, args->csa_sequenceid); + dprintk("%s enter. slotid %u seqid %u, slot table seqid: %u\n", + __func__, args->csa_slotid, args->csa_sequenceid, slot->seq_nr); - if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS) + if (args->csa_slotid > tbl->server_highest_slotid) return htonl(NFS4ERR_BADSLOT); - slot = tbl->slots + args->csa_slotid; - dprintk("%s slot table seqid: %u\n", __func__, slot->seq_nr); - - /* Normal */ - if (likely(args->csa_sequenceid == slot->seq_nr + 1)) - goto out_ok; - /* Replay */ if (args->csa_sequenceid == slot->seq_nr) { dprintk("%s seqid %u is a replay\n", __func__, args->csa_sequenceid); + if (nfs4_test_locked_slot(tbl, slot->slot_nr)) + return htonl(NFS4ERR_DELAY); /* Signal process_op to set this error on next op */ if (args->csa_cachethis == 0) return htonl(NFS4ERR_RETRY_UNCACHED_REP); - /* The ca_maxresponsesize_cached is 0 with no DRC */ - else if (args->csa_cachethis == 1) - return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE); + /* Liar! 
We never allowed you to set csa_cachethis != 0 */ + return htonl(NFS4ERR_SEQ_FALSE_RETRY); } /* Wraparound */ - if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) { - slot->seq_nr = 1; - goto out_ok; - } + if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) { + if (args->csa_sequenceid == 1) + return htonl(NFS4_OK); + } else if (likely(args->csa_sequenceid == slot->seq_nr + 1)) + return htonl(NFS4_OK); /* Misordered request */ return htonl(NFS4ERR_SEQ_MISORDERED); -out_ok: - tbl->highest_used_slotid = args->csa_slotid; - return htonl(NFS4_OK); } /* @@ -473,6 +464,12 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, tbl = &clp->cl_session->bc_slot_table; slot = tbl->slots + args->csa_slotid; + /* Set up res before grabbing the spinlock */ + memcpy(&res->csr_sessionid, &args->csa_sessionid, + sizeof(res->csr_sessionid)); + res->csr_sequenceid = args->csa_sequenceid; + res->csr_slotid = args->csa_slotid; + spin_lock(&tbl->slot_tbl_lock); /* state manager is resetting the session */ if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) { @@ -485,18 +482,26 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, goto out_unlock; } - memcpy(&res->csr_sessionid, &args->csa_sessionid, - sizeof(res->csr_sessionid)); - res->csr_sequenceid = args->csa_sequenceid; - res->csr_slotid = args->csa_slotid; - res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; - res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; + status = htonl(NFS4ERR_BADSLOT); + slot = nfs4_lookup_slot(tbl, args->csa_slotid); + if (IS_ERR(slot)) + goto out_unlock; + + res->csr_highestslotid = tbl->server_highest_slotid; + res->csr_target_highestslotid = tbl->target_highest_slotid; - status = validate_seqid(tbl, args); + status = validate_seqid(tbl, slot, args); if (status) goto out_unlock; + if (!nfs4_try_to_lock_slot(tbl, slot)) { + status = htonl(NFS4ERR_DELAY); + goto out_unlock; + } + cps->slot = slot; - cps->slotid = args->csa_slotid; + /* The ca_maxresponsesize_cached is 0 with no DRC */ + if (args->csa_cachethis != 0) + return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE); /* * Check for pending referring calls. If a match is found, a @@ -513,7 +518,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, * If CB_SEQUENCE returns an error, then the state of the slot * (sequence ID, cached reply) MUST NOT change. */ - slot->seq_nr++; + slot->seq_nr = args->csa_sequenceid; out_unlock: spin_unlock(&tbl->slot_tbl_lock); diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 646cdac73488..976c90608e56 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -752,7 +752,8 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) return htonl(NFS_OK); } -static void nfs4_callback_free_slot(struct nfs4_session *session) +static void nfs4_callback_free_slot(struct nfs4_session *session, + struct nfs4_slot *slot) { struct nfs4_slot_table *tbl = &session->bc_slot_table; @@ -761,15 +762,17 @@ static void nfs4_callback_free_slot(struct nfs4_session *session) * Let the state manager know callback processing done. 
* A single slot, so highest used slotid is either 0 or -1 */ - tbl->highest_used_slotid = NFS4_NO_SLOT; + nfs4_free_slot(tbl, slot); nfs4_slot_tbl_drain_complete(tbl); spin_unlock(&tbl->slot_tbl_lock); } static void nfs4_cb_free_slot(struct cb_process_state *cps) { - if (cps->slotid != NFS4_NO_SLOT) - nfs4_callback_free_slot(cps->clp->cl_session); + if (cps->slot) { + nfs4_callback_free_slot(cps->clp->cl_session, cps->slot); + cps->slot = NULL; + } } #else /* CONFIG_NFS_V4_1 */ @@ -893,7 +896,6 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r struct cb_process_state cps = { .drc_status = 0, .clp = NULL, - .slotid = NFS4_NO_SLOT, .net = SVC_NET(rqstp), }; unsigned int nops = 0; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index d6d5d2a48e83..0c96528db94a 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -736,7 +736,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, server->rsize = max_rpc_payload; if (server->rsize > NFS_MAX_FILE_IO_SIZE) server->rsize = NFS_MAX_FILE_IO_SIZE; - server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + server->rpages = (server->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT; server->backing_dev_info.name = "nfs"; server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD; @@ -745,13 +745,13 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, server->wsize = max_rpc_payload; if (server->wsize > NFS_MAX_FILE_IO_SIZE) server->wsize = NFS_MAX_FILE_IO_SIZE; - server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT; server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL); server->dtsize = nfs_block_size(fsinfo->dtpref, NULL); - if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES) - server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES; + if (server->dtsize > PAGE_SIZE * NFS_MAX_READDIR_PAGES) + server->dtsize = PAGE_SIZE * NFS_MAX_READDIR_PAGES; if (server->dtsize > server->rsize) server->dtsize = server->rsize; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a89d32a38e88..33eb81738d03 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -707,7 +707,7 @@ void cache_page_release(nfs_readdir_descriptor_t *desc) { if (!desc->page->mapping) nfs_readdir_clear_array(desc->page); - page_cache_release(desc->page); + put_page(desc->page); desc->page = NULL; } @@ -1923,7 +1923,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) * add_to_page_cache_lru() grabs an extra page refcount. * Drop it here to avoid leaking this page later. 
*/ - page_cache_release(page); + put_page(page); } else __free_page(page); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 7a0cfd3266e5..c93826e4a8c6 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -269,7 +269,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages) { unsigned int i; for (i = 0; i < npages; i++) - page_cache_release(pages[i]); + put_page(pages[i]); } void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo, @@ -1003,7 +1003,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) iov_iter_count(iter)); pos = iocb->ki_pos; - end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT; + end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT; inode_lock(inode); @@ -1013,7 +1013,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) if (mapping->nrpages) { result = invalidate_inode_pages2_range(mapping, - pos >> PAGE_CACHE_SHIFT, end); + pos >> PAGE_SHIFT, end); if (result) goto out_unlock; } @@ -1042,7 +1042,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) if (mapping->nrpages) { invalidate_inode_pages2_range(mapping, - pos >> PAGE_CACHE_SHIFT, end); + pos >> PAGE_SHIFT, end); } inode_unlock(inode); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 748bb813b8ec..be01095b97ae 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(nfs_file_mmap); * nfs_file_write() that a write error occurred, and hence cause it to * fall back to doing a synchronous write. */ -int +static int nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync) { struct nfs_open_context *ctx = nfs_file_open_context(file); @@ -263,9 +263,8 @@ nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync) out: return ret; } -EXPORT_SYMBOL_GPL(nfs_file_fsync_commit); -static int +int nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) { int ret; @@ -273,13 +272,15 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) trace_nfs_fsync_enter(inode); - nfs_inode_dio_wait(inode); + inode_dio_wait(inode); do { ret = filemap_write_and_wait_range(inode->i_mapping, start, end); if (ret != 0) break; inode_lock(inode); ret = nfs_file_fsync_commit(file, start, end, datasync); + if (!ret) + ret = pnfs_sync_inode(inode, !!datasync); inode_unlock(inode); /* * If nfs_file_fsync_commit detected a server reboot, then @@ -293,6 +294,7 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) trace_nfs_fsync_exit(inode, ret); return ret; } +EXPORT_SYMBOL_GPL(nfs_file_fsync); /* * Decide whether a read/modify/write cycle may be more efficient @@ -318,7 +320,7 @@ static int nfs_want_read_modify_write(struct file *file, struct page *page, loff_t pos, unsigned len) { unsigned int pglen = nfs_page_length(page); - unsigned int offset = pos & (PAGE_CACHE_SIZE - 1); + unsigned int offset = pos & (PAGE_SIZE - 1); unsigned int end = offset + len; if (pnfs_ld_read_whole_page(file->f_mapping->host)) { @@ -349,7 +351,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping, struct page **pagep, void **fsdata) { int ret; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int once_thru = 0; @@ -368,7 +370,7 @@ start: /* * Wait for O_DIRECT to complete */ - nfs_inode_dio_wait(mapping->host); + inode_dio_wait(mapping->host); page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -378,12 +380,12 @@ start: ret = 
nfs_flush_incompatible(file, page); if (ret) { unlock_page(page); - page_cache_release(page); + put_page(page); } else if (!once_thru && nfs_want_read_modify_write(file, page, pos, len)) { once_thru = 1; ret = nfs_readpage(file, page); - page_cache_release(page); + put_page(page); if (!ret) goto start; } @@ -394,7 +396,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - unsigned offset = pos & (PAGE_CACHE_SIZE - 1); + unsigned offset = pos & (PAGE_SIZE - 1); struct nfs_open_context *ctx = nfs_file_open_context(file); int status; @@ -411,20 +413,20 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, if (pglen == 0) { zero_user_segments(page, 0, offset, - end, PAGE_CACHE_SIZE); + end, PAGE_SIZE); SetPageUptodate(page); } else if (end >= pglen) { - zero_user_segment(page, end, PAGE_CACHE_SIZE); + zero_user_segment(page, end, PAGE_SIZE); if (offset == 0) SetPageUptodate(page); } else - zero_user_segment(page, pglen, PAGE_CACHE_SIZE); + zero_user_segment(page, pglen, PAGE_SIZE); } status = nfs_updatepage(file, page, offset, copied); unlock_page(page); - page_cache_release(page); + put_page(page); if (status < 0) return status; @@ -452,7 +454,7 @@ static void nfs_invalidate_page(struct page *page, unsigned int offset, dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n", page, offset, length); - if (offset != 0 || length < PAGE_CACHE_SIZE) + if (offset != 0 || length < PAGE_SIZE) return; /* Cancel any unstarted writes on this page */ nfs_wb_page_cancel(page_file_mapping(page)->host, page); diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index eb370460ce20..add0e5a70bd6 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -418,6 +418,8 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, pnfs_error_mark_layout_for_return(ino, lseg); } else pnfs_error_mark_layout_for_return(ino, lseg); + ds = NULL; + goto out; } out_update_creds: if (ff_layout_update_mirror_cred(mirror, ds)) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 847b678af4f0..738c84a42eb0 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -141,7 +141,7 @@ void nfs_evict_inode(struct inode *inode) int nfs_sync_inode(struct inode *inode) { - nfs_inode_dio_wait(inode); + inode_dio_wait(inode); return nfs_wb_all(inode); } EXPORT_SYMBOL_GPL(nfs_sync_inode); diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 9a547aa3ec8e..f1d1d2c472e9 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -358,7 +358,7 @@ int nfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); /* file.c */ -int nfs_file_fsync_commit(struct file *, loff_t, loff_t, int); +int nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync); loff_t nfs_file_llseek(struct file *, loff_t, int); ssize_t nfs_file_read(struct kiocb *, struct iov_iter *); ssize_t nfs_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *, @@ -515,10 +515,6 @@ extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry); /* direct.c */ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo, struct nfs_direct_req *dreq); -static inline void nfs_inode_dio_wait(struct inode *inode) -{ - inode_dio_wait(inode); -} extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq); /* nfs4proc.c */ @@ -642,11 +638,11 
@@ unsigned int nfs_page_length(struct page *page) if (i_size > 0) { pgoff_t page_index = page_file_index(page); - pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; + pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT; if (page_index < end_index) - return PAGE_CACHE_SIZE; + return PAGE_SIZE; if (page_index == end_index) - return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1; + return ((i_size - 1) & ~PAGE_MASK) + 1; } return 0; } diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 2a9ff14cfb3b..d0390516467c 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -128,37 +128,6 @@ nfs4_file_flush(struct file *file, fl_owner_t id) return vfs_fsync(file, 0); } -static int -nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) -{ - int ret; - struct inode *inode = file_inode(file); - - trace_nfs_fsync_enter(inode); - - nfs_inode_dio_wait(inode); - do { - ret = filemap_write_and_wait_range(inode->i_mapping, start, end); - if (ret != 0) - break; - inode_lock(inode); - ret = nfs_file_fsync_commit(file, start, end, datasync); - if (!ret) - ret = pnfs_sync_inode(inode, !!datasync); - inode_unlock(inode); - /* - * If nfs_file_fsync_commit detected a server reboot, then - * resend all dirty pages that might have been covered by - * the NFS_CONTEXT_RESEND_WRITES flag - */ - start = 0; - end = LLONG_MAX; - } while (ret == -EAGAIN); - - trace_nfs_fsync_exit(inode, ret); - return ret; -} - #ifdef CONFIG_NFS_V4_2 static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) { @@ -266,7 +235,7 @@ const struct file_operations nfs4_file_operations = { .open = nfs4_file_open, .flush = nfs4_file_flush, .release = nfs_file_release, - .fsync = nfs4_file_fsync, + .fsync = nfs_file_fsync, .lock = nfs_lock, .flock = nfs_flock, .splice_read = nfs_file_splice_read, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 400a70b3be7b..327b8c34d360 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6783,13 +6783,26 @@ nfs41_same_server_scope(struct nfs41_server_scope *a, return false; } +static void +nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) +{ +} + +static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { + .rpc_call_done = &nfs4_bind_one_conn_to_session_done, +}; + /* - * nfs4_proc_bind_conn_to_session() + * nfs4_proc_bind_one_conn_to_session() * * The 4.1 client currently uses the same TCP connection for the * fore and backchannel. 
*/ -int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) +static +int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, + struct rpc_xprt *xprt, + struct nfs_client *clp, + struct rpc_cred *cred) { int status; struct nfs41_bind_conn_to_session_args args = { @@ -6804,6 +6817,14 @@ int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred .rpc_resp = &res, .rpc_cred = cred, }; + struct rpc_task_setup task_setup_data = { + .rpc_client = clnt, + .rpc_xprt = xprt, + .callback_ops = &nfs4_bind_one_conn_to_session_ops, + .rpc_message = &msg, + .flags = RPC_TASK_TIMEOUT, + }; + struct rpc_task *task; dprintk("--> %s\n", __func__); @@ -6811,7 +6832,16 @@ int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) args.dir = NFS4_CDFC4_FORE; - status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); + /* Do not set the backchannel flag unless this is clnt->cl_xprt */ + if (xprt != rcu_access_pointer(clnt->cl_xprt)) + args.dir = NFS4_CDFC4_FORE; + + task = rpc_run_task(&task_setup_data); + if (!IS_ERR(task)) { + status = task->tk_status; + rpc_put_task(task); + } else + status = PTR_ERR(task); trace_nfs4_bind_conn_to_session(clp, status); if (status == 0) { if (memcmp(res.sessionid.data, @@ -6838,6 +6868,31 @@ out: return status; } +struct rpc_bind_conn_calldata { + struct nfs_client *clp; + struct rpc_cred *cred; +}; + +static int +nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, + struct rpc_xprt *xprt, + void *calldata) +{ + struct rpc_bind_conn_calldata *p = calldata; + + return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); +} + +int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) +{ + struct rpc_bind_conn_calldata data = { + .clp = clp, + .cred = cred, + }; + return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, + nfs4_proc_bind_conn_to_session_callback, &data); +} + /* * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map * and operations we'd like to see to enable certain features in the allow map @@ -7320,7 +7375,7 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) args->bc_attrs.max_resp_sz = PAGE_SIZE; args->bc_attrs.max_resp_sz_cached = 0; args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; - args->bc_attrs.max_reqs = 1; + args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS; dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c index e23366effcfb..332d06e64fa9 100644 --- a/fs/nfs/nfs4session.c +++ b/fs/nfs/nfs4session.c @@ -135,6 +135,43 @@ static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl, return ERR_PTR(-ENOMEM); } +static void nfs4_lock_slot(struct nfs4_slot_table *tbl, + struct nfs4_slot *slot) +{ + u32 slotid = slot->slot_nr; + + __set_bit(slotid, tbl->used_slots); + if (slotid > tbl->highest_used_slotid || + tbl->highest_used_slotid == NFS4_NO_SLOT) + tbl->highest_used_slotid = slotid; + slot->generation = tbl->generation; +} + +/* + * nfs4_try_to_lock_slot - Given a slot try to allocate it + * + * Note: must be called with the slot_tbl_lock held. 
+ */ +bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot) +{ + if (nfs4_test_locked_slot(tbl, slot->slot_nr)) + return false; + nfs4_lock_slot(tbl, slot); + return true; +} + +/* + * nfs4_lookup_slot - Find a slot but don't allocate it + * + * Note: must be called with the slot_tbl_lock held. + */ +struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid) +{ + if (slotid <= tbl->max_slotid) + return nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT); + return ERR_PTR(-E2BIG); +} + /* * nfs4_alloc_slot - efficiently look for a free slot * @@ -153,18 +190,11 @@ struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl) __func__, tbl->used_slots[0], tbl->highest_used_slotid, tbl->max_slotid + 1); slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1); - if (slotid > tbl->max_slotid) - goto out; - ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT); - if (IS_ERR(ret)) - goto out; - __set_bit(slotid, tbl->used_slots); - if (slotid > tbl->highest_used_slotid || - tbl->highest_used_slotid == NFS4_NO_SLOT) - tbl->highest_used_slotid = slotid; - ret->generation = tbl->generation; - -out: + if (slotid <= tbl->max_slotid) { + ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT); + if (!IS_ERR(ret)) + nfs4_lock_slot(tbl, ret); + } dprintk("<-- %s used_slots=%04lx highest_used=%u slotid=%u\n", __func__, tbl->used_slots[0], tbl->highest_used_slotid, !IS_ERR(ret) ? ret->slot_nr : NFS4_NO_SLOT); diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index e3ea2c5324d6..5b51298d1d03 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h @@ -77,6 +77,8 @@ extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, unsigned int max_reqs, const char *queue); extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl); extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl); +extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid); +extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot); extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot); extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl); bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl, @@ -88,6 +90,12 @@ static inline bool nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl) return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state); } +static inline bool nfs4_test_locked_slot(const struct nfs4_slot_table *tbl, + u32 slotid) +{ + return !!test_bit(slotid, tbl->used_slots); +} + #if defined(CONFIG_NFS_V4_1) extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl, u32 target_highest_slotid); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4e4441216804..88474a4fc669 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5001,7 +5001,7 @@ static int decode_space_limit(struct xdr_stream *xdr, blocksize = be32_to_cpup(p); maxsize = (uint64_t)nblocks * (uint64_t)blocksize; } - maxsize >>= PAGE_CACHE_SHIFT; + maxsize >>= PAGE_SHIFT; *pagemod_limit = min_t(u64, maxsize, ULONG_MAX); return 0; out_overflow: diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 9aebffb40505..049c1b1f2932 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -486,7 +486,7 @@ static void __r4w_put_page(void *priv, struct page *page) dprintk("%s: index=0x%lx\n", __func__, (page == ZERO_PAGE(0)) ? 
-1UL : page->index); if (ZERO_PAGE(0) != page) - page_cache_release(page); + put_page(page); return; } diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 8ce4f61cbaa5..1f6db4231057 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -342,7 +342,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page, * update_nfs_request below if the region is not locked. */ req->wb_page = page; req->wb_index = page_file_index(page); - page_cache_get(page); + get_page(page); req->wb_offset = offset; req->wb_pgbase = offset; req->wb_bytes = count; @@ -392,7 +392,7 @@ static void nfs_clear_request(struct nfs_page *req) struct nfs_lock_context *l_ctx = req->wb_lock_context; if (page != NULL) { - page_cache_release(page); + put_page(page); req->wb_page = NULL; } if (l_ctx != NULL) { @@ -904,7 +904,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev, return false; } else { if (req->wb_pgbase != 0 || - prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE) + prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE) return false; } } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 2fa483e6dbe2..89a5ef4df08a 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -841,7 +841,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, i_size = i_size_read(ino); - lgp->args.minlength = PAGE_CACHE_SIZE; + lgp->args.minlength = PAGE_SIZE; if (lgp->args.minlength > range->length) lgp->args.minlength = range->length; if (range->iomode == IOMODE_READ) { @@ -1618,13 +1618,13 @@ lookup_again: spin_unlock(&clp->cl_lock); } - pg_offset = arg.offset & ~PAGE_CACHE_MASK; + pg_offset = arg.offset & ~PAGE_MASK; if (pg_offset) { arg.offset -= pg_offset; arg.length += pg_offset; } if (arg.length != NFS4_MAX_UINT64) - arg.length = PAGE_CACHE_ALIGN(arg.length); + arg.length = PAGE_ALIGN(arg.length); lseg = send_layoutget(lo, ctx, &arg, gfp_flags); atomic_dec(&lo->plh_outstanding); diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 81ac6480f9e7..4aaed890048f 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -606,12 +606,22 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv, dprintk("%s: DS %s: trying address %s\n", __func__, ds->ds_remotestr, da->da_remotestr); - clp = get_v3_ds_connect(mds_srv->nfs_client, + if (!IS_ERR(clp)) { + struct xprt_create xprt_args = { + .ident = XPRT_TRANSPORT_TCP, + .net = clp->cl_net, + .dstaddr = (struct sockaddr *)&da->da_addr, + .addrlen = da->da_addrlen, + .servername = clp->cl_hostname, + }; + /* Add this address as an alias */ + rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, + rpc_clnt_test_and_add_xprt, NULL); + } else + clp = get_v3_ds_connect(mds_srv->nfs_client, (struct sockaddr *)&da->da_addr, da->da_addrlen, IPPROTO_TCP, timeo, retrans, au_flavor); - if (!IS_ERR(clp)) - break; } if (IS_ERR(clp)) { diff --git a/fs/nfs/read.c b/fs/nfs/read.c index eb31e23e7def..6776d7a7839e 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -46,7 +46,7 @@ static void nfs_readhdr_free(struct nfs_pgio_header *rhdr) static int nfs_return_empty_page(struct page *page) { - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); SetPageUptodate(page); unlock_page(page); return 0; @@ -118,8 +118,8 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, unlock_page(page); return PTR_ERR(new); } - if (len < PAGE_CACHE_SIZE) - zero_user_segment(page, len, PAGE_CACHE_SIZE); + if (len < PAGE_SIZE) + zero_user_segment(page, len, PAGE_SIZE); nfs_pageio_init_read(&pgio, inode, false, &nfs_async_read_completion_ops); @@ -295,7 +295,7 @@ int 
nfs_readpage(struct file *file, struct page *page) int error; dprintk("NFS: nfs_readpage (%p %ld@%lu)\n", - page, PAGE_CACHE_SIZE, page_file_index(page)); + page, PAGE_SIZE, page_file_index(page)); nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); nfs_add_stats(inode, NFSIOS_READPAGES, 1); @@ -361,8 +361,8 @@ readpage_async_filler(void *data, struct page *page) if (IS_ERR(new)) goto out_error; - if (len < PAGE_CACHE_SIZE) - zero_user_segment(page, len, PAGE_CACHE_SIZE); + if (len < PAGE_SIZE) + zero_user_segment(page, len, PAGE_SIZE); if (!nfs_pageio_add_request(desc->pgio, new)) { nfs_list_remove_request(new); nfs_readpage_release(new); @@ -424,8 +424,8 @@ int nfs_readpages(struct file *filp, struct address_space *mapping, pgm = &pgio.pg_mirrors[0]; NFS_I(inode)->read_io += pgm->pg_bytes_written; - npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> + PAGE_SHIFT; nfs_add_stats(inode, NFSIOS_READPAGES, npages); read_complete: put_nfs_open_context(desc.ctx); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 5754835a2886..5f4fd53e5764 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -150,7 +150,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c spin_lock(&inode->i_lock); i_size = i_size_read(inode); - end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; + end_index = (i_size - 1) >> PAGE_SHIFT; if (i_size > 0 && page_file_index(page) < end_index) goto out; end = page_file_offset(page) + ((loff_t)offset+count); @@ -1942,7 +1942,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder) { loff_t range_start = page_file_offset(page); - loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); + loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1); struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index a0b77fc1bd39..c9f583d7bac8 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -84,12 +84,30 @@ config NFSD_V4 If unsure, say N. config NFSD_PNFS - bool "NFSv4.1 server support for Parallel NFS (pNFS)" - depends on NFSD_V4 + bool + +config NFSD_BLOCKLAYOUT + bool "NFSv4.1 server support for pNFS block layouts" + depends on NFSD_V4 && BLOCK + select NFSD_PNFS + help + This option enables support for the exporting pNFS block layouts + in the kernel's NFS server. The pNFS block layout enables NFS + clients to directly perform I/O to block devices accesible to both + the server and the clients. See RFC 5663 for more details. + + If unsure, say N. + +config NFSD_SCSILAYOUT + bool "NFSv4.1 server support for pNFS SCSI layouts" + depends on NFSD_V4 && BLOCK + select NFSD_PNFS help - This option enables support for the parallel NFS features of the - minor version 1 of the NFSv4 protocol (RFC5661) in the kernel's NFS - server. + This option enables support for the exporting pNFS SCSI layouts + in the kernel's NFS server. The pNFS SCSI layout enables NFS + clients to directly perform I/O to SCSI devices accesible to both + the server and the clients. See draft-ietf-nfsv4-scsi-layout for + more details. If unsure, say N. 
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile index 9a6028e120c6..3ae5f3c77e28 100644 --- a/fs/nfsd/Makefile +++ b/fs/nfsd/Makefile @@ -17,4 +17,6 @@ nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \ nfs4acl.o nfs4callback.o nfs4recover.o -nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o blocklayout.o blocklayoutxdr.o +nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o +nfsd-$(CONFIG_NFSD_BLOCKLAYOUT) += blocklayout.o blocklayoutxdr.o +nfsd-$(CONFIG_NFSD_SCSILAYOUT) += blocklayout.o blocklayoutxdr.o diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index c29d9421bd5e..e55b5242614d 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -1,11 +1,14 @@ /* - * Copyright (c) 2014 Christoph Hellwig. + * Copyright (c) 2014-2016 Christoph Hellwig. */ #include <linux/exportfs.h> #include <linux/genhd.h> #include <linux/slab.h> +#include <linux/pr.h> #include <linux/nfsd/debug.h> +#include <scsi/scsi_proto.h> +#include <scsi/scsi_common.h> #include "blocklayoutxdr.h" #include "pnfs.h" @@ -13,37 +16,6 @@ #define NFSDDBG_FACILITY NFSDDBG_PNFS -static int -nfsd4_block_get_device_info_simple(struct super_block *sb, - struct nfsd4_getdeviceinfo *gdp) -{ - struct pnfs_block_deviceaddr *dev; - struct pnfs_block_volume *b; - - dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) + - sizeof(struct pnfs_block_volume), GFP_KERNEL); - if (!dev) - return -ENOMEM; - gdp->gd_device = dev; - - dev->nr_volumes = 1; - b = &dev->volumes[0]; - - b->type = PNFS_BLOCK_VOLUME_SIMPLE; - b->simple.sig_len = PNFS_BLOCK_UUID_LEN; - return sb->s_export_op->get_uuid(sb, b->simple.sig, &b->simple.sig_len, - &b->simple.offset); -} - -static __be32 -nfsd4_block_proc_getdeviceinfo(struct super_block *sb, - struct nfsd4_getdeviceinfo *gdp) -{ - if (sb->s_bdev != sb->s_bdev->bd_contains) - return nfserr_inval; - return nfserrno(nfsd4_block_get_device_info_simple(sb, gdp)); -} - static __be32 nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, struct nfsd4_layoutget *args) @@ -141,20 +113,13 @@ out_layoutunavailable: } static __be32 -nfsd4_block_proc_layoutcommit(struct inode *inode, - struct nfsd4_layoutcommit *lcp) +nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp, + struct iomap *iomaps, int nr_iomaps) { loff_t new_size = lcp->lc_last_wr + 1; struct iattr iattr = { .ia_valid = 0 }; - struct iomap *iomaps; - int nr_iomaps; int error; - nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout, - lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits); - if (nr_iomaps < 0) - return nfserrno(nr_iomaps); - if (lcp->lc_mtime.tv_nsec == UTIME_NOW || timespec_compare(&lcp->lc_mtime, &inode->i_mtime) < 0) lcp->lc_mtime = current_fs_time(inode->i_sb); @@ -172,6 +137,54 @@ nfsd4_block_proc_layoutcommit(struct inode *inode, return nfserrno(error); } +#ifdef CONFIG_NFSD_BLOCKLAYOUT +static int +nfsd4_block_get_device_info_simple(struct super_block *sb, + struct nfsd4_getdeviceinfo *gdp) +{ + struct pnfs_block_deviceaddr *dev; + struct pnfs_block_volume *b; + + dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) + + sizeof(struct pnfs_block_volume), GFP_KERNEL); + if (!dev) + return -ENOMEM; + gdp->gd_device = dev; + + dev->nr_volumes = 1; + b = &dev->volumes[0]; + + b->type = PNFS_BLOCK_VOLUME_SIMPLE; + b->simple.sig_len = PNFS_BLOCK_UUID_LEN; + return sb->s_export_op->get_uuid(sb, b->simple.sig, &b->simple.sig_len, + &b->simple.offset); +} + +static __be32 
+nfsd4_block_proc_getdeviceinfo(struct super_block *sb, + struct nfs4_client *clp, + struct nfsd4_getdeviceinfo *gdp) +{ + if (sb->s_bdev != sb->s_bdev->bd_contains) + return nfserr_inval; + return nfserrno(nfsd4_block_get_device_info_simple(sb, gdp)); +} + +static __be32 +nfsd4_block_proc_layoutcommit(struct inode *inode, + struct nfsd4_layoutcommit *lcp) +{ + struct iomap *iomaps; + int nr_iomaps; + + nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout, + lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits); + if (nr_iomaps < 0) + return nfserrno(nr_iomaps); + + return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps); +} + const struct nfsd4_layout_ops bl_layout_ops = { /* * Pretend that we send notification to the client. This is a blatant @@ -190,3 +203,206 @@ const struct nfsd4_layout_ops bl_layout_ops = { .encode_layoutget = nfsd4_block_encode_layoutget, .proc_layoutcommit = nfsd4_block_proc_layoutcommit, }; +#endif /* CONFIG_NFSD_BLOCKLAYOUT */ + +#ifdef CONFIG_NFSD_SCSILAYOUT +static int nfsd4_scsi_identify_device(struct block_device *bdev, + struct pnfs_block_volume *b) +{ + struct request_queue *q = bdev->bd_disk->queue; + struct request *rq; + size_t bufflen = 252, len, id_len; + u8 *buf, *d, type, assoc; + int error; + + buf = kzalloc(bufflen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + rq = blk_get_request(q, READ, GFP_KERNEL); + if (IS_ERR(rq)) { + error = -ENOMEM; + goto out_free_buf; + } + blk_rq_set_block_pc(rq); + + error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL); + if (error) + goto out_put_request; + + rq->cmd[0] = INQUIRY; + rq->cmd[1] = 1; + rq->cmd[2] = 0x83; + rq->cmd[3] = bufflen >> 8; + rq->cmd[4] = bufflen & 0xff; + rq->cmd_len = COMMAND_SIZE(INQUIRY); + + error = blk_execute_rq(rq->q, NULL, rq, 1); + if (error) { + pr_err("pNFS: INQUIRY 0x83 failed with: %x\n", + rq->errors); + goto out_put_request; + } + + len = (buf[2] << 8) + buf[3] + 4; + if (len > bufflen) { + pr_err("pNFS: INQUIRY 0x83 response invalid (len = %zd)\n", + len); + goto out_put_request; + } + + d = buf + 4; + for (d = buf + 4; d < buf + len; d += id_len + 4) { + id_len = d[3]; + type = d[1] & 0xf; + assoc = (d[1] >> 4) & 0x3; + + /* + * We only care about a EUI-64 and NAA designator types + * with LU association. + */ + if (assoc != 0x00) + continue; + if (type != 0x02 && type != 0x03) + continue; + if (id_len != 8 && id_len != 12 && id_len != 16) + continue; + + b->scsi.code_set = PS_CODE_SET_BINARY; + b->scsi.designator_type = type == 0x02 ? + PS_DESIGNATOR_EUI64 : PS_DESIGNATOR_NAA; + b->scsi.designator_len = id_len; + memcpy(b->scsi.designator, d + 4, id_len); + + /* + * If we found a 8 or 12 byte descriptor continue on to + * see if a 16 byte one is available. If we find a + * 16 byte descriptor we're done. + */ + if (id_len == 16) + break; + } + +out_put_request: + blk_put_request(rq); +out_free_buf: + kfree(buf); + return error; +} + +#define NFSD_MDS_PR_KEY 0x0100000000000000 + +/* + * We use the client ID as a unique key for the reservations. + * This allows us to easily fence a client when recalls fail. 
+ */ +static u64 nfsd4_scsi_pr_key(struct nfs4_client *clp) +{ + return ((u64)clp->cl_clientid.cl_boot << 32) | clp->cl_clientid.cl_id; +} + +static int +nfsd4_block_get_device_info_scsi(struct super_block *sb, + struct nfs4_client *clp, + struct nfsd4_getdeviceinfo *gdp) +{ + struct pnfs_block_deviceaddr *dev; + struct pnfs_block_volume *b; + const struct pr_ops *ops; + int error; + + dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) + + sizeof(struct pnfs_block_volume), GFP_KERNEL); + if (!dev) + return -ENOMEM; + gdp->gd_device = dev; + + dev->nr_volumes = 1; + b = &dev->volumes[0]; + + b->type = PNFS_BLOCK_VOLUME_SCSI; + b->scsi.pr_key = nfsd4_scsi_pr_key(clp); + + error = nfsd4_scsi_identify_device(sb->s_bdev, b); + if (error) + return error; + + ops = sb->s_bdev->bd_disk->fops->pr_ops; + if (!ops) { + pr_err("pNFS: device %s does not support PRs.\n", + sb->s_id); + return -EINVAL; + } + + error = ops->pr_register(sb->s_bdev, 0, NFSD_MDS_PR_KEY, true); + if (error) { + pr_err("pNFS: failed to register key for device %s.\n", + sb->s_id); + return -EINVAL; + } + + error = ops->pr_reserve(sb->s_bdev, NFSD_MDS_PR_KEY, + PR_EXCLUSIVE_ACCESS_REG_ONLY, 0); + if (error) { + pr_err("pNFS: failed to reserve device %s.\n", + sb->s_id); + return -EINVAL; + } + + return 0; +} + +static __be32 +nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb, + struct nfs4_client *clp, + struct nfsd4_getdeviceinfo *gdp) +{ + if (sb->s_bdev != sb->s_bdev->bd_contains) + return nfserr_inval; + return nfserrno(nfsd4_block_get_device_info_scsi(sb, clp, gdp)); +} +static __be32 +nfsd4_scsi_proc_layoutcommit(struct inode *inode, + struct nfsd4_layoutcommit *lcp) +{ + struct iomap *iomaps; + int nr_iomaps; + + nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout, + lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits); + if (nr_iomaps < 0) + return nfserrno(nr_iomaps); + + return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps); +} + +static void +nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls) +{ + struct nfs4_client *clp = ls->ls_stid.sc_client; + struct block_device *bdev = ls->ls_file->f_path.mnt->mnt_sb->s_bdev; + + bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY, + nfsd4_scsi_pr_key(clp), 0, true); +} + +const struct nfsd4_layout_ops scsi_layout_ops = { + /* + * Pretend that we send notification to the client. This is a blatant + * lie to force recent Linux clients to cache our device IDs. + * We rarely ever change the device ID, so the harm of leaking deviceids + * for a while isn't too bad. Unfortunately RFC5661 is a complete mess + * in this regard, but I filed errata 4119 for this a while ago, and + * hopefully the Linux client will eventually start caching deviceids + * without this again. + */ + .notify_types = + NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE, + .proc_getdeviceinfo = nfsd4_scsi_proc_getdeviceinfo, + .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo, + .proc_layoutget = nfsd4_block_proc_layoutget, + .encode_layoutget = nfsd4_block_encode_layoutget, + .proc_layoutcommit = nfsd4_scsi_proc_layoutcommit, + .fence_client = nfsd4_scsi_fence_client, +}; +#endif /* CONFIG_NFSD_SCSILAYOUT */ diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c index 6d834dc9bbc8..6c3b316f932e 100644 --- a/fs/nfsd/blocklayoutxdr.c +++ b/fs/nfsd/blocklayoutxdr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Christoph Hellwig. + * Copyright (c) 2014-2016 Christoph Hellwig. 
*/ #include <linux/sunrpc/svc.h> #include <linux/exportfs.h> @@ -53,6 +53,18 @@ nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b) p = xdr_encode_hyper(p, b->simple.offset); p = xdr_encode_opaque(p, b->simple.sig, b->simple.sig_len); break; + case PNFS_BLOCK_VOLUME_SCSI: + len = 4 + 4 + 4 + 4 + b->scsi.designator_len + 8; + p = xdr_reserve_space(xdr, len); + if (!p) + return -ETOOSMALL; + + *p++ = cpu_to_be32(b->type); + *p++ = cpu_to_be32(b->scsi.code_set); + *p++ = cpu_to_be32(b->scsi.designator_type); + p = xdr_encode_opaque(p, b->scsi.designator, b->scsi.designator_len); + p = xdr_encode_hyper(p, b->scsi.pr_key); + break; default: return -ENOTSUPP; } @@ -93,18 +105,22 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, u32 block_size) { struct iomap *iomaps; - u32 nr_iomaps, expected, i; + u32 nr_iomaps, i; if (len < sizeof(u32)) { dprintk("%s: extent array too small: %u\n", __func__, len); return -EINVAL; } + len -= sizeof(u32); + if (len % PNFS_BLOCK_EXTENT_SIZE) { + dprintk("%s: extent array invalid: %u\n", __func__, len); + return -EINVAL; + } nr_iomaps = be32_to_cpup(p++); - expected = sizeof(__be32) + nr_iomaps * PNFS_BLOCK_EXTENT_SIZE; - if (len != expected) { + if (nr_iomaps != len / PNFS_BLOCK_EXTENT_SIZE) { dprintk("%s: extent array size mismatch: %u/%u\n", - __func__, len, expected); + __func__, len, nr_iomaps); return -EINVAL; } @@ -155,3 +171,54 @@ fail: kfree(iomaps); return -EINVAL; } + +int +nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, + u32 block_size) +{ + struct iomap *iomaps; + u32 nr_iomaps, expected, i; + + if (len < sizeof(u32)) { + dprintk("%s: extent array too small: %u\n", __func__, len); + return -EINVAL; + } + + nr_iomaps = be32_to_cpup(p++); + expected = sizeof(__be32) + nr_iomaps * PNFS_SCSI_RANGE_SIZE; + if (len != expected) { + dprintk("%s: extent array size mismatch: %u/%u\n", + __func__, len, expected); + return -EINVAL; + } + + iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL); + if (!iomaps) { + dprintk("%s: failed to allocate extent array\n", __func__); + return -ENOMEM; + } + + for (i = 0; i < nr_iomaps; i++) { + u64 val; + + p = xdr_decode_hyper(p, &val); + if (val & (block_size - 1)) { + dprintk("%s: unaligned offset 0x%llx\n", __func__, val); + goto fail; + } + iomaps[i].offset = val; + + p = xdr_decode_hyper(p, &val); + if (val & (block_size - 1)) { + dprintk("%s: unaligned length 0x%llx\n", __func__, val); + goto fail; + } + iomaps[i].length = val; + } + + *iomapp = iomaps; + return nr_iomaps; +fail: + kfree(iomaps); + return -EINVAL; +} diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h index 6de925fe8499..397bc7563a49 100644 --- a/fs/nfsd/blocklayoutxdr.h +++ b/fs/nfsd/blocklayoutxdr.h @@ -15,6 +15,11 @@ struct pnfs_block_extent { enum pnfs_block_extent_state es; }; +struct pnfs_block_range { + u64 foff; + u64 len; +}; + /* * Random upper cap for the uuid length to avoid unbounded allocation. * Not actually limited by the protocol. 
@@ -29,6 +34,13 @@ struct pnfs_block_volume { u32 sig_len; u8 sig[PNFS_BLOCK_UUID_LEN]; } simple; + struct { + enum scsi_code_set code_set; + enum scsi_designator_type designator_type; + int designator_len; + u8 designator[256]; + u64 pr_key; + } scsi; }; }; @@ -43,5 +55,7 @@ __be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr, struct nfsd4_layoutget *lgp); int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, u32 block_size); +int nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, + u32 block_size); #endif /* _NFSD_BLOCKLAYOUTXDR_H */ diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 7b755b7f785c..51c3b06e8036 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -147,6 +147,7 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp, { __be32 nfserr; u32 max_blocksize = svc_max_payload(rqstp); + unsigned long cnt = min(argp->count, max_blocksize); dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n", SVCFH_fmt(&argp->fh), @@ -157,7 +158,7 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp, * 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof) * + 1 (xdr opaque byte count) = 26 */ - resp->count = min(argp->count, max_blocksize); + resp->count = cnt; svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4); fh_copy(&resp->fh, &argp->fh); @@ -167,8 +168,8 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp, &resp->count); if (nfserr == 0) { struct inode *inode = d_inode(resp->fh.fh_dentry); - - resp->eof = (argp->offset + resp->count) >= inode->i_size; + resp->eof = nfsd_eof_on_read(cnt, resp->count, argp->offset, + inode->i_size); } RETURN_STATUS(nfserr); diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index ce2d010d3b17..825c7bc8d789 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2014 Christoph Hellwig. */ +#include <linux/blkdev.h> #include <linux/kmod.h> #include <linux/file.h> #include <linux/jhash.h> @@ -26,7 +27,12 @@ static const struct nfsd4_callback_ops nfsd4_cb_layout_ops; static const struct lock_manager_operations nfsd4_layouts_lm_ops; const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = { +#ifdef CONFIG_NFSD_BLOCKLAYOUT [LAYOUT_BLOCK_VOLUME] = &bl_layout_ops, +#endif +#ifdef CONFIG_NFSD_SCSILAYOUT + [LAYOUT_SCSI] = &scsi_layout_ops, +#endif }; /* pNFS device ID to export fsid mapping */ @@ -121,10 +127,24 @@ void nfsd4_setup_layout_type(struct svc_export *exp) if (!(exp->ex_flags & NFSEXP_PNFS)) return; + /* + * Check if the file system supports exporting a block-like layout. + * If the block device supports reservations prefer the SCSI layout, + * otherwise advertise the block layout. + */ +#ifdef CONFIG_NFSD_BLOCKLAYOUT if (sb->s_export_op->get_uuid && sb->s_export_op->map_blocks && sb->s_export_op->commit_blocks) exp->ex_layout_type = LAYOUT_BLOCK_VOLUME; +#endif +#ifdef CONFIG_NFSD_SCSILAYOUT + /* overwrite block layout selection if needed */ + if (sb->s_export_op->map_blocks && + sb->s_export_op->commit_blocks && + sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops) + exp->ex_layout_type = LAYOUT_SCSI; +#endif } static void @@ -590,8 +610,6 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); - trace_layout_recall_fail(&ls->ls_stid.sc_stateid); - printk(KERN_WARNING "nfsd: client %s failed to respond to layout recall. 
" " Fencing..\n", addr_str); @@ -626,6 +644,7 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task) container_of(cb, struct nfs4_layout_stateid, ls_recall); struct nfsd_net *nn; ktime_t now, cutoff; + const struct nfsd4_layout_ops *ops; LIST_HEAD(reaplist); @@ -661,7 +680,13 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task) /* * Unknown error or non-responding client, we'll need to fence. */ - nfsd4_cb_layout_fail(ls); + trace_layout_recall_fail(&ls->ls_stid.sc_stateid); + + ops = nfsd4_layout_ops[ls->ls_layout_type]; + if (ops->fence_client) + ops->fence_client(ls); + else + nfsd4_cb_layout_fail(ls); return -1; } } diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 4cba7865f496..de1ff1d98bb1 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -864,12 +864,10 @@ static __be32 nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_secinfo *secinfo) { - struct svc_fh resfh; struct svc_export *exp; struct dentry *dentry; __be32 err; - fh_init(&resfh, NFS4_FHSIZE); err = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, NFSD_MAY_EXEC); if (err) return err; @@ -878,6 +876,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, &exp, &dentry); if (err) return err; + fh_unlock(&cstate->current_fh); if (d_really_is_negative(dentry)) { exp_put(exp); err = nfserr_noent; @@ -1269,8 +1268,10 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp, goto out; nfserr = nfs_ok; - if (gdp->gd_maxcount != 0) - nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp); + if (gdp->gd_maxcount != 0) { + nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, + cstate->session->se_client, gdp); + } gdp->gd_notify_types &= ops->notify_types; out: diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 195fe2668207..66eaeb1e8c2c 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -1266,6 +1266,7 @@ nfsd4_umh_cltrack_init(struct net *net) /* XXX: The usermode helper s not working in container yet. */ if (net != &init_net) { pr_warn("NFSD: attempt to initialize umh client tracking in a container ignored.\n"); + kfree(grace_start); return -EINVAL; } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c484a2b6cd10..0462eeddfff9 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2408,7 +2408,8 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, default: /* checked by xdr code */ WARN_ON_ONCE(1); case SP4_SSV: - return nfserr_encr_alg_unsupp; + status = nfserr_encr_alg_unsupp; + goto out_nolock; } /* Cases below refer to rfc 5661 section 18.35.4: */ @@ -2586,21 +2587,26 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs return nfs_ok; } +/* + * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now. + * These are based on similar macros in linux/sunrpc/msg_prot.h . 
+ */ +#define RPC_MAX_HEADER_WITH_AUTH_SYS \ + (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK)) + +#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \ + (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK)) + #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \ - RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32)) + RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32)) #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \ - RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32)) + RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \ + sizeof(__be32)) static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) { ca->headerpadsz = 0; - /* - * These RPC_MAX_HEADER macros are overkill, especially since we - * don't even do gss on the backchannel yet. But this is still - * less than 1k. Tighten up this estimate in the unlikely event - * it turns out to be a problem for some client: - */ if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) return nfserr_toosmall; if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) @@ -2710,10 +2716,9 @@ nfsd4_create_session(struct svc_rqst *rqstp, goto out_free_conn; } status = nfs_ok; - /* - * We do not support RDMA or persistent sessions - */ + /* Persistent sessions are not supported */ cr_ses->flags &= ~SESSION4_PERSIST; + /* Upshifting from TCP to RDMA is not supported */ cr_ses->flags &= ~SESSION4_RDMA; init_session(rqstp, new, conf, cr_ses); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index d6ef0955a979..9df898ba648f 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -1072,8 +1072,9 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename READ_BUF(4); rename->rn_snamelen = be32_to_cpup(p++); - READ_BUF(rename->rn_snamelen + 4); + READ_BUF(rename->rn_snamelen); SAVEMEM(rename->rn_sname, rename->rn_snamelen); + READ_BUF(4); rename->rn_tnamelen = be32_to_cpup(p++); READ_BUF(rename->rn_tnamelen); SAVEMEM(rename->rn_tname, rename->rn_tnamelen); @@ -1155,13 +1156,14 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient READ_BUF(8); setclientid->se_callback_prog = be32_to_cpup(p++); setclientid->se_callback_netid_len = be32_to_cpup(p++); - - READ_BUF(setclientid->se_callback_netid_len + 4); + READ_BUF(setclientid->se_callback_netid_len); SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len); + READ_BUF(4); setclientid->se_callback_addr_len = be32_to_cpup(p++); - READ_BUF(setclientid->se_callback_addr_len + 4); + READ_BUF(setclientid->se_callback_addr_len); SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len); + READ_BUF(4); setclientid->se_callback_ident = be32_to_cpup(p++); DECODE_TAIL; @@ -1835,8 +1837,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) READ_BUF(4); argp->taglen = be32_to_cpup(p++); - READ_BUF(argp->taglen + 8); + READ_BUF(argp->taglen); SAVEMEM(argp->tag, argp->taglen); + READ_BUF(8); argp->minorversion = be32_to_cpup(p++); argp->opcnt = be32_to_cpup(p++); max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2); @@ -3060,7 +3063,7 @@ static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, p = xdr_encode_opaque_fixed(p, bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN); *p++ = cpu_to_be32(bcts->dir); - /* Sorry, we do not yet support RDMA over 4.1: */ + /* Upshifting from TCP to RDMA is not supported */ *p++ = cpu_to_be32(0); } return nfserr; @@ -3362,6 +3365,7 @@ static __be32 nfsd4_encode_splice_read( struct xdr_stream *xdr = &resp->xdr; struct xdr_buf *buf = xdr->buf; u32 eof; + long len; int space_left; __be32 nfserr; __be32 *p = xdr->p - 2; @@ -3370,6 +3374,7 @@ static 
__be32 nfsd4_encode_splice_read( if (xdr->end - xdr->p < 1) return nfserr_resource; + len = maxcount; nfserr = nfsd_splice_read(read->rd_rqstp, file, read->rd_offset, &maxcount); if (nfserr) { @@ -3382,8 +3387,8 @@ static __be32 nfsd4_encode_splice_read( return nfserr; } - eof = (read->rd_offset + maxcount >= - d_inode(read->rd_fhp->fh_dentry)->i_size); + eof = nfsd_eof_on_read(len, maxcount, read->rd_offset, + d_inode(read->rd_fhp->fh_dentry)->i_size); *(p++) = htonl(eof); *(p++) = htonl(maxcount); @@ -3453,14 +3458,15 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp, } read->rd_vlen = v; + len = maxcount; nfserr = nfsd_readv(file, read->rd_offset, resp->rqstp->rq_vec, read->rd_vlen, &maxcount); if (nfserr) return nfserr; xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3)); - eof = (read->rd_offset + maxcount >= - d_inode(read->rd_fhp->fh_dentry)->i_size); + eof = nfsd_eof_on_read(len, maxcount, read->rd_offset, + d_inode(read->rd_fhp->fh_dentry)->i_size); tmp = htonl(eof); write_bytes_to_xdr_buf(xdr->buf, starting_len , &tmp, 4); diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h index d4c4453674c6..7d073b9b1553 100644 --- a/fs/nfsd/pnfs.h +++ b/fs/nfsd/pnfs.h @@ -21,6 +21,7 @@ struct nfsd4_layout_ops { u32 notify_types; __be32 (*proc_getdeviceinfo)(struct super_block *sb, + struct nfs4_client *clp, struct nfsd4_getdeviceinfo *gdevp); __be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr, struct nfsd4_getdeviceinfo *gdevp); @@ -32,10 +33,17 @@ struct nfsd4_layout_ops { __be32 (*proc_layoutcommit)(struct inode *inode, struct nfsd4_layoutcommit *lcp); + + void (*fence_client)(struct nfs4_layout_stateid *ls); }; extern const struct nfsd4_layout_ops *nfsd4_layout_ops[]; +#ifdef CONFIG_NFSD_BLOCKLAYOUT extern const struct nfsd4_layout_ops bl_layout_ops; +#endif +#ifdef CONFIG_NFSD_SCSILAYOUT +extern const struct nfsd4_layout_ops scsi_layout_ops; +#endif __be32 nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stateid_t *stateid, diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index c11ba316f23f..2d573ec057f8 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -139,4 +139,23 @@ static inline int nfsd_create_is_exclusive(int createmode) || createmode == NFS4_CREATE_EXCLUSIVE4_1; } +static inline bool nfsd_eof_on_read(long requested, long read, + loff_t offset, loff_t size) +{ + /* We assume a short read means eof: */ + if (requested > read) + return true; + /* + * A non-short read might also reach end of file. The spec + * still requires us to set eof in that case. + * + * Further operations may have modified the file size since + * the read, so the following check is not atomic with the read. + * We've only seen that cause a problem for a client in the case + * where the read returned a count of 0 without setting eof. + * That case was fixed by the addition of the above check. 
+ */ + return (offset + read >= size); +} + #endif /* LINUX_NFSD_VFS_H */ diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index 27f75bcbeb30..a9fb3636c142 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -458,7 +458,7 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap, struct buffer_head *pbh; __u64 key; - key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT - + key = page_index(bh->b_page) << (PAGE_SHIFT - bmap->b_inode->i_blkbits); for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page) key++; diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index a35ae35e6932..e0c9daf9aa22 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -62,7 +62,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) set_buffer_uptodate(bh); unlock_page(bh->b_page); - page_cache_release(bh->b_page); + put_page(bh->b_page); return bh; } @@ -128,7 +128,7 @@ found: out_locked: unlock_page(page); - page_cache_release(page); + put_page(page); return err; } @@ -146,7 +146,7 @@ void nilfs_btnode_delete(struct buffer_head *bh) pgoff_t index = page_index(page); int still_dirty; - page_cache_get(page); + get_page(page); lock_page(page); wait_on_page_writeback(page); @@ -154,7 +154,7 @@ void nilfs_btnode_delete(struct buffer_head *bh) still_dirty = PageDirty(page); mapping = page->mapping; unlock_page(page); - page_cache_release(page); + put_page(page); if (!still_dirty && mapping) invalidate_inode_pages2_range(mapping, index, index); @@ -181,7 +181,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc, obh = ctxt->bh; ctxt->newbh = NULL; - if (inode->i_blkbits == PAGE_CACHE_SHIFT) { + if (inode->i_blkbits == PAGE_SHIFT) { lock_page(obh->b_page); /* * We cannot call radix_tree_preload for the kernels older diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 6b8b92b19cec..e08f064e4bd7 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -58,7 +58,7 @@ static inline unsigned nilfs_chunk_size(struct inode *inode) static inline void nilfs_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } /* @@ -69,9 +69,9 @@ static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = inode->i_size; - last_byte -= page_nr << PAGE_CACHE_SHIFT; - if (last_byte > PAGE_CACHE_SIZE) - last_byte = PAGE_CACHE_SIZE; + last_byte -= page_nr << PAGE_SHIFT; + if (last_byte > PAGE_SIZE) + last_byte = PAGE_SIZE; return last_byte; } @@ -109,12 +109,12 @@ static void nilfs_check_page(struct page *page) unsigned chunk_size = nilfs_chunk_size(dir); char *kaddr = page_address(page); unsigned offs, rec_len; - unsigned limit = PAGE_CACHE_SIZE; + unsigned limit = PAGE_SIZE; struct nilfs_dir_entry *p; char *error; - if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { - limit = dir->i_size & ~PAGE_CACHE_MASK; + if ((dir->i_size >> PAGE_SHIFT) == page->index) { + limit = dir->i_size & ~PAGE_MASK; if (limit & (chunk_size - 1)) goto Ebadsize; if (!limit) @@ -161,7 +161,7 @@ Espan: bad_entry: nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - " "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", - dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, (unsigned long) le64_to_cpu(p->inode), rec_len, p->name_len); goto fail; @@ -170,7 +170,7 @@ Eend: nilfs_error(sb, "nilfs_check_page", "entry in directory #%lu spans the page boundary" "offset=%lu, inode=%lu", - dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, 
(page->index<<PAGE_SHIFT)+offs, (unsigned long) le64_to_cpu(p->inode)); fail: SetPageChecked(page); @@ -256,8 +256,8 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx) loff_t pos = ctx->pos; struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - unsigned int offset = pos & ~PAGE_CACHE_MASK; - unsigned long n = pos >> PAGE_CACHE_SHIFT; + unsigned int offset = pos & ~PAGE_MASK; + unsigned long n = pos >> PAGE_SHIFT; unsigned long npages = dir_pages(inode); /* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */ @@ -272,7 +272,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx) if (IS_ERR(page)) { nilfs_error(sb, __func__, "bad page in #%lu", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_SIZE - offset; return -EIO; } kaddr = page_address(page); @@ -361,7 +361,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr, if (++n >= npages) n = 0; /* next page is past the blocks we've got */ - if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) { + if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { nilfs_error(dir->i_sb, __func__, "dir %lu size %lld exceeds block count %llu", dir->i_ino, dir->i_size, @@ -401,7 +401,7 @@ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr) if (de) { res = le64_to_cpu(de->inode); kunmap(page); - page_cache_release(page); + put_page(page); } return res; } @@ -460,7 +460,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode) kaddr = page_address(page); dir_end = kaddr + nilfs_last_byte(dir, n); de = (struct nilfs_dir_entry *)kaddr; - kaddr += PAGE_CACHE_SIZE - reclen; + kaddr += PAGE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { /* We hit i_size */ @@ -603,7 +603,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent) kunmap_atomic(kaddr); nilfs_commit_chunk(page, mapping, 0, chunk_size); fail: - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 748ca238915a..0224b7826ace 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -115,7 +115,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, failed: unlock_page(bh->b_page); - page_cache_release(bh->b_page); + put_page(bh->b_page); return err; } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 21a1e2e0d92f..534631358b13 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -249,7 +249,7 @@ static int nilfs_set_page_dirty(struct page *page) if (nr_dirty) nilfs_set_file_dirty(inode, nr_dirty); } else if (ret) { - unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits); + unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); nilfs_set_file_dirty(inode, nr_dirty); } @@ -291,7 +291,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata) { struct inode *inode = mapping->host; - unsigned start = pos & (PAGE_CACHE_SIZE - 1); + unsigned start = pos & (PAGE_SIZE - 1); unsigned nr_dirty; int err; diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 1125f40233ff..f6982b9153d5 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -110,7 +110,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block, failed_bh: unlock_page(bh->b_page); - page_cache_release(bh->b_page); + put_page(bh->b_page); brelse(bh); failed_unlock: @@ -170,7 +170,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, failed_bh: unlock_page(bh->b_page); - 
page_cache_release(bh->b_page); + put_page(bh->b_page); brelse(bh); failed: return ret; @@ -363,7 +363,7 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) { pgoff_t index = (pgoff_t)block >> - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); struct page *page; unsigned long first_block; int ret = 0; @@ -376,7 +376,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) wait_on_page_writeback(page); first_block = (unsigned long)index << - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); if (page_has_buffers(page)) { struct buffer_head *bh; @@ -385,7 +385,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) } still_dirty = PageDirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); if (still_dirty || invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0) @@ -578,7 +578,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh) } unlock_page(page); - page_cache_release(page); + put_page(page); return 0; } @@ -597,7 +597,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh) bh_frozen = nilfs_page_get_nth_block(page, n); } unlock_page(page); - page_cache_release(page); + put_page(page); } return bh_frozen; } diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 7ccdb961eea9..151bc19d47c0 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -431,11 +431,11 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: nilfs_transaction_abort(old_dir->i_sb); return err; diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index c20df77eff99..489391561cda 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -50,7 +50,7 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, if (!page_has_buffers(page)) create_empty_buffers(page, 1 << blkbits, b_state); - first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits); + first_block = (unsigned long)index << (PAGE_SHIFT - blkbits); bh = nilfs_page_get_nth_block(page, block - first_block); touch_buffer(bh); @@ -64,7 +64,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode, unsigned long b_state) { int blkbits = inode->i_blkbits; - pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits); + pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits); struct page *page; struct buffer_head *bh; @@ -75,7 +75,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode, bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state); if (unlikely(!bh)) { unlock_page(page); - page_cache_release(page); + put_page(page); return NULL; } return bh; @@ -288,7 +288,7 @@ repeat: __set_page_dirty_nobuffers(dpage); unlock_page(dpage); - page_cache_release(dpage); + put_page(dpage); unlock_page(page); } pagevec_release(&pvec); @@ -333,7 +333,7 @@ repeat: WARN_ON(PageDirty(dpage)); nilfs_copy_page(dpage, page, 0); unlock_page(dpage); - page_cache_release(dpage); + put_page(dpage); } else { struct page *page2; @@ -350,7 +350,7 @@ repeat: if (unlikely(err < 0)) { WARN_ON(err == -EEXIST); page->mapping = NULL; - page_cache_release(page); /* for cache */ + put_page(page); /* for cache */ } else { page->mapping = dmap; dmap->nrpages++; @@ -523,8 +523,8 @@ unsigned long 
nilfs_find_uncommitted_extent(struct inode *inode, if (inode->i_mapping->nrpages == 0) return 0; - index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits); - nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits); + index = start_blk >> (PAGE_SHIFT - inode->i_blkbits); + nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits); pagevec_init(&pvec, 0); @@ -537,7 +537,7 @@ repeat: if (length > 0 && pvec.pages[0]->index > index) goto out; - b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits); i = 0; do { page = pvec.pages[i]; diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 9b4f205d1173..5afa77fadc11 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -544,14 +544,14 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, blocksize, page, NULL); unlock_page(page); - page_cache_release(page); + put_page(page); (*nr_salvaged_blocks)++; goto next; failed_page: unlock_page(page); - page_cache_release(page); + put_page(page); failed_inode: printk(KERN_WARNING diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 3b65adaae7e4..4317f72568e6 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2070,7 +2070,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) goto failed_to_write; if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE || - nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) { + nilfs->ns_blocksize_bits != PAGE_SHIFT) { /* * At this point, we avoid double buffering * for blocksize < pagesize because page dirty diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 7521e11db728..97768a1379f2 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -74,7 +74,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) set_buffer_uptodate(bh); - file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + + file_ofs = ((s64)page->index << PAGE_SHIFT) + bh_offset(bh); read_lock_irqsave(&ni->size_lock, flags); init_size = ni->initialized_size; @@ -142,7 +142,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) u32 rec_size; rec_size = ni->itype.index.block_size; - recs = PAGE_CACHE_SIZE / rec_size; + recs = PAGE_SIZE / rec_size; /* Should have been verified before we got here... */ BUG_ON(!recs); local_irq_save(flags); @@ -229,7 +229,7 @@ static int ntfs_read_block(struct page *page) * fully truncated, truncate will throw it away as soon as we unlock * it so no need to worry what we do with it. */ - iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); + iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits); read_lock_irqsave(&ni->size_lock, flags); lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; init_size = ni->initialized_size; @@ -412,9 +412,9 @@ retry_readpage: vi = page->mapping->host; i_size = i_size_read(vi); /* Is the page fully outside i_size? (truncate in progress) */ - if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT)) { - zero_user(page, 0, PAGE_CACHE_SIZE); + if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >> + PAGE_SHIFT)) { + zero_user(page, 0, PAGE_SIZE); ntfs_debug("Read outside i_size - truncated?"); goto done; } @@ -463,7 +463,7 @@ retry_readpage: * ok to ignore the compressed flag here. 
*/ if (unlikely(page->index > 0)) { - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); goto done; } if (!NInoAttr(ni)) @@ -509,7 +509,7 @@ retry_readpage: le16_to_cpu(ctx->attr->data.resident.value_offset), attr_len); /* Zero the remainder of the page. */ - memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); + memset(addr + attr_len, 0, PAGE_SIZE - attr_len); flush_dcache_page(page); kunmap_atomic(addr); put_unm_err_out: @@ -599,7 +599,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc) /* NOTE: Different naming scheme to ntfs_read_block()! */ /* The first block in the page. */ - block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); + block = (s64)page->index << (PAGE_SHIFT - blocksize_bits); read_lock_irqsave(&ni->size_lock, flags); i_size = i_size_read(vi); @@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc) // in the inode. // Again, for each page do: // __set_page_dirty_buffers(); - // page_cache_release() + // put_page() // We don't need to wait on the writes. // Update iblock. } @@ -925,7 +925,7 @@ static int ntfs_write_mst_block(struct page *page, ntfs_volume *vol = ni->vol; u8 *kaddr; unsigned int rec_size = ni->itype.index.block_size; - ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size]; + ntfs_inode *locked_nis[PAGE_SIZE / rec_size]; struct buffer_head *bh, *head, *tbh, *rec_start_bh; struct buffer_head *bhs[MAX_BUF_PER_PAGE]; runlist_element *rl; @@ -949,7 +949,7 @@ static int ntfs_write_mst_block(struct page *page, (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION))); bh_size = vol->sb->s_blocksize; bh_size_bits = vol->sb->s_blocksize_bits; - max_bhs = PAGE_CACHE_SIZE / bh_size; + max_bhs = PAGE_SIZE / bh_size; BUG_ON(!max_bhs); BUG_ON(max_bhs > MAX_BUF_PER_PAGE); @@ -961,13 +961,13 @@ static int ntfs_write_mst_block(struct page *page, BUG_ON(!bh); rec_size_bits = ni->itype.index.block_size_bits; - BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits)); + BUG_ON(!(PAGE_SIZE >> rec_size_bits)); bhs_per_rec = rec_size >> bh_size_bits; BUG_ON(!bhs_per_rec); /* The first block in the page. */ rec_block = block = (sector_t)page->index << - (PAGE_CACHE_SHIFT - bh_size_bits); + (PAGE_SHIFT - bh_size_bits); /* The first out of bounds block for the data size. */ dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits; @@ -1133,7 +1133,7 @@ lock_retry_remap: unsigned long mft_no; /* Get the mft record number. */ - mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) + mft_no = (((s64)page->index << PAGE_SHIFT) + ofs) >> rec_size_bits; /* Check whether to write this mft record. */ tni = NULL; @@ -1249,7 +1249,7 @@ do_mirror: continue; ofs = bh_offset(tbh); /* Get the mft record number. */ - mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) + mft_no = (((s64)page->index << PAGE_SHIFT) + ofs) >> rec_size_bits; if (mft_no < vol->mftmirr_size) ntfs_sync_mft_mirror(vol, mft_no, @@ -1300,7 +1300,7 @@ done: * Set page error if there is only one ntfs record in the page. * Otherwise we would loose per-record granularity. */ - if (ni->itype.index.block_size == PAGE_CACHE_SIZE) + if (ni->itype.index.block_size == PAGE_SIZE) SetPageError(page); NVolSetErrors(vol); } @@ -1308,7 +1308,7 @@ done: ntfs_debug("Page still contains one or more dirty ntfs " "records. 
Redirtying the page starting at " "record 0x%lx.", page->index << - (PAGE_CACHE_SHIFT - rec_size_bits)); + (PAGE_SHIFT - rec_size_bits)); redirty_page_for_writepage(wbc, page); unlock_page(page); } else { @@ -1365,13 +1365,13 @@ retry_writepage: BUG_ON(!PageLocked(page)); i_size = i_size_read(vi); /* Is the page fully outside i_size? (truncate in progress) */ - if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT)) { + if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >> + PAGE_SHIFT)) { /* * The page may have dirty, unmapped buffers. Make them * freeable here, so the page does not leak. */ - block_invalidatepage(page, 0, PAGE_CACHE_SIZE); + block_invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); ntfs_debug("Write outside i_size - truncated?"); return 0; @@ -1414,10 +1414,10 @@ retry_writepage: /* NInoNonResident() == NInoIndexAllocPresent() */ if (NInoNonResident(ni)) { /* We have to zero every time due to mmap-at-end-of-file. */ - if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { + if (page->index >= (i_size >> PAGE_SHIFT)) { /* The page straddles i_size. */ - unsigned int ofs = i_size & ~PAGE_CACHE_MASK; - zero_user_segment(page, ofs, PAGE_CACHE_SIZE); + unsigned int ofs = i_size & ~PAGE_MASK; + zero_user_segment(page, ofs, PAGE_SIZE); } /* Handle mst protected attributes. */ if (NInoMstProtected(ni)) @@ -1500,7 +1500,7 @@ retry_writepage: le16_to_cpu(ctx->attr->data.resident.value_offset), addr, attr_len); /* Zero out of bounds area in the page cache page. */ - memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); + memset(addr + attr_len, 0, PAGE_SIZE - attr_len); kunmap_atomic(addr); flush_dcache_page(page); flush_dcache_mft_record_page(ctx->ntfs_ino); diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h index caecc58f529c..820d6eabf60f 100644 --- a/fs/ntfs/aops.h +++ b/fs/ntfs/aops.h @@ -40,7 +40,7 @@ static inline void ntfs_unmap_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } /** @@ -49,7 +49,7 @@ static inline void ntfs_unmap_page(struct page *page) * @index: index into the page cache for @mapping of the page to map * * Read a page from the page cache of the address space @mapping at position - * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes. + * @index, where @index is in units of PAGE_SIZE, and not in bytes. * * If the page is not in memory it is loaded from disk first using the readpage * method defined in the address space operations of @mapping and the page is diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index 250ed5b20c8f..44a39a099b54 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -152,7 +152,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx) if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino != old_ctx.base_ntfs_ino) { put_this_page = old_ctx.ntfs_ino->page; - page_cache_get(put_this_page); + get_page(put_this_page); } /* * Reinitialize the search context so we can lookup the @@ -275,7 +275,7 @@ retry_map: * the pieces anyway. 
*/ if (put_this_page) - page_cache_release(put_this_page); + put_page(put_this_page); } return err; } @@ -1660,7 +1660,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size) memcpy(kaddr, (u8*)a + le16_to_cpu(a->data.resident.value_offset), attr_size); - memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size); + memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size); kunmap_atomic(kaddr); flush_dcache_page(page); SetPageUptodate(page); @@ -1748,7 +1748,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size) if (page) { set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); } ntfs_debug("Done."); return 0; @@ -1835,7 +1835,7 @@ rl_err_out: ntfs_free(rl); page_err_out: unlock_page(page); - page_cache_release(page); + put_page(page); } if (err == -EINVAL) err = -EIO; @@ -2513,17 +2513,17 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) BUG_ON(NInoEncrypted(ni)); mapping = VFS_I(ni)->i_mapping; /* Work out the starting index and page offset. */ - idx = ofs >> PAGE_CACHE_SHIFT; - start_ofs = ofs & ~PAGE_CACHE_MASK; + idx = ofs >> PAGE_SHIFT; + start_ofs = ofs & ~PAGE_MASK; /* Work out the ending index and page offset. */ end = ofs + cnt; - end_ofs = end & ~PAGE_CACHE_MASK; + end_ofs = end & ~PAGE_MASK; /* If the end is outside the inode size return -ESPIPE. */ if (unlikely(end > i_size_read(VFS_I(ni)))) { ntfs_error(vol->sb, "Request exceeds end of attribute."); return -ESPIPE; } - end >>= PAGE_CACHE_SHIFT; + end >>= PAGE_SHIFT; /* If there is a first partial page, need to do it the slow way. */ if (start_ofs) { page = read_mapping_page(mapping, idx, NULL); @@ -2536,7 +2536,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) * If the last page is the same as the first page, need to * limit the write to the end offset. */ - size = PAGE_CACHE_SIZE; + size = PAGE_SIZE; if (idx == end) size = end_ofs; kaddr = kmap_atomic(page); @@ -2544,7 +2544,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) flush_dcache_page(page); kunmap_atomic(kaddr); set_page_dirty(page); - page_cache_release(page); + put_page(page); balance_dirty_pages_ratelimited(mapping); cond_resched(); if (idx == end) @@ -2561,7 +2561,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) return -ENOMEM; } kaddr = kmap_atomic(page); - memset(kaddr, val, PAGE_CACHE_SIZE); + memset(kaddr, val, PAGE_SIZE); flush_dcache_page(page); kunmap_atomic(kaddr); /* @@ -2585,7 +2585,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) set_page_dirty(page); /* Finally unlock and release the page. */ unlock_page(page); - page_cache_release(page); + put_page(page); balance_dirty_pages_ratelimited(mapping); cond_resched(); } @@ -2602,7 +2602,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) flush_dcache_page(page); kunmap_atomic(kaddr); set_page_dirty(page); - page_cache_release(page); + put_page(page); balance_dirty_pages_ratelimited(mapping); cond_resched(); } diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c index 0809cf876098..ec130c588d2b 100644 --- a/fs/ntfs/bitmap.c +++ b/fs/ntfs/bitmap.c @@ -67,8 +67,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, * Calculate the indices for the pages containing the first and last * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively. 
*/ - index = start_bit >> (3 + PAGE_CACHE_SHIFT); - end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT); + index = start_bit >> (3 + PAGE_SHIFT); + end_index = (start_bit + cnt - 1) >> (3 + PAGE_SHIFT); /* Get the page containing the first bit (@start_bit). */ mapping = vi->i_mapping; @@ -82,7 +82,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, kaddr = page_address(page); /* Set @pos to the position of the byte containing @start_bit. */ - pos = (start_bit >> 3) & ~PAGE_CACHE_MASK; + pos = (start_bit >> 3) & ~PAGE_MASK; /* Calculate the position of @start_bit in the first byte. */ bit = start_bit & 7; @@ -108,7 +108,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, * Depending on @value, modify all remaining whole bytes in the page up * to @cnt. */ - len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos); + len = min_t(s64, cnt >> 3, PAGE_SIZE - pos); memset(kaddr + pos, value ? 0xff : 0, len); cnt -= len << 3; @@ -132,7 +132,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, * Depending on @value, modify all remaining whole bytes in the * page up to @cnt. */ - len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE); + len = min_t(s64, cnt >> 3, PAGE_SIZE); memset(kaddr, value ? 0xff : 0, len); cnt -= len << 3; } diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c index f82498c35e78..f2b5e746f49b 100644 --- a/fs/ntfs/compress.c +++ b/fs/ntfs/compress.c @@ -104,16 +104,12 @@ static void zero_partial_compressed_page(struct page *page, unsigned int kp_ofs; ntfs_debug("Zeroing page region outside initialized size."); - if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) { - /* - * FIXME: Using clear_page() will become wrong when we get - * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem. - */ + if (((s64)page->index << PAGE_SHIFT) >= initialized_size) { clear_page(kp); return; } - kp_ofs = initialized_size & ~PAGE_CACHE_MASK; - memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs); + kp_ofs = initialized_size & ~PAGE_MASK; + memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs); return; } @@ -123,7 +119,7 @@ static void zero_partial_compressed_page(struct page *page, static inline void handle_bounds_compressed_page(struct page *page, const loff_t i_size, const s64 initialized_size) { - if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) && + if ((page->index >= (initialized_size >> PAGE_SHIFT)) && (initialized_size < i_size)) zero_partial_compressed_page(page, initialized_size); return; @@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page, * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was * completed during the decompression of the compression block (@cb_start). * - * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up + * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up * unpredicatbly! You have been warned! * * Note to hackers: This function may not sleep until it has finished accessing @@ -241,7 +237,7 @@ return_error: if (di == xpage) *xpage_done = 1; else - page_cache_release(dp); + put_page(dp); dest_pages[di] = NULL; } } @@ -274,7 +270,7 @@ return_error: cb = cb_sb_end; /* Advance destination position to next sub-block. 
*/ - *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK; + *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK; if (!*dest_ofs && (++*dest_index > dest_max_index)) goto return_overflow; goto do_next_sb; @@ -301,7 +297,7 @@ return_error: /* Advance destination position to next sub-block. */ *dest_ofs += NTFS_SB_SIZE; - if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) { + if (!(*dest_ofs &= ~PAGE_MASK)) { finalize_page: /* * First stage: add current page index to array of @@ -335,7 +331,7 @@ do_next_tag: *dest_ofs += nr_bytes; } /* We have finished the current sub-block. */ - if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) + if (!(*dest_ofs &= ~PAGE_MASK)) goto finalize_page; goto do_next_sb; } @@ -462,7 +458,7 @@ return_overflow: * have been written to so that we would lose data if we were to just overwrite * them with the out-of-date uncompressed data. * - * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at + * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at * the end of the file I think. We need to detect this case and zero the out * of bounds remainder of the page in question and mark it as handled. At the * moment we would just return -EIO on such a page. This bug will only become @@ -470,7 +466,7 @@ return_overflow: * clusters so is probably not going to be seen by anyone. Still this should * be fixed. (AIA) * - * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in + * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in * handling sparse and compressed cbs. (AIA) * * FIXME: At the moment we don't do any zeroing out in the case that @@ -497,14 +493,14 @@ int ntfs_read_compressed_block(struct page *page) u64 cb_size_mask = cb_size - 1UL; VCN vcn; LCN lcn; - /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */ - VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >> + /* The first wanted vcn (minimum alignment is PAGE_SIZE). */ + VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >> vol->cluster_size_bits; /* * The first vcn after the last wanted vcn (minimum alignment is again - * PAGE_CACHE_SIZE. + * PAGE_SIZE. */ - VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1) + VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1) & ~cb_size_mask) >> vol->cluster_size_bits; /* Number of compression blocks (cbs) in the wanted vcn range. */ unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits @@ -515,7 +511,7 @@ int ntfs_read_compressed_block(struct page *page) * guarantees of start_vcn and end_vcn, no need to round up here. */ unsigned int nr_pages = (end_vcn - start_vcn) << - vol->cluster_size_bits >> PAGE_CACHE_SHIFT; + vol->cluster_size_bits >> PAGE_SHIFT; unsigned int xpage, max_page, cur_page, cur_ofs, i; unsigned int cb_clusters, cb_max_ofs; int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0; @@ -549,7 +545,7 @@ int ntfs_read_compressed_block(struct page *page) * We have already been given one page, this is the one we must do. * Once again, the alignment guarantees keep it simple. 
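The start_vcn/end_vcn computation above is the usual power-of-two round-down/round-up: with cb_size a power of two and cb_size_mask == cb_size - 1, byte offsets are first aligned to whole compression blocks, then converted to clusters. As a standalone sketch with illustrative variable names:

s64 first_byte = ((s64)index << PAGE_SHIFT) & ~cb_size_mask;    /* round down */
s64 last_byte  = (((s64)(index + 1) << PAGE_SHIFT) + cb_size - 1)
                        & ~cb_size_mask;                        /* round up */
VCN start_vcn  = first_byte >> vol->cluster_size_bits;
VCN end_vcn    = last_byte >> vol->cluster_size_bits;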
*/ - offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT; + offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT; xpage = index - offset; pages[xpage] = page; /* @@ -560,13 +556,13 @@ int ntfs_read_compressed_block(struct page *page) i_size = i_size_read(VFS_I(ni)); initialized_size = ni->initialized_size; read_unlock_irqrestore(&ni->size_lock, flags); - max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - + max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) - offset; /* Is the page fully outside i_size? (truncate in progress) */ if (xpage >= max_page) { kfree(bhs); kfree(pages); - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); ntfs_debug("Compressed read outside i_size - truncated?"); SetPageUptodate(page); unlock_page(page); @@ -591,7 +587,7 @@ int ntfs_read_compressed_block(struct page *page) continue; } unlock_page(page); - page_cache_release(page); + put_page(page); pages[i] = NULL; } } @@ -735,9 +731,9 @@ lock_retry_remap: ntfs_debug("Successfully read the compression block."); /* The last page and maximum offset within it for the current cb. */ - cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size; - cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK; - cb_max_page >>= PAGE_CACHE_SHIFT; + cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size; + cb_max_ofs = cb_max_page & ~PAGE_MASK; + cb_max_page >>= PAGE_SHIFT; /* Catch end of file inside a compression block. */ if (cb_max_page > max_page) @@ -753,16 +749,11 @@ lock_retry_remap: for (; cur_page < cb_max_page; cur_page++) { page = pages[cur_page]; if (page) { - /* - * FIXME: Using clear_page() will become wrong - * when we get PAGE_CACHE_SIZE != PAGE_SIZE but - * for now there is no problem. - */ if (likely(!cur_ofs)) clear_page(page_address(page)); else memset(page_address(page) + cur_ofs, 0, - PAGE_CACHE_SIZE - + PAGE_SIZE - cur_ofs); flush_dcache_page(page); kunmap(page); @@ -771,10 +762,10 @@ lock_retry_remap: if (cur_page == xpage) xpage_done = 1; else - page_cache_release(page); + put_page(page); pages[cur_page] = NULL; } - cb_pos += PAGE_CACHE_SIZE - cur_ofs; + cb_pos += PAGE_SIZE - cur_ofs; cur_ofs = 0; if (cb_pos >= cb_end) break; @@ -807,7 +798,7 @@ lock_retry_remap: * synchronous io for the majority of pages. * Or if we choose not to do the read-ahead/-behind stuff, we * could just return block_read_full_page(pages[xpage]) as long - * as PAGE_CACHE_SIZE <= cb_size. + * as PAGE_SIZE <= cb_size. 
*/ if (cb_max_ofs) cb_max_page--; @@ -816,8 +807,8 @@ lock_retry_remap: page = pages[cur_page]; if (page) memcpy(page_address(page) + cur_ofs, cb_pos, - PAGE_CACHE_SIZE - cur_ofs); - cb_pos += PAGE_CACHE_SIZE - cur_ofs; + PAGE_SIZE - cur_ofs); + cb_pos += PAGE_SIZE - cur_ofs; cur_ofs = 0; if (cb_pos >= cb_end) break; @@ -850,10 +841,10 @@ lock_retry_remap: if (cur2_page == xpage) xpage_done = 1; else - page_cache_release(page); + put_page(page); pages[cur2_page] = NULL; } - cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2; + cb_pos2 += PAGE_SIZE - cur_ofs2; cur_ofs2 = 0; if (cb_pos2 >= cb_end) break; @@ -884,7 +875,7 @@ lock_retry_remap: kunmap(page); unlock_page(page); if (prev_cur_page != xpage) - page_cache_release(page); + put_page(page); pages[prev_cur_page] = NULL; } } @@ -914,7 +905,7 @@ lock_retry_remap: kunmap(page); unlock_page(page); if (cur_page != xpage) - page_cache_release(page); + put_page(page); pages[cur_page] = NULL; } } @@ -961,7 +952,7 @@ err_out: kunmap(page); unlock_page(page); if (i != xpage) - page_cache_release(page); + put_page(page); } } kfree(pages); diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index b2eff5816adc..a18613579001 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -315,11 +315,11 @@ found_it: descend_into_child_node: /* * Convert vcn to index into the index allocation attribute in units - * of PAGE_CACHE_SIZE and map the page cache page, reading it from + * of PAGE_SIZE and map the page cache page, reading it from * disk if necessary. */ page = ntfs_map_page(ia_mapping, vcn << - dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); + dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT); if (IS_ERR(page)) { ntfs_error(sb, "Failed to map directory index page, error %ld.", -PTR_ERR(page)); @@ -331,9 +331,9 @@ descend_into_child_node: fast_descend_into_child_node: /* Get to the index allocation block. */ ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << - dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); + dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK)); /* Bounds checks. */ - if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { + if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) { ntfs_error(sb, "Out of bounds check failed. Corrupt directory " "inode 0x%lx or driver bug.", dir_ni->mft_no); goto unm_err_out; @@ -366,7 +366,7 @@ fast_descend_into_child_node: goto unm_err_out; } index_end = (u8*)ia + dir_ni->itype.index.block_size; - if (index_end > kaddr + PAGE_CACHE_SIZE) { + if (index_end > kaddr + PAGE_SIZE) { ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " "0x%lx crosses page boundary. Impossible! " "Cannot access! This is probably a bug in the " @@ -559,9 +559,9 @@ found_it2: /* If vcn is in the same page cache page as old_vcn we * recycle the mapped page. */ if (old_vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT == vcn << + PAGE_SHIFT == vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT) + PAGE_SHIFT) goto fast_descend_into_child_node; unlock_page(page); ntfs_unmap_page(page); @@ -793,11 +793,11 @@ found_it: descend_into_child_node: /* * Convert vcn to index into the index allocation attribute in units - * of PAGE_CACHE_SIZE and map the page cache page, reading it from + * of PAGE_SIZE and map the page cache page, reading it from * disk if necessary. 
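The comment above describes the standard page-cache address split that the hunk below now performs with PAGE_SHIFT/PAGE_MASK: convert the VCN to a byte offset in the attribute, then split that into a page index and an offset inside the page. A sketch, names illustrative:

s64 byte_ofs = (s64)vcn << vcn_size_bits;  /* VCN -> byte offset in attribute */
pgoff_t idx  = byte_ofs >> PAGE_SHIFT;     /* which page cache page */
unsigned ofs = byte_ofs & ~PAGE_MASK;      /* offset within that page */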
*/ page = ntfs_map_page(ia_mapping, vcn << - dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); + dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT); if (IS_ERR(page)) { ntfs_error(sb, "Failed to map directory index page, error %ld.", -PTR_ERR(page)); @@ -809,9 +809,9 @@ descend_into_child_node: fast_descend_into_child_node: /* Get to the index allocation block. */ ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << - dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); + dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK)); /* Bounds checks. */ - if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { + if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) { ntfs_error(sb, "Out of bounds check failed. Corrupt directory " "inode 0x%lx or driver bug.", dir_ni->mft_no); goto unm_err_out; @@ -844,7 +844,7 @@ fast_descend_into_child_node: goto unm_err_out; } index_end = (u8*)ia + dir_ni->itype.index.block_size; - if (index_end > kaddr + PAGE_CACHE_SIZE) { + if (index_end > kaddr + PAGE_SIZE) { ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " "0x%lx crosses page boundary. Impossible! " "Cannot access! This is probably a bug in the " @@ -968,9 +968,9 @@ found_it2: /* If vcn is in the same page cache page as old_vcn we * recycle the mapped page. */ if (old_vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT == vcn << + PAGE_SHIFT == vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT) + PAGE_SHIFT) goto fast_descend_into_child_node; unlock_page(page); ntfs_unmap_page(page); @@ -1246,15 +1246,15 @@ skip_index_root: goto iput_err_out; } /* Get the starting bit position in the current bitmap page. */ - cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1); - bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1); + cur_bmp_pos = bmp_pos & ((PAGE_SIZE * 8) - 1); + bmp_pos &= ~(u64)((PAGE_SIZE * 8) - 1); get_next_bmp_page: ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx", - (unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT), + (unsigned long long)bmp_pos >> (3 + PAGE_SHIFT), (unsigned long long)bmp_pos & - (unsigned long long)((PAGE_CACHE_SIZE * 8) - 1)); + (unsigned long long)((PAGE_SIZE * 8) - 1)); bmp_page = ntfs_map_page(bmp_mapping, - bmp_pos >> (3 + PAGE_CACHE_SHIFT)); + bmp_pos >> (3 + PAGE_SHIFT)); if (IS_ERR(bmp_page)) { ntfs_error(sb, "Reading index bitmap failed."); err = PTR_ERR(bmp_page); @@ -1270,9 +1270,9 @@ find_next_index_buffer: * If we have reached the end of the bitmap page, get the next * page, and put away the old one. */ - if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) { + if (unlikely((cur_bmp_pos >> 3) >= PAGE_SIZE)) { ntfs_unmap_page(bmp_page); - bmp_pos += PAGE_CACHE_SIZE * 8; + bmp_pos += PAGE_SIZE * 8; cur_bmp_pos = 0; goto get_next_bmp_page; } @@ -1285,8 +1285,8 @@ find_next_index_buffer: ntfs_debug("Handling index buffer 0x%llx.", (unsigned long long)bmp_pos + cur_bmp_pos); /* If the current index buffer is in the same page we reuse the page. */ - if ((prev_ia_pos & (s64)PAGE_CACHE_MASK) != - (ia_pos & (s64)PAGE_CACHE_MASK)) { + if ((prev_ia_pos & (s64)PAGE_MASK) != + (ia_pos & (s64)PAGE_MASK)) { prev_ia_pos = ia_pos; if (likely(ia_page != NULL)) { unlock_page(ia_page); @@ -1296,7 +1296,7 @@ find_next_index_buffer: * Map the page cache page containing the current ia_pos, * reading it from disk if necessary. 
*/ - ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT); + ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_SHIFT); if (IS_ERR(ia_page)) { ntfs_error(sb, "Reading index allocation data failed."); err = PTR_ERR(ia_page); @@ -1307,10 +1307,10 @@ find_next_index_buffer: kaddr = (u8*)page_address(ia_page); } /* Get the current index buffer. */ - ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK & - ~(s64)(ndir->itype.index.block_size - 1))); + ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_MASK & + ~(s64)(ndir->itype.index.block_size - 1))); /* Bounds checks. */ - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { + if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) { ntfs_error(sb, "Out of bounds check failed. Corrupt directory " "inode 0x%lx or driver bug.", vdir->i_ino); goto err_out; @@ -1348,7 +1348,7 @@ find_next_index_buffer: goto err_out; } index_end = (u8*)ia + ndir->itype.index.block_size; - if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) { + if (unlikely(index_end > kaddr + PAGE_SIZE)) { ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " "0x%lx crosses page boundary. Impossible! " "Cannot access! This is probably a bug in the " diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index bed4d427dfae..91117ada8528 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -220,8 +220,8 @@ do_non_resident_extend: m = NULL; } mapping = vi->i_mapping; - index = old_init_size >> PAGE_CACHE_SHIFT; - end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + index = old_init_size >> PAGE_SHIFT; + end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT; do { /* * Read the page. If the page is not present, this will zero @@ -233,7 +233,7 @@ do_non_resident_extend: goto init_err_out; } if (unlikely(PageError(page))) { - page_cache_release(page); + put_page(page); err = -EIO; goto init_err_out; } @@ -242,13 +242,13 @@ do_non_resident_extend: * enough to make ntfs_writepage() work. */ write_lock_irqsave(&ni->size_lock, flags); - ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT; + ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT; if (ni->initialized_size > new_init_size) ni->initialized_size = new_init_size; write_unlock_irqrestore(&ni->size_lock, flags); /* Set the page dirty so it gets written out. */ set_page_dirty(page); - page_cache_release(page); + put_page(page); /* * Play nice with the vm and the rest of the system. This is * very much needed as we can potentially be modifying the @@ -543,7 +543,7 @@ out: err_out: while (nr > 0) { unlock_page(pages[--nr]); - page_cache_release(pages[nr]); + put_page(pages[nr]); } goto out; } @@ -573,7 +573,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh) * only partially being written to. * * If @nr_pages is greater than one, we are guaranteed that the cluster size is - * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside + * greater than PAGE_SIZE, that all pages in @pages are entirely inside * the same cluster and that they are the entirety of that cluster, and that * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole. 
* @@ -653,7 +653,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, u = 0; do_next_page: page = pages[u]; - bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; + bh_pos = (s64)page->index << PAGE_SHIFT; bh = head = page_buffers(page); do { VCN cdelta; @@ -810,11 +810,11 @@ map_buffer_cached: kaddr = kmap_atomic(page); if (bh_pos < pos) { - pofs = bh_pos & ~PAGE_CACHE_MASK; + pofs = bh_pos & ~PAGE_MASK; memset(kaddr + pofs, 0, pos - bh_pos); } if (bh_end > end) { - pofs = end & ~PAGE_CACHE_MASK; + pofs = end & ~PAGE_MASK; memset(kaddr + pofs, 0, bh_end - end); } kunmap_atomic(kaddr); @@ -942,7 +942,7 @@ rl_not_mapped_enoent: * unmapped. This can only happen when the cluster size is * less than the page cache size. */ - if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) { + if (unlikely(vol->cluster_size < PAGE_SIZE)) { bh_cend = (bh_end + vol->cluster_size - 1) >> vol->cluster_size_bits; if ((bh_cend <= cpos || bh_cpos >= cend)) { @@ -1208,7 +1208,7 @@ rl_not_mapped_enoent: wait_on_buffer(bh); if (likely(buffer_uptodate(bh))) { page = bh->b_page; - bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) + + bh_pos = ((s64)page->index << PAGE_SHIFT) + bh_offset(bh); /* * If the buffer overflows the initialized size, need @@ -1350,7 +1350,7 @@ rl_not_mapped_enoent: bh = head = page_buffers(page); do { if (u == nr_pages && - ((s64)page->index << PAGE_CACHE_SHIFT) + + ((s64)page->index << PAGE_SHIFT) + bh_offset(bh) >= end) break; if (!buffer_new(bh)) @@ -1422,7 +1422,7 @@ static inline int ntfs_commit_pages_after_non_resident_write( bool partial; page = pages[u]; - bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; + bh_pos = (s64)page->index << PAGE_SHIFT; bh = head = page_buffers(page); partial = false; do { @@ -1639,7 +1639,7 @@ static int ntfs_commit_pages_after_write(struct page **pages, if (end < attr_len) memcpy(kaddr + end, kattr + end, attr_len - end); /* Zero the region outside the end of the attribute value. */ - memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); + memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len); flush_dcache_page(page); SetPageUptodate(page); } @@ -1706,7 +1706,7 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages, unsigned len, copied; do { - len = PAGE_CACHE_SIZE - ofs; + len = PAGE_SIZE - ofs; if (len > bytes) len = bytes; copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs, @@ -1724,14 +1724,14 @@ out: return total; err: /* Zero the rest of the target like __copy_from_user(). */ - len = PAGE_CACHE_SIZE - copied; + len = PAGE_SIZE - copied; do { if (len > bytes) len = bytes; zero_user(*pages, copied, len); bytes -= len; copied = 0; - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; } while (++pages < last_page); goto out; } @@ -1787,8 +1787,8 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i, * attributes. 
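The nr_pages value set just below is the cluster-to-page ratio: when a cluster is larger than a page, a write that allocates a cluster must prepare every page covering it, and since both sizes are powers of two the ratio is a single shift. A sketch, with the example values being illustrative:

unsigned nr_pages = 1;
if (cluster_size > PAGE_SIZE)                   /* e.g. 64kiB clusters, 4kiB pages */
        nr_pages = cluster_size >> PAGE_SHIFT;  /* -> 16 pages per cluster */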
*/ nr_pages = 1; - if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni)) - nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT; + if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni)) + nr_pages = vol->cluster_size >> PAGE_SHIFT; last_vcn = -1; do { VCN vcn; @@ -1796,9 +1796,9 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i, unsigned ofs, do_pages, u; size_t copied; - start_idx = idx = pos >> PAGE_CACHE_SHIFT; - ofs = pos & ~PAGE_CACHE_MASK; - bytes = PAGE_CACHE_SIZE - ofs; + start_idx = idx = pos >> PAGE_SHIFT; + ofs = pos & ~PAGE_MASK; + bytes = PAGE_SIZE - ofs; do_pages = 1; if (nr_pages > 1) { vcn = pos >> vol->cluster_size_bits; @@ -1832,7 +1832,7 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i, if (lcn == LCN_HOLE) { start_idx = (pos & ~(s64) vol->cluster_size_mask) - >> PAGE_CACHE_SHIFT; + >> PAGE_SHIFT; bytes = vol->cluster_size - (pos & vol->cluster_size_mask); do_pages = nr_pages; @@ -1871,12 +1871,12 @@ again: if (unlikely(status)) { do { unlock_page(pages[--do_pages]); - page_cache_release(pages[do_pages]); + put_page(pages[do_pages]); } while (do_pages); break; } } - u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index; + u = (pos >> PAGE_SHIFT) - pages[0]->index; copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs, i, bytes); ntfs_flush_dcache_pages(pages + u, do_pages - u); @@ -1889,7 +1889,7 @@ again: } do { unlock_page(pages[--do_pages]); - page_cache_release(pages[do_pages]); + put_page(pages[do_pages]); } while (do_pages); if (unlikely(status < 0)) break; @@ -1921,7 +1921,7 @@ again: } } while (iov_iter_count(i)); if (cached_page) - page_cache_release(cached_page); + put_page(cached_page); ntfs_debug("Done. Returning %s (written 0x%lx, status %li).", written ? "written" : "status", (unsigned long)written, (long)status); diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c index 096c135691ae..0d645f357930 100644 --- a/fs/ntfs/index.c +++ b/fs/ntfs/index.c @@ -272,11 +272,11 @@ done: descend_into_child_node: /* * Convert vcn to index into the index allocation attribute in units - * of PAGE_CACHE_SIZE and map the page cache page, reading it from + * of PAGE_SIZE and map the page cache page, reading it from * disk if necessary. */ page = ntfs_map_page(ia_mapping, vcn << - idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); + idx_ni->itype.index.vcn_size_bits >> PAGE_SHIFT); if (IS_ERR(page)) { ntfs_error(sb, "Failed to map index page, error %ld.", -PTR_ERR(page)); @@ -288,9 +288,9 @@ descend_into_child_node: fast_descend_into_child_node: /* Get to the index allocation block. */ ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << - idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); + idx_ni->itype.index.vcn_size_bits) & ~PAGE_MASK)); /* Bounds checks. */ - if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { + if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) { ntfs_error(sb, "Out of bounds check failed. Corrupt inode " "0x%lx or driver bug.", idx_ni->mft_no); goto unm_err_out; @@ -323,7 +323,7 @@ fast_descend_into_child_node: goto unm_err_out; } index_end = (u8*)ia + idx_ni->itype.index.block_size; - if (index_end > kaddr + PAGE_CACHE_SIZE) { + if (index_end > kaddr + PAGE_SIZE) { ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx " "crosses page boundary. Impossible! Cannot " "access! This is probably a bug in the " @@ -427,9 +427,9 @@ ia_done: * the mapped page. 
*/ if (old_vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT == vcn << + PAGE_SHIFT == vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT) + PAGE_SHIFT) goto fast_descend_into_child_node; unlock_page(page); ntfs_unmap_page(page); diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index d284f07eda77..f40972d6df90 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -868,12 +868,12 @@ skip_attr_list_load: ni->itype.index.block_size); goto unm_err_out; } - if (ni->itype.index.block_size > PAGE_CACHE_SIZE) { + if (ni->itype.index.block_size > PAGE_SIZE) { ntfs_error(vi->i_sb, "Index block size (%u) > " - "PAGE_CACHE_SIZE (%ld) is not " + "PAGE_SIZE (%ld) is not " "supported. Sorry.", ni->itype.index.block_size, - PAGE_CACHE_SIZE); + PAGE_SIZE); err = -EOPNOTSUPP; goto unm_err_out; } @@ -1585,10 +1585,10 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi) "two.", ni->itype.index.block_size); goto unm_err_out; } - if (ni->itype.index.block_size > PAGE_CACHE_SIZE) { - ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE " + if (ni->itype.index.block_size > PAGE_SIZE) { + ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE " "(%ld) is not supported. Sorry.", - ni->itype.index.block_size, PAGE_CACHE_SIZE); + ni->itype.index.block_size, PAGE_SIZE); err = -EOPNOTSUPP; goto unm_err_out; } diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c index 1711b710b641..27a24a42f712 100644 --- a/fs/ntfs/lcnalloc.c +++ b/fs/ntfs/lcnalloc.c @@ -283,15 +283,15 @@ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn, ntfs_unmap_page(page); } page = ntfs_map_page(mapping, last_read_pos >> - PAGE_CACHE_SHIFT); + PAGE_SHIFT); if (IS_ERR(page)) { err = PTR_ERR(page); ntfs_error(vol->sb, "Failed to map page."); goto out; } - buf_size = last_read_pos & ~PAGE_CACHE_MASK; + buf_size = last_read_pos & ~PAGE_MASK; buf = page_address(page) + buf_size; - buf_size = PAGE_CACHE_SIZE - buf_size; + buf_size = PAGE_SIZE - buf_size; if (unlikely(last_read_pos + buf_size > i_size)) buf_size = i_size - last_read_pos; buf_size <<= 3; diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c index c71de292c5ad..9d71213ca81e 100644 --- a/fs/ntfs/logfile.c +++ b/fs/ntfs/logfile.c @@ -381,7 +381,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi, * completely inside @rp, just copy it from there. Otherwise map all * the required pages and copy the data from them. */ - size = PAGE_CACHE_SIZE - (pos & ~PAGE_CACHE_MASK); + size = PAGE_SIZE - (pos & ~PAGE_MASK); if (size >= le32_to_cpu(rp->system_page_size)) { memcpy(trp, rp, le32_to_cpu(rp->system_page_size)); } else { @@ -394,8 +394,8 @@ static int ntfs_check_and_load_restart_page(struct inode *vi, /* Copy the remaining data one page at a time. */ have_read = size; to_read = le32_to_cpu(rp->system_page_size) - size; - idx = (pos + size) >> PAGE_CACHE_SHIFT; - BUG_ON((pos + size) & ~PAGE_CACHE_MASK); + idx = (pos + size) >> PAGE_SHIFT; + BUG_ON((pos + size) & ~PAGE_MASK); do { page = ntfs_map_page(vi->i_mapping, idx); if (IS_ERR(page)) { @@ -406,7 +406,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi, err = -EIO; goto err_out; } - size = min_t(int, to_read, PAGE_CACHE_SIZE); + size = min_t(int, to_read, PAGE_SIZE); memcpy((u8*)trp + have_read, page_address(page), size); ntfs_unmap_page(page); have_read += size; @@ -509,11 +509,11 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp) * log page size if the page cache size is between the default log page * size and twice that. 
*/ - if (PAGE_CACHE_SIZE >= DefaultLogPageSize && PAGE_CACHE_SIZE <= + if (PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2) log_page_size = DefaultLogPageSize; else - log_page_size = PAGE_CACHE_SIZE; + log_page_size = PAGE_SIZE; log_page_mask = log_page_size - 1; /* * Use ntfs_ffs() instead of ffs() to enable the compiler to @@ -539,7 +539,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp) * to be empty. */ for (pos = 0; pos < size; pos <<= 1) { - pgoff_t idx = pos >> PAGE_CACHE_SHIFT; + pgoff_t idx = pos >> PAGE_SHIFT; if (!page || page->index != idx) { if (page) ntfs_unmap_page(page); @@ -550,7 +550,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp) goto err_out; } } - kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK); + kaddr = (u8*)page_address(page) + (pos & ~PAGE_MASK); /* * A non-empty block means the logfile is not empty while an * empty block after a non-empty block has been encountered diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 3014a36a255b..37b2501caaa4 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c @@ -61,16 +61,16 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni) * here if the volume was that big... */ index = (u64)ni->mft_no << vol->mft_record_size_bits >> - PAGE_CACHE_SHIFT; - ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; + PAGE_SHIFT; + ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_MASK; i_size = i_size_read(mft_vi); /* The maximum valid index into the page cache for $MFT's data. */ - end_index = i_size >> PAGE_CACHE_SHIFT; + end_index = i_size >> PAGE_SHIFT; /* If the wanted index is out of bounds the mft record doesn't exist. */ if (unlikely(index >= end_index)) { - if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs + + if (index > end_index || (i_size & ~PAGE_MASK) < ofs + vol->mft_record_size) { page = ERR_PTR(-ENOENT); ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, " @@ -487,7 +487,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no, } /* Get the page containing the mirror copy of the mft record @m. */ page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >> - (PAGE_CACHE_SHIFT - vol->mft_record_size_bits)); + (PAGE_SHIFT - vol->mft_record_size_bits)); if (IS_ERR(page)) { ntfs_error(vol->sb, "Failed to map mft mirror page."); err = PTR_ERR(page); @@ -497,7 +497,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no, BUG_ON(!PageUptodate(page)); ClearPageUptodate(page); /* Offset of the mft mirror record inside the page. */ - page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; + page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK; /* The address in the page of the mirror copy of the mft record @m. */ kmirr = page_address(page) + page_ofs; /* Copy the mst protected mft record to the mirror. */ @@ -1178,8 +1178,8 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol, for (; pass <= 2;) { /* Cap size to pass_end. 
*/ ofs = data_pos >> 3; - page_ofs = ofs & ~PAGE_CACHE_MASK; - size = PAGE_CACHE_SIZE - page_ofs; + page_ofs = ofs & ~PAGE_MASK; + size = PAGE_SIZE - page_ofs; ll = ((pass_end + 7) >> 3) - ofs; if (size > ll) size = ll; @@ -1190,7 +1190,7 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol, */ if (size) { page = ntfs_map_page(mftbmp_mapping, - ofs >> PAGE_CACHE_SHIFT); + ofs >> PAGE_SHIFT); if (IS_ERR(page)) { ntfs_error(vol->sb, "Failed to read mft " "bitmap, aborting."); @@ -1328,13 +1328,13 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol) */ ll = lcn >> 3; page = ntfs_map_page(vol->lcnbmp_ino->i_mapping, - ll >> PAGE_CACHE_SHIFT); + ll >> PAGE_SHIFT); if (IS_ERR(page)) { up_write(&mftbmp_ni->runlist.lock); ntfs_error(vol->sb, "Failed to read from lcn bitmap."); return PTR_ERR(page); } - b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK); + b = (u8*)page_address(page) + (ll & ~PAGE_MASK); tb = 1 << (lcn & 7ull); down_write(&vol->lcnbmp_lock); if (*b != 0xff && !(*b & tb)) { @@ -2103,14 +2103,14 @@ static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no) * The index into the page cache and the offset within the page cache * page of the wanted mft record. */ - index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; - ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; + index = mft_no << vol->mft_record_size_bits >> PAGE_SHIFT; + ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK; /* The maximum valid index into the page cache for $MFT's data. */ i_size = i_size_read(mft_vi); - end_index = i_size >> PAGE_CACHE_SHIFT; + end_index = i_size >> PAGE_SHIFT; if (unlikely(index >= end_index)) { if (unlikely(index > end_index || ofs + vol->mft_record_size >= - (i_size & ~PAGE_CACHE_MASK))) { + (i_size & ~PAGE_MASK))) { ntfs_error(vol->sb, "Tried to format non-existing mft " "record 0x%llx.", (long long)mft_no); return -ENOENT; @@ -2515,8 +2515,8 @@ mft_rec_already_initialized: * We now have allocated and initialized the mft record. Calculate the * index of and the offset within the page cache page the record is in. */ - index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; - ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; + index = bit << vol->mft_record_size_bits >> PAGE_SHIFT; + ofs = (bit << vol->mft_record_size_bits) & ~PAGE_MASK; /* Read, map, and pin the page containing the mft record. */ page = ntfs_map_page(vol->mft_ino->i_mapping, index); if (IS_ERR(page)) { diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h index c581e26a350d..12de47b96ca9 100644 --- a/fs/ntfs/ntfs.h +++ b/fs/ntfs/ntfs.h @@ -43,7 +43,7 @@ typedef enum { NTFS_MAX_NAME_LEN = 255, NTFS_MAX_ATTR_NAME_LEN = 255, NTFS_MAX_CLUSTER_SIZE = 64 * 1024, /* 64kiB */ - NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_CACHE_SIZE, + NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_SIZE, } NTFS_CONSTANTS; /* Global variables. */ diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 1b38abdaa3ed..ecb49870a680 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -823,14 +823,14 @@ static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b) ntfs_debug("vol->mft_record_size_bits = %i (0x%x)", vol->mft_record_size_bits, vol->mft_record_size_bits); /* - * We cannot support mft record sizes above the PAGE_CACHE_SIZE since + * We cannot support mft record sizes above the PAGE_SIZE since * we store $MFT/$DATA, the table of mft records in the page cache. 
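The size check below works because mft_record_size and PAGE_SIZE are both powers of two: once a record fits in a page it also divides the page evenly, so records never straddle page boundaries and locating one takes two shifts and a mask. A minimal sketch (names illustrative, mrecs_per_page matches the quantity check_mft_mirror() computes later in this file):

u64 byte     = (u64)mft_no << mft_record_size_bits;
pgoff_t idx  = byte >> PAGE_SHIFT;              /* page holding the record */
unsigned ofs = byte & ~PAGE_MASK;               /* record offset in that page */
unsigned mrecs_per_page = PAGE_SIZE / mft_record_size; /* >= 1 by the check */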
*/ - if (vol->mft_record_size > PAGE_CACHE_SIZE) { + if (vol->mft_record_size > PAGE_SIZE) { ntfs_error(vol->sb, "Mft record size (%i) exceeds the " - "PAGE_CACHE_SIZE on your system (%lu). " + "PAGE_SIZE on your system (%lu). " "This is not supported. Sorry.", - vol->mft_record_size, PAGE_CACHE_SIZE); + vol->mft_record_size, PAGE_SIZE); return false; } /* We cannot support mft record sizes below the sector size. */ @@ -1096,7 +1096,7 @@ static bool check_mft_mirror(ntfs_volume *vol) ntfs_debug("Entering."); /* Compare contents of $MFT and $MFTMirr. */ - mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size; + mrecs_per_page = PAGE_SIZE / vol->mft_record_size; BUG_ON(!mrecs_per_page); BUG_ON(!vol->mftmirr_size); mft_page = mirr_page = NULL; @@ -1615,20 +1615,20 @@ static bool load_and_init_attrdef(ntfs_volume *vol) if (!vol->attrdef) goto iput_failed; index = 0; - max_index = i_size >> PAGE_CACHE_SHIFT; - size = PAGE_CACHE_SIZE; + max_index = i_size >> PAGE_SHIFT; + size = PAGE_SIZE; while (index < max_index) { /* Read the attrdef table and copy it into the linear buffer. */ read_partial_attrdef_page: page = ntfs_map_page(ino->i_mapping, index); if (IS_ERR(page)) goto free_iput_failed; - memcpy((u8*)vol->attrdef + (index++ << PAGE_CACHE_SHIFT), + memcpy((u8*)vol->attrdef + (index++ << PAGE_SHIFT), page_address(page), size); ntfs_unmap_page(page); }; - if (size == PAGE_CACHE_SIZE) { - size = i_size & ~PAGE_CACHE_MASK; + if (size == PAGE_SIZE) { + size = i_size & ~PAGE_MASK; if (size) goto read_partial_attrdef_page; } @@ -1684,20 +1684,20 @@ static bool load_and_init_upcase(ntfs_volume *vol) if (!vol->upcase) goto iput_upcase_failed; index = 0; - max_index = i_size >> PAGE_CACHE_SHIFT; - size = PAGE_CACHE_SIZE; + max_index = i_size >> PAGE_SHIFT; + size = PAGE_SIZE; while (index < max_index) { /* Read the upcase table and copy it into the linear buffer. */ read_partial_upcase_page: page = ntfs_map_page(ino->i_mapping, index); if (IS_ERR(page)) goto iput_upcase_failed; - memcpy((char*)vol->upcase + (index++ << PAGE_CACHE_SHIFT), + memcpy((char*)vol->upcase + (index++ << PAGE_SHIFT), page_address(page), size); ntfs_unmap_page(page); }; - if (size == PAGE_CACHE_SIZE) { - size = i_size & ~PAGE_CACHE_MASK; + if (size == PAGE_SIZE) { + size = i_size & ~PAGE_MASK; if (size) goto read_partial_upcase_page; } @@ -2471,14 +2471,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) down_read(&vol->lcnbmp_lock); /* * Convert the number of bits into bytes rounded up, then convert into - * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one + * multiples of PAGE_SIZE, rounding up so that if we have one * full and one partial page max_index = 2. */ - max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; - /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ + max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >> + PAGE_SHIFT; + /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */ ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.", - max_index, PAGE_CACHE_SIZE / 4); + max_index, PAGE_SIZE / 4); for (index = 0; index < max_index; index++) { unsigned long *kaddr; @@ -2491,7 +2491,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) if (IS_ERR(page)) { ntfs_debug("read_mapping_page() error. 
Skipping " "page (index 0x%lx).", index); - nr_free -= PAGE_CACHE_SIZE * 8; + nr_free -= PAGE_SIZE * 8; continue; } kaddr = kmap_atomic(page); @@ -2503,9 +2503,9 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) * ntfs_readpage(). */ nr_free -= bitmap_weight(kaddr, - PAGE_CACHE_SIZE * BITS_PER_BYTE); + PAGE_SIZE * BITS_PER_BYTE); kunmap_atomic(kaddr); - page_cache_release(page); + put_page(page); } ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1); /* @@ -2547,9 +2547,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, pgoff_t index; ntfs_debug("Entering."); - /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ + /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */ ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = " - "0x%lx.", max_index, PAGE_CACHE_SIZE / 4); + "0x%lx.", max_index, PAGE_SIZE / 4); for (index = 0; index < max_index; index++) { unsigned long *kaddr; @@ -2562,7 +2562,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, if (IS_ERR(page)) { ntfs_debug("read_mapping_page() error. Skipping " "page (index 0x%lx).", index); - nr_free -= PAGE_CACHE_SIZE * 8; + nr_free -= PAGE_SIZE * 8; continue; } kaddr = kmap_atomic(page); @@ -2574,9 +2574,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, * ntfs_readpage(). */ nr_free -= bitmap_weight(kaddr, - PAGE_CACHE_SIZE * BITS_PER_BYTE); + PAGE_SIZE * BITS_PER_BYTE); kunmap_atomic(kaddr); - page_cache_release(page); + put_page(page); } ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.", index - 1); @@ -2618,17 +2618,17 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs) /* Type of filesystem. */ sfs->f_type = NTFS_SB_MAGIC; /* Optimal transfer block size. */ - sfs->f_bsize = PAGE_CACHE_SIZE; + sfs->f_bsize = PAGE_SIZE; /* * Total data blocks in filesystem in units of f_bsize and since * inodes are also stored in data blocs ($MFT is a file) this is just * the total clusters. */ sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; /* Free data blocks in filesystem in units of f_bsize. */ size = get_nr_free_clusters(vol) << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if (size < 0LL) size = 0LL; /* Free blocks avail to non-superuser, same as above on NTFS. */ @@ -2639,11 +2639,11 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs) size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits; /* * Convert the maximum number of set bits into bytes rounded up, then - * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we + * convert into multiples of PAGE_SIZE, rounding up so that if we * have one full and one partial page max_index = 2. */ max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits) - + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + + 7) >> 3) + PAGE_SIZE - 1) >> PAGE_SHIFT; read_unlock_irqrestore(&mft_ni->size_lock, flags); /* Number of inodes in filesystem (at this point in time). */ sfs->f_files = size; @@ -2765,15 +2765,15 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) if (!parse_options(vol, (char*)opt)) goto err_out_now; - /* We support sector sizes up to the PAGE_CACHE_SIZE. */ - if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) { + /* We support sector sizes up to the PAGE_SIZE. */ + if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) { if (!silent) ntfs_error(sb, "Device has unsupported sector size " "(%i). 
The maximum supported sector " "size on this architecture is %lu " "bytes.", bdev_logical_block_size(sb->s_bdev), - PAGE_CACHE_SIZE); + PAGE_SIZE); goto err_out_now; } /* diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index ce210d4951a1..e27e6527912b 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile @@ -41,7 +41,8 @@ ocfs2-objs := \ quota_local.o \ quota_global.o \ xattr.o \ - acl.o + acl.o \ + filecheck.o ocfs2_stackglue-objs := stackglue.o ocfs2_stack_o2cb-objs := stack_o2cb.o diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index d002579c6f2b..e361d1a0ca09 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -2516,21 +2516,6 @@ static int ocfs2_update_edge_lengths(handle_t *handle, struct ocfs2_extent_block *eb; u32 range; - /* - * In normal tree rotation process, we will never touch the - * tree branch above subtree_index and ocfs2_extend_rotate_transaction - * doesn't reserve the credits for them either. - * - * But we do have a special case here which will update the rightmost - * records for all the bh in the path. - * So we have to allocate extra credits and access them. - */ - ret = ocfs2_extend_trans(handle, subtree_index); - if (ret) { - mlog_errno(ret); - goto out; - } - ret = ocfs2_journal_access_path(et->et_ci, handle, path); if (ret) { mlog_errno(ret); @@ -2956,7 +2941,7 @@ static int __ocfs2_rotate_tree_left(handle_t *handle, right_path->p_node[subtree_root].bh->b_blocknr, right_path->p_tree_depth); - ret = ocfs2_extend_rotate_transaction(handle, subtree_root, + ret = ocfs2_extend_rotate_transaction(handle, 0, orig_credits, left_path); if (ret) { mlog_errno(ret); @@ -3029,21 +3014,9 @@ static int ocfs2_remove_rightmost_path(handle_t *handle, struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; - ret = ocfs2_et_sanity_check(et); if (ret) goto out; - /* - * There's two ways we handle this depending on - * whether path is the only existing one. - */ - ret = ocfs2_extend_rotate_transaction(handle, 0, - handle->h_buffer_credits, - path); - if (ret) { - mlog_errno(ret); - goto out; - } ret = ocfs2_journal_access_path(et->et_ci, handle, path); if (ret) { @@ -3641,6 +3614,14 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path, */ if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 && le16_to_cpu(el->l_next_free_rec) == 1) { + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + right_path); + if (ret) { + mlog_errno(ret); + goto out; + } ret = ocfs2_remove_rightmost_path(handle, et, right_path, @@ -3679,6 +3660,14 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, BUG_ON(ctxt->c_contig_type == CONTIG_NONE); if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) { + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + goto out; + } /* * The merge code will need to create an empty * extent to take the place of the newly @@ -3727,6 +3716,15 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, */ BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0])); + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + goto out; + } + /* The merge left us with an empty extent, remove it. 
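Besides the macro rename, these fs/ocfs2/alloc.c hunks move the journal-credit extension out of ocfs2_update_edge_lengths() and ocfs2_remove_rightmost_path() and into each caller, so the call sites now reserve credits themselves before removing the rightmost path. The repeated shape, quoted from the added hunks:

/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
                handle->h_buffer_credits, path);
if (ret) {
        mlog_errno(ret);
        goto out;
}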
*/ ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { @@ -3748,6 +3746,15 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, goto out; } + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + goto out; + } + ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); /* * Error from this last rotate is not critical, so @@ -3783,6 +3790,16 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, } if (ctxt->c_split_covers_rec) { + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + ret = 0; + goto out; + } + /* * The merge may have left an empty extent in * our leaf. Try to rotate it away. @@ -5342,6 +5359,15 @@ static int ocfs2_truncate_rec(handle_t *handle, struct ocfs2_extent_block *eb; if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) { + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + goto out; + } + ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { mlog_errno(ret); @@ -5928,16 +5954,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, ocfs2_journal_dirty(handle, tl_bh); - /* TODO: Perhaps we can calculate the bulk of the - * credits up front rather than extending like - * this. */ - status = ocfs2_extend_trans(handle, - OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); - if (status < 0) { - mlog_errno(status); - goto bail; - } - rec = tl->tl_recs[i]; start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb, le32_to_cpu(rec.t_start)); @@ -5958,6 +5974,13 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, goto bail; } } + + status = ocfs2_extend_trans(handle, + OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); + if (status < 0) { + mlog_errno(status); + goto bail; + } i--; } @@ -6016,7 +6039,7 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) goto out_mutex; } - handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); + handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); @@ -6079,7 +6102,7 @@ void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb, if (cancel) cancel_delayed_work(&osb->osb_truncate_log_wq); - queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq, + queue_delayed_work(osb->ocfs2_wq, &osb->osb_truncate_log_wq, OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL); } } @@ -6253,7 +6276,7 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb) if (tl_inode) { cancel_delayed_work(&osb->osb_truncate_log_wq); - flush_workqueue(ocfs2_wq); + flush_workqueue(osb->ocfs2_wq); status = ocfs2_flush_truncate_log(osb); if (status < 0) @@ -6648,7 +6671,7 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, { int i; struct page *page; - unsigned int from, to = PAGE_CACHE_SIZE; + unsigned int from, to = PAGE_SIZE; struct super_block *sb = inode->i_sb; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); @@ -6656,21 +6679,21 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, if (numpages == 0) goto out; - to = PAGE_CACHE_SIZE; + to = PAGE_SIZE; for(i = 0; i < numpages; i++) { page = pages[i]; - from = start & (PAGE_CACHE_SIZE - 1); - if ((end >> PAGE_CACHE_SHIFT) == page->index) - to = end & (PAGE_CACHE_SIZE - 1); + from = start & (PAGE_SIZE - 1); + 
if ((end >> PAGE_SHIFT) == page->index) + to = end & (PAGE_SIZE - 1); - BUG_ON(from > PAGE_CACHE_SIZE); - BUG_ON(to > PAGE_CACHE_SIZE); + BUG_ON(from > PAGE_SIZE); + BUG_ON(to > PAGE_SIZE); ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, &phys); - start = (page->index + 1) << PAGE_CACHE_SHIFT; + start = (page->index + 1) << PAGE_SHIFT; } out: if (pages) @@ -6689,7 +6712,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, numpages = 0; last_page_bytes = PAGE_ALIGN(end); - index = start >> PAGE_CACHE_SHIFT; + index = start >> PAGE_SHIFT; do { pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); if (!pages[numpages]) { @@ -6700,7 +6723,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, numpages++; index++; - } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT)); + } while (index < (last_page_bytes >> PAGE_SHIFT)); out: if (ret != 0) { @@ -6927,8 +6950,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, * to do that now. */ if (!ocfs2_sparse_alloc(osb) && - PAGE_CACHE_SIZE < osb->s_clustersize) - end = PAGE_CACHE_SIZE; + PAGE_SIZE < osb->s_clustersize) + end = PAGE_SIZE; ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); if (ret) { @@ -6948,8 +6971,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, goto out_unlock; } - page_end = PAGE_CACHE_SIZE; - if (PAGE_CACHE_SIZE > osb->s_clustersize) + page_end = PAGE_SIZE; + if (PAGE_SIZE > osb->s_clustersize) page_end = osb->s_clustersize; for (i = 0; i < num_pages; i++) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 043110e5212d..ad1577348a92 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -234,7 +234,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, size = i_size_read(inode); - if (size > PAGE_CACHE_SIZE || + if (size > PAGE_SIZE || size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) { ocfs2_error(inode->i_sb, "Inode %llu has with inline data has bad size: %Lu\n", @@ -247,7 +247,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, if (size) memcpy(kaddr, di->id2.i_data.id_data, size); /* Clear the remaining part of the page */ - memset(kaddr + size, 0, PAGE_CACHE_SIZE - size); + memset(kaddr + size, 0, PAGE_SIZE - size); flush_dcache_page(page); kunmap_atomic(kaddr); @@ -282,7 +282,7 @@ static int ocfs2_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct ocfs2_inode_info *oi = OCFS2_I(inode); - loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT; + loff_t start = (loff_t)page->index << PAGE_SHIFT; int ret, unlock = 1; trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, @@ -385,7 +385,7 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping, * drop out in that case as it's not worth handling here. */ last = list_entry(pages->prev, struct page, lru); - start = (loff_t)last->index << PAGE_CACHE_SHIFT; + start = (loff_t)last->index << PAGE_SHIFT; if (start >= i_size_read(inode)) goto out_unlock; @@ -499,158 +499,6 @@ bail: return status; } -/* - * TODO: Make this into a generic get_blocks function. - * - * From do_direct_io in direct-io.c: - * "So what we do is to permit the ->get_blocks function to populate - * bh.b_size with the size of IO which is permitted at this offset and - * this i_blkbits." - * - * This function is called directly from get_more_blocks in direct-io.c. 
- * - * called like this: dio->get_blocks(dio->inode, fs_startblk, - * fs_count, map_bh, dio->rw == WRITE); - */ -static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) -{ - int ret; - u32 cpos = 0; - int alloc_locked = 0; - u64 p_blkno, inode_blocks, contig_blocks; - unsigned int ext_flags; - unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; - unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; - unsigned long len = bh_result->b_size; - unsigned int clusters_to_alloc = 0, contig_clusters = 0; - - cpos = ocfs2_blocks_to_clusters(inode->i_sb, iblock); - - /* This function won't even be called if the request isn't all - * nicely aligned and of the right size, so there's no need - * for us to check any of that. */ - - inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); - - down_read(&OCFS2_I(inode)->ip_alloc_sem); - - /* This figures out the size of the next contiguous block, and - * our logical offset */ - ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, - &contig_blocks, &ext_flags); - up_read(&OCFS2_I(inode)->ip_alloc_sem); - - if (ret) { - mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n", - (unsigned long long)iblock); - ret = -EIO; - goto bail; - } - - /* We should already CoW the refcounted extent in case of create. */ - BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED)); - - /* allocate blocks if no p_blkno is found, and create == 1 */ - if (!p_blkno && create) { - ret = ocfs2_inode_lock(inode, NULL, 1); - if (ret < 0) { - mlog_errno(ret); - goto bail; - } - - alloc_locked = 1; - - down_write(&OCFS2_I(inode)->ip_alloc_sem); - - /* fill hole, allocate blocks can't be larger than the size - * of the hole */ - clusters_to_alloc = ocfs2_clusters_for_bytes(inode->i_sb, len); - contig_clusters = ocfs2_clusters_for_blocks(inode->i_sb, - contig_blocks); - if (clusters_to_alloc > contig_clusters) - clusters_to_alloc = contig_clusters; - - /* allocate extent and insert them into the extent tree */ - ret = ocfs2_extend_allocation(inode, cpos, - clusters_to_alloc, 0); - if (ret < 0) { - up_write(&OCFS2_I(inode)->ip_alloc_sem); - mlog_errno(ret); - goto bail; - } - - ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, - &contig_blocks, &ext_flags); - if (ret < 0) { - up_write(&OCFS2_I(inode)->ip_alloc_sem); - mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n", - (unsigned long long)iblock); - ret = -EIO; - goto bail; - } - set_buffer_new(bh_result); - up_write(&OCFS2_I(inode)->ip_alloc_sem); - } - - /* - * get_more_blocks() expects us to describe a hole by clearing - * the mapped bit on bh_result(). - * - * Consider an unwritten extent as a hole. - */ - if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN)) - map_bh(bh_result, inode->i_sb, p_blkno); - else - clear_buffer_mapped(bh_result); - - /* make sure we don't map more than max_blocks blocks here as - that's all the kernel will handle at this point. */ - if (max_blocks < contig_blocks) - contig_blocks = max_blocks; - bh_result->b_size = contig_blocks << blocksize_bits; -bail: - if (alloc_locked) - ocfs2_inode_unlock(inode, 1); - return ret; -} - -/* - * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're - * particularly interested in the aio/dio case. We use the rw_lock DLM lock - * to protect io on one node from truncation on another. 
- */ -static int ocfs2_dio_end_io(struct kiocb *iocb, - loff_t offset, - ssize_t bytes, - void *private) -{ - struct inode *inode = file_inode(iocb->ki_filp); - int level; - - if (bytes <= 0) - return 0; - - /* this io's submitter should not have unlocked this before we could */ - BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); - - if (ocfs2_iocb_is_unaligned_aio(iocb)) { - ocfs2_iocb_clear_unaligned_aio(iocb); - - mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio); - } - - /* Let rw unlock to be done later to protect append direct io write */ - if (offset + bytes <= i_size_read(inode)) { - ocfs2_iocb_clear_rw_locked(iocb); - - level = ocfs2_iocb_rw_locked_level(iocb); - ocfs2_rw_unlock(inode, level); - } - - return 0; -} - static int ocfs2_releasepage(struct page *page, gfp_t wait) { if (!page_has_buffers(page)) @@ -658,374 +506,17 @@ static int ocfs2_releasepage(struct page *page, gfp_t wait) return try_to_free_buffers(page); } -static int ocfs2_is_overwrite(struct ocfs2_super *osb, - struct inode *inode, loff_t offset) -{ - int ret = 0; - u32 v_cpos = 0; - u32 p_cpos = 0; - unsigned int num_clusters = 0; - unsigned int ext_flags = 0; - - v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset); - ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, - &num_clusters, &ext_flags); - if (ret < 0) { - mlog_errno(ret); - return ret; - } - - if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) - return 1; - - return 0; -} - -static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb, - struct inode *inode, loff_t offset, - u64 zero_len, int cluster_align) -{ - u32 p_cpos = 0; - u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode)); - unsigned int num_clusters = 0; - unsigned int ext_flags = 0; - int ret = 0; - - if (offset <= i_size_read(inode) || cluster_align) - return 0; - - ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters, - &ext_flags); - if (ret < 0) { - mlog_errno(ret); - return ret; - } - - if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { - u64 s = i_size_read(inode); - sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) + - (do_div(s, osb->s_clustersize) >> 9); - - ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector, - zero_len >> 9, GFP_NOFS, false); - if (ret < 0) - mlog_errno(ret); - } - - return ret; -} - -static int ocfs2_direct_IO_extend_no_holes(struct ocfs2_super *osb, - struct inode *inode, loff_t offset) -{ - u64 zero_start, zero_len, total_zero_len; - u32 p_cpos = 0, clusters_to_add; - u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode)); - unsigned int num_clusters = 0; - unsigned int ext_flags = 0; - u32 size_div, offset_div; - int ret = 0; - - { - u64 o = offset; - u64 s = i_size_read(inode); - - offset_div = do_div(o, osb->s_clustersize); - size_div = do_div(s, osb->s_clustersize); - } - - if (offset <= i_size_read(inode)) - return 0; - - clusters_to_add = ocfs2_bytes_to_clusters(inode->i_sb, offset) - - ocfs2_bytes_to_clusters(inode->i_sb, i_size_read(inode)); - total_zero_len = offset - i_size_read(inode); - if (clusters_to_add) - total_zero_len -= offset_div; - - /* Allocate clusters to fill out holes, and this is only needed - * when we add more than one clusters. 
Otherwise the cluster will - * be allocated during direct IO */ - if (clusters_to_add > 1) { - ret = ocfs2_extend_allocation(inode, - OCFS2_I(inode)->ip_clusters, - clusters_to_add - 1, 0); - if (ret) { - mlog_errno(ret); - goto out; - } - } - - while (total_zero_len) { - ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters, - &ext_flags); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - - zero_start = ocfs2_clusters_to_bytes(osb->sb, p_cpos) + - size_div; - zero_len = ocfs2_clusters_to_bytes(osb->sb, num_clusters) - - size_div; - zero_len = min(total_zero_len, zero_len); - - if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { - ret = blkdev_issue_zeroout(osb->sb->s_bdev, - zero_start >> 9, zero_len >> 9, - GFP_NOFS, false); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - } - - total_zero_len -= zero_len; - v_cpos += ocfs2_bytes_to_clusters(osb->sb, zero_len + size_div); - - /* Only at first iteration can be cluster not aligned. - * So set size_div to 0 for the rest */ - size_div = 0; - } - -out: - return ret; -} - -static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, - struct iov_iter *iter, - loff_t offset) -{ - ssize_t ret = 0; - ssize_t written = 0; - bool orphaned = false; - int is_overwrite = 0; - struct file *file = iocb->ki_filp; - struct inode *inode = file_inode(file)->i_mapping->host; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct buffer_head *di_bh = NULL; - size_t count = iter->count; - journal_t *journal = osb->journal->j_journal; - u64 zero_len_head, zero_len_tail; - int cluster_align_head, cluster_align_tail; - loff_t final_size = offset + count; - int append_write = offset >= i_size_read(inode) ? 1 : 0; - unsigned int num_clusters = 0; - unsigned int ext_flags = 0; - - { - u64 o = offset; - u64 s = i_size_read(inode); - - zero_len_head = do_div(o, 1 << osb->s_clustersize_bits); - cluster_align_head = !zero_len_head; - - zero_len_tail = osb->s_clustersize - - do_div(s, osb->s_clustersize); - if ((offset - i_size_read(inode)) < zero_len_tail) - zero_len_tail = offset - i_size_read(inode); - cluster_align_tail = !zero_len_tail; - } - - /* - * when final_size > inode->i_size, inode->i_size will be - * updated after direct write, so add the inode to orphan - * dir first. 
- */ - if (final_size > i_size_read(inode)) { - ret = ocfs2_add_inode_to_orphan(osb, inode); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - orphaned = true; - } - - if (append_write) { - ret = ocfs2_inode_lock(inode, NULL, 1); - if (ret < 0) { - mlog_errno(ret); - goto clean_orphan; - } - - /* zeroing out the previously allocated cluster tail - * that but not zeroed */ - if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) { - down_read(&OCFS2_I(inode)->ip_alloc_sem); - ret = ocfs2_direct_IO_zero_extend(osb, inode, offset, - zero_len_tail, cluster_align_tail); - up_read(&OCFS2_I(inode)->ip_alloc_sem); - } else { - down_write(&OCFS2_I(inode)->ip_alloc_sem); - ret = ocfs2_direct_IO_extend_no_holes(osb, inode, - offset); - up_write(&OCFS2_I(inode)->ip_alloc_sem); - } - if (ret < 0) { - mlog_errno(ret); - ocfs2_inode_unlock(inode, 1); - goto clean_orphan; - } - - is_overwrite = ocfs2_is_overwrite(osb, inode, offset); - if (is_overwrite < 0) { - mlog_errno(is_overwrite); - ret = is_overwrite; - ocfs2_inode_unlock(inode, 1); - goto clean_orphan; - } - - ocfs2_inode_unlock(inode, 1); - } - - written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, - offset, ocfs2_direct_IO_get_blocks, - ocfs2_dio_end_io, NULL, 0); - /* overwrite aio may return -EIOCBQUEUED, and it is not an error */ - if ((written < 0) && (written != -EIOCBQUEUED)) { - loff_t i_size = i_size_read(inode); - - if (offset + count > i_size) { - ret = ocfs2_inode_lock(inode, &di_bh, 1); - if (ret < 0) { - mlog_errno(ret); - goto clean_orphan; - } - - if (i_size == i_size_read(inode)) { - ret = ocfs2_truncate_file(inode, di_bh, - i_size); - if (ret < 0) { - if (ret != -ENOSPC) - mlog_errno(ret); - - ocfs2_inode_unlock(inode, 1); - brelse(di_bh); - di_bh = NULL; - goto clean_orphan; - } - } - - ocfs2_inode_unlock(inode, 1); - brelse(di_bh); - di_bh = NULL; - - ret = jbd2_journal_force_commit(journal); - if (ret < 0) - mlog_errno(ret); - } - } else if (written > 0 && append_write && !is_overwrite && - !cluster_align_head) { - /* zeroing out the allocated cluster head */ - u32 p_cpos = 0; - u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset); - - ret = ocfs2_inode_lock(inode, NULL, 0); - if (ret < 0) { - mlog_errno(ret); - goto clean_orphan; - } - - ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, - &num_clusters, &ext_flags); - if (ret < 0) { - mlog_errno(ret); - ocfs2_inode_unlock(inode, 0); - goto clean_orphan; - } - - BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)); - - ret = blkdev_issue_zeroout(osb->sb->s_bdev, - (u64)p_cpos << (osb->s_clustersize_bits - 9), - zero_len_head >> 9, GFP_NOFS, false); - if (ret < 0) - mlog_errno(ret); - - ocfs2_inode_unlock(inode, 0); - } - -clean_orphan: - if (orphaned) { - int tmp_ret; - int update_isize = written > 0 ? 1 : 0; - loff_t end = update_isize ? 
offset + written : 0; - - tmp_ret = ocfs2_inode_lock(inode, &di_bh, 1); - if (tmp_ret < 0) { - ret = tmp_ret; - mlog_errno(ret); - goto out; - } - - tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, - update_isize, end); - if (tmp_ret < 0) { - ocfs2_inode_unlock(inode, 1); - ret = tmp_ret; - mlog_errno(ret); - brelse(di_bh); - goto out; - } - - ocfs2_inode_unlock(inode, 1); - brelse(di_bh); - - tmp_ret = jbd2_journal_force_commit(journal); - if (tmp_ret < 0) { - ret = tmp_ret; - mlog_errno(tmp_ret); - } - } - -out: - if (ret >= 0) - ret = written; - return ret; -} - -static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) -{ - struct file *file = iocb->ki_filp; - struct inode *inode = file_inode(file)->i_mapping->host; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - int full_coherency = !(osb->s_mount_opt & - OCFS2_MOUNT_COHERENCY_BUFFERED); - - /* - * Fallback to buffered I/O if we see an inode without - * extents. - */ - if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) - return 0; - - /* Fallback to buffered I/O if we are appending and - * concurrent O_DIRECT writes are allowed. - */ - if (i_size_read(inode) <= offset && !full_coherency) - return 0; - - if (iov_iter_rw(iter) == READ) - return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, - iter, offset, - ocfs2_direct_IO_get_blocks, - ocfs2_dio_end_io, NULL, 0); - else - return ocfs2_direct_IO_write(iocb, iter, offset); -} - static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, u32 cpos, unsigned int *start, unsigned int *end) { - unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE; + unsigned int cluster_start = 0, cluster_end = PAGE_SIZE; - if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) { + if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) { unsigned int cpp; - cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits); + cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits); cluster_start = cpos % cpp; cluster_start = cluster_start << osb->s_clustersize_bits; @@ -1193,13 +684,20 @@ next_bh: return ret; } -#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE) +#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE) #define OCFS2_MAX_CTXT_PAGES 1 #else -#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE) +#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE) #endif -#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE) +#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE) + +struct ocfs2_unwritten_extent { + struct list_head ue_node; + struct list_head ue_ip_node; + u32 ue_cpos; + u32 ue_phys; +}; /* * Describe the state of a single cluster to be written to. @@ -1212,7 +710,7 @@ struct ocfs2_write_cluster_desc { * filled. */ unsigned c_new; - unsigned c_unwritten; + unsigned c_clear_unwritten; unsigned c_needs_zero; }; @@ -1224,6 +722,9 @@ struct ocfs2_write_ctxt { /* First cluster allocated in a nonsparse extend */ u32 w_first_new_cpos; + /* Type of caller. Must be one of buffer, mmap, direct. 
*/ + ocfs2_write_type_t w_type; + struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE]; /* @@ -1272,6 +773,8 @@ struct ocfs2_write_ctxt { struct buffer_head *w_di_bh; struct ocfs2_cached_dealloc_ctxt w_dealloc; + + struct list_head w_unwritten_list; }; void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) @@ -1282,7 +785,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) if (pages[i]) { unlock_page(pages[i]); mark_page_accessed(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } } } @@ -1305,13 +808,30 @@ static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) } } mark_page_accessed(wc->w_target_page); - page_cache_release(wc->w_target_page); + put_page(wc->w_target_page); } ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); } -static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) +static void ocfs2_free_unwritten_list(struct inode *inode, + struct list_head *head) { + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL; + + list_for_each_entry_safe(ue, tmp, head, ue_node) { + list_del(&ue->ue_node); + spin_lock(&oi->ip_lock); + list_del(&ue->ue_ip_node); + spin_unlock(&oi->ip_lock); + kfree(ue); + } +} + +static void ocfs2_free_write_ctxt(struct inode *inode, + struct ocfs2_write_ctxt *wc) +{ + ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list); ocfs2_unlock_pages(wc); brelse(wc->w_di_bh); kfree(wc); @@ -1319,7 +839,8 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, struct ocfs2_super *osb, loff_t pos, - unsigned len, struct buffer_head *di_bh) + unsigned len, ocfs2_write_type_t type, + struct buffer_head *di_bh) { u32 cend; struct ocfs2_write_ctxt *wc; @@ -1334,13 +855,15 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, wc->w_clen = cend - wc->w_cpos + 1; get_bh(di_bh); wc->w_di_bh = di_bh; + wc->w_type = type; - if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) + if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) wc->w_large_pages = 1; else wc->w_large_pages = 0; ocfs2_init_dealloc_ctxt(&wc->w_dealloc); + INIT_LIST_HEAD(&wc->w_unwritten_list); *wcp = wc; @@ -1397,16 +920,17 @@ static void ocfs2_write_failure(struct inode *inode, loff_t user_pos, unsigned user_len) { int i; - unsigned from = user_pos & (PAGE_CACHE_SIZE - 1), + unsigned from = user_pos & (PAGE_SIZE - 1), to = user_pos + user_len; struct page *tmppage; - ocfs2_zero_new_buffers(wc->w_target_page, from, to); + if (wc->w_target_page) + ocfs2_zero_new_buffers(wc->w_target_page, from, to); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; - if (page_has_buffers(tmppage)) { + if (tmppage && page_has_buffers(tmppage)) { if (ocfs2_should_order_data(inode)) ocfs2_jbd2_file_inode(wc->w_handle, inode); @@ -1436,7 +960,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, (page_offset(page) <= user_pos)); if (page == wc->w_target_page) { - map_from = user_pos & (PAGE_CACHE_SIZE - 1); + map_from = user_pos & (PAGE_SIZE - 1); map_to = map_from + user_len; if (new) @@ -1510,7 +1034,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, struct inode *inode = mapping->host; loff_t last_byte; - target_index = user_pos >> PAGE_CACHE_SHIFT; + target_index = user_pos >> PAGE_SHIFT; /* * Figure out how many pages we'll be manipulating here. 
For @@ -1529,18 +1053,20 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, */ last_byte = max(user_pos + user_len, i_size_read(inode)); BUG_ON(last_byte < 1); - end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1; + end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1; if ((start + wc->w_num_pages) > end_index) wc->w_num_pages = end_index - start; } else { wc->w_num_pages = 1; start = target_index; } + end_index = (user_pos + user_len - 1) >> PAGE_SHIFT; for(i = 0; i < wc->w_num_pages; i++) { index = start + i; - if (index == target_index && mmap_page) { + if (index >= target_index && index <= end_index && + wc->w_type == OCFS2_WRITE_MMAP) { /* * ocfs2_pagemkwrite() is a little different * and wants us to directly use the page @@ -1556,9 +1082,14 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, goto out; } - page_cache_get(mmap_page); + get_page(mmap_page); wc->w_pages[i] = mmap_page; wc->w_target_locked = true; + } else if (index >= target_index && index <= end_index && + wc->w_type == OCFS2_WRITE_DIRECT) { + /* Direct write has no mapping page. */ + wc->w_pages[i] = NULL; + continue; } else { wc->w_pages[i] = find_or_create_page(mapping, index, GFP_NOFS); @@ -1583,19 +1114,20 @@ out: * Prepare a single cluster for write one cluster into the file. */ static int ocfs2_write_cluster(struct address_space *mapping, - u32 phys, unsigned int unwritten, + u32 *phys, unsigned int new, + unsigned int clear_unwritten, unsigned int should_zero, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, unsigned user_len) { - int ret, i, new; - u64 v_blkno, p_blkno; + int ret, i; + u64 p_blkno; struct inode *inode = mapping->host; struct ocfs2_extent_tree et; + int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); - new = phys == 0 ? 1 : 0; if (new) { u32 tmp_pos; @@ -1605,9 +1137,9 @@ static int ocfs2_write_cluster(struct address_space *mapping, */ tmp_pos = cpos; ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode, - &tmp_pos, 1, 0, wc->w_di_bh, - wc->w_handle, data_ac, - meta_ac, NULL); + &tmp_pos, 1, !clear_unwritten, + wc->w_di_bh, wc->w_handle, + data_ac, meta_ac, NULL); /* * This shouldn't happen because we must have already * calculated the correct meta data allocation required. The @@ -1624,11 +1156,11 @@ static int ocfs2_write_cluster(struct address_space *mapping, mlog_errno(ret); goto out; } - } else if (unwritten) { + } else if (clear_unwritten) { ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), wc->w_di_bh); ret = ocfs2_mark_extent_written(inode, &et, - wc->w_handle, cpos, 1, phys, + wc->w_handle, cpos, 1, *phys, meta_ac, &wc->w_dealloc); if (ret < 0) { mlog_errno(ret); @@ -1636,30 +1168,33 @@ static int ocfs2_write_cluster(struct address_space *mapping, } } - if (should_zero) - v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos); - else - v_blkno = user_pos >> inode->i_sb->s_blocksize_bits; - /* * The only reason this should fail is due to an inability to * find the extent added. 
 */
-	ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
-					  NULL);
+	ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL);
 	if (ret < 0) {
 		mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
-		     "at logical block %llu",
-		     (unsigned long long)OCFS2_I(inode)->ip_blkno,
-		     (unsigned long long)v_blkno);
+		     "at logical cluster %u",
+		     (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
 		goto out;
 	}
 
-	BUG_ON(p_blkno == 0);
+	BUG_ON(*phys == 0);
+
+	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys);
+	if (!should_zero)
+		p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);
 
 	for(i = 0; i < wc->w_num_pages; i++) {
 		int tmpret;
 
+		/* This is the direct io target page. */
+		if (wc->w_pages[i] == NULL) {
+			p_blkno++;
+			continue;
+		}
+
 		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
 						      wc->w_pages[i], cpos,
 						      user_pos, user_len,
@@ -1706,8 +1241,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
 		if ((cluster_off + local_len) > osb->s_clustersize)
 			local_len = osb->s_clustersize - cluster_off;
 
-		ret = ocfs2_write_cluster(mapping, desc->c_phys,
-					  desc->c_unwritten,
+		ret = ocfs2_write_cluster(mapping, &desc->c_phys,
+					  desc->c_new,
+					  desc->c_clear_unwritten,
 					  desc->c_needs_zero,
 					  data_ac, meta_ac,
 					  wc, desc->c_cpos, pos, local_len);
@@ -1736,7 +1272,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
 {
 	struct ocfs2_write_cluster_desc *desc;
 
-	wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
+	wc->w_target_from = pos & (PAGE_SIZE - 1);
 	wc->w_target_to = wc->w_target_from + len;
 
 	if (alloc == 0)
@@ -1773,11 +1309,71 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
 						&wc->w_target_to);
 	} else {
 		wc->w_target_from = 0;
-		wc->w_target_to = PAGE_CACHE_SIZE;
+		wc->w_target_to = PAGE_SIZE;
 	}
 }
 
 /*
+ * Check if this extent is marked UNWRITTEN by direct io. If so, we need not
+ * do the zero work, and we must not clear UNWRITTEN, since it will be cleared
+ * by the direct io procedure.
+ * If this is a new extent allocated by direct io, we should mark it in
+ * the ip_unwritten_list.
+ */
+static int ocfs2_unwritten_check(struct inode *inode,
+				 struct ocfs2_write_ctxt *wc,
+				 struct ocfs2_write_cluster_desc *desc)
+{
+	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+	struct ocfs2_unwritten_extent *ue = NULL, *new = NULL;
+	int ret = 0;
+
+	if (!desc->c_needs_zero)
+		return 0;
+
+retry:
+	spin_lock(&oi->ip_lock);
+	/* No need to zero, no matter whether this is a buffered or a direct
+	 * write: whoever first touched the cluster is doing the zeroing, and
+	 * will clear the unwritten flag after all cluster io has finished. */
+	list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) {
+		if (desc->c_cpos == ue->ue_cpos) {
+			BUG_ON(desc->c_new);
+			desc->c_needs_zero = 0;
+			desc->c_clear_unwritten = 0;
+			goto unlock;
+		}
+	}
+
+	if (wc->w_type != OCFS2_WRITE_DIRECT)
+		goto unlock;
+
+	if (new == NULL) {
+		spin_unlock(&oi->ip_lock);
+		new = kmalloc(sizeof(struct ocfs2_unwritten_extent),
+			      GFP_NOFS);
+		if (new == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		goto retry;
+	}
+	/* This direct write will do the zeroing. */
+	new->ue_cpos = desc->c_cpos;
+	new->ue_phys = desc->c_phys;
+	desc->c_clear_unwritten = 0;
+	list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
+	list_add_tail(&new->ue_node, &wc->w_unwritten_list);
+	new = NULL;
+unlock:
+	spin_unlock(&oi->ip_lock);
+out:
+	if (new)
+		kfree(new);
+	return ret;
+}
+
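ocfs2_unwritten_check() above leans on a standard kernel idiom: a GFP_NOFS allocation may sleep and so cannot run under ip_lock, so the spinlock is dropped, the node is allocated, and the list walk restarts from the top, because another writer may have inserted an entry for the same cluster in the meantime. A minimal sketch of the idiom, with hypothetical names (demo_item and demo_insert_unique are not from this patch):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_item {
	struct list_head node;
	u32 key;
};

static int demo_insert_unique(spinlock_t *lock, struct list_head *head,
			      u32 key)
{
	struct demo_item *it, *new = NULL;

retry:
	spin_lock(lock);
	list_for_each_entry(it, head, node) {
		if (it->key == key) {
			/* Someone beat us to it; drop our preallocation. */
			spin_unlock(lock);
			kfree(new);	/* kfree(NULL) is a no-op */
			return 0;
		}
	}
	if (!new) {
		/* kmalloc(GFP_NOFS) may sleep: allocate outside the lock,
		 * then rescan, because the list may have changed. */
		spin_unlock(lock);
		new = kmalloc(sizeof(*new), GFP_NOFS);
		if (!new)
			return -ENOMEM;
		goto retry;
	}
	new->key = key;
	list_add_tail(&new->node, head);
	spin_unlock(lock);
	return 0;
}

The first pass normally finds nothing and pays one unlock/lock round trip for the allocation; the retry pass either inserts the preallocated node or frees it if a duplicate appeared while the lock was dropped.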
+/*
  * Populate each single-cluster write descriptor in the write context
  * with information about the i/o to be done.
  *
@@ -1852,14 +1448,21 @@ static int ocfs2_populate_write_desc(struct inode *inode,
 		if (phys == 0) {
 			desc->c_new = 1;
 			desc->c_needs_zero = 1;
+			desc->c_clear_unwritten = 1;
 			*clusters_to_alloc = *clusters_to_alloc + 1;
 		}
 
 		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
-			desc->c_unwritten = 1;
+			desc->c_clear_unwritten = 1;
 			desc->c_needs_zero = 1;
 		}
 
+		ret = ocfs2_unwritten_check(inode, wc, desc);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
 		num_clusters--;
 	}
 
@@ -2022,8 +1625,10 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode,
 	if (ret)
 		mlog_errno(ret);
 
-	wc->w_first_new_cpos =
-		ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
+	/* There is no wc if this is called from direct io. */
+	if (wc)
+		wc->w_first_new_cpos =
+			ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
 
 	return ret;
 }
@@ -2077,9 +1682,8 @@ out:
 	return ret;
 }
 
-int ocfs2_write_begin_nolock(struct file *filp,
-			     struct address_space *mapping,
-			     loff_t pos, unsigned len, unsigned flags,
+int ocfs2_write_begin_nolock(struct address_space *mapping,
+			     loff_t pos, unsigned len, ocfs2_write_type_t type,
 			     struct page **pagep, void **fsdata,
 			     struct buffer_head *di_bh, struct page *mmap_page)
 {
@@ -2096,7 +1700,7 @@ int ocfs2_write_begin_nolock(struct file *filp,
 	int try_free = 1, ret1;
 
 try_again:
-	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
+	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh);
 	if (ret) {
 		mlog_errno(ret);
 		return ret;
 	}
@@ -2115,14 +1719,17 @@ try_again:
 		}
 	}
 
-	if (ocfs2_sparse_alloc(osb))
-		ret = ocfs2_zero_tail(inode, di_bh, pos);
-	else
-		ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
-						   wc);
-	if (ret) {
-		mlog_errno(ret);
-		goto out;
+	/* Direct io changes i_size late, so don't zero the tail here. */
+	if (type != OCFS2_WRITE_DIRECT) {
+		if (ocfs2_sparse_alloc(osb))
+			ret = ocfs2_zero_tail(inode, di_bh, pos);
+		else
+			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
+							   len, wc);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
 	}
 
 	ret = ocfs2_check_range_for_refcount(inode, pos, len);
@@ -2153,7 +1760,7 @@ try_again:
 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 			(long long)i_size_read(inode),
 			le32_to_cpu(di->i_clusters),
-			pos, len, flags, mmap_page,
+			pos, len, type, mmap_page,
 			clusters_to_alloc, extents_to_split);
 
 	/*
@@ -2183,17 +1790,17 @@ try_again:
 
 		credits = ocfs2_calc_extend_credits(inode->i_sb,
 						    &di->id2.i_list);
-
-	}
+	} else if (type == OCFS2_WRITE_DIRECT)
+		/* A direct write need not start a trans if no extents are allocated. */
+		goto success;
 
 	/*
 	 * We have to zero sparse allocated clusters, unwritten extent clusters,
 	 * and non-sparse clusters we just extended. For non-sparse writes,
 	 * we know zeros will only be needed in the first and/or last cluster.
 	 */
-	if (clusters_to_alloc || extents_to_split ||
-	    (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
-			    wc->w_desc[wc->w_clen - 1].c_needs_zero)))
+	if (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
+			   wc->w_desc[wc->w_clen - 1].c_needs_zero))
 		cluster_of_pages = 1;
 	else
 		cluster_of_pages = 0;
@@ -2260,7 +1867,8 @@ try_again:
 		ocfs2_free_alloc_context(meta_ac);
 
 success:
-	*pagep = wc->w_target_page;
+	if (pagep)
+		*pagep = wc->w_target_page;
 	*fsdata = wc;
 	return 0;
 out_quota:
@@ -2271,7 +1879,7 @@ out_commit:
 	ocfs2_commit_trans(osb, handle);
 
 out:
-	ocfs2_free_write_ctxt(wc);
+	ocfs2_free_write_ctxt(inode, wc);
 
 	if (data_ac) {
 		ocfs2_free_alloc_context(data_ac);
@@ -2323,8 +1931,8 @@ static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
 	 */
 	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 
-	ret = ocfs2_write_begin_nolock(file, mapping, pos, len, flags, pagep,
-				       fsdata, di_bh, NULL);
+	ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
+				       pagep, fsdata, di_bh, NULL);
 	if (ret) {
 		mlog_errno(ret);
 		goto out_fail;
@@ -2373,7 +1981,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
 			   struct page *page, void *fsdata)
 {
 	int i, ret;
-	unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from, to, start = pos & (PAGE_SIZE - 1);
 	struct inode *inode = mapping->host;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_write_ctxt *wc = fsdata;
@@ -2381,12 +1989,16 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
 	handle_t *handle = wc->w_handle;
 	struct page *tmppage;
 
-	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
-				      OCFS2_JOURNAL_ACCESS_WRITE);
-	if (ret) {
-		copied = ret;
-		mlog_errno(ret);
-		goto out;
+	BUG_ON(!list_empty(&wc->w_unwritten_list));
+
+	if (handle) {
+		ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
+				wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
+		if (ret) {
+			copied = ret;
+			mlog_errno(ret);
+			goto out;
+		}
 	}
 
 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
@@ -2394,24 +2006,29 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
 		goto out_write_size;
 	}
 
-	if (unlikely(copied < len)) {
+	if (unlikely(copied < len) && wc->w_target_page) {
 		if (!PageUptodate(wc->w_target_page))
 			copied = 0;
 
 		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
 				       start+len);
 	}
-	flush_dcache_page(wc->w_target_page);
+	if (wc->w_target_page)
+		flush_dcache_page(wc->w_target_page);
 
 	for(i = 0; i < wc->w_num_pages; i++) {
 		tmppage = wc->w_pages[i];
 
+		/* This is the direct io target page. */
+		if (tmppage == NULL)
+			continue;
+
 		if (tmppage == wc->w_target_page) {
 			from = wc->w_target_from;
 			to = wc->w_target_to;
 
-			BUG_ON(from > PAGE_CACHE_SIZE ||
-			       to > PAGE_CACHE_SIZE ||
+			BUG_ON(from > PAGE_SIZE ||
+			       to > PAGE_SIZE ||
 			       to < from);
 		} else {
 			/*
@@ -2420,29 +2037,33 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
 			 * to flush their entire range.
 			 */
 			from = 0;
-			to = PAGE_CACHE_SIZE;
+			to = PAGE_SIZE;
 		}
 
 		if (page_has_buffers(tmppage)) {
-			if (ocfs2_should_order_data(inode))
-				ocfs2_jbd2_file_inode(wc->w_handle, inode);
+			if (handle && ocfs2_should_order_data(inode))
+				ocfs2_jbd2_file_inode(handle, inode);
 			block_commit_write(tmppage, from, to);
 		}
 	}
 
 out_write_size:
-	pos += copied;
-	if (pos > i_size_read(inode)) {
-		i_size_write(inode, pos);
-		mark_inode_dirty(inode);
-	}
-	inode->i_blocks = ocfs2_inode_sector_count(inode);
-	di->i_size = cpu_to_le64((u64)i_size_read(inode));
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
-	di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
-	ocfs2_update_inode_fsync_trans(handle, inode, 1);
-	ocfs2_journal_dirty(handle, wc->w_di_bh);
+	/* Direct io does not update i_size here. */
+	if (wc->w_type != OCFS2_WRITE_DIRECT) {
+		pos += copied;
+		if (pos > i_size_read(inode)) {
+			i_size_write(inode, pos);
+			mark_inode_dirty(inode);
+		}
+		inode->i_blocks = ocfs2_inode_sector_count(inode);
+		di->i_size = cpu_to_le64((u64)i_size_read(inode));
+		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
+		di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+		ocfs2_update_inode_fsync_trans(handle, inode, 1);
+	}
+	if (handle)
+		ocfs2_journal_dirty(handle, wc->w_di_bh);
 
 out:
 	/* unlock pages before dealloc since it needs acquiring j_trans_barrier
@@ -2452,7 +2073,8 @@ out:
 	 */
 	ocfs2_unlock_pages(wc);
 
-	ocfs2_commit_trans(osb, handle);
+	if (handle)
+		ocfs2_commit_trans(osb, handle);
 
 	ocfs2_run_deallocs(osb, &wc->w_dealloc);
 
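These hunks also continue the tree-wide rename of PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT/PAGE_CACHE_MASK to PAGE_SIZE/PAGE_SHIFT/PAGE_MASK; the two families had identical values, so expressions such as `start = pos & (PAGE_SIZE - 1)` in ocfs2_write_end_nolock() keep their meaning. A toy user-space calculation of the same split of a file position into a page index and an in-page offset, assuming 4 KiB pages (the DEMO_* names are illustrative):

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)	/* 4096 */

int main(void)
{
	unsigned long long pos = 10000;	/* byte offset of a write */

	unsigned long long index = pos >> DEMO_PAGE_SHIFT;	/* page index */
	unsigned long from = pos & (DEMO_PAGE_SIZE - 1);	/* offset in page */

	printf("pos %llu -> page index %llu, offset %lu\n", pos, index, from);
	return 0;
}

With pos = 10000 this prints page index 2 and offset 1808, i.e. 10000 = 2 * 4096 + 1808.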
@@ -2477,6 +2099,360 @@ static int ocfs2_write_end(struct file *file, struct address_space *mapping,
 	return ret;
 }
 
+struct ocfs2_dio_write_ctxt {
+	struct list_head	dw_zero_list;
+	unsigned		dw_zero_count;
+	int			dw_orphaned;
+	pid_t			dw_writer_pid;
+};
+
+static struct ocfs2_dio_write_ctxt *
+ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc)
+{
+	struct ocfs2_dio_write_ctxt *dwc = NULL;
+
+	if (bh->b_private)
+		return bh->b_private;
+
+	dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS);
+	if (dwc == NULL)
+		return NULL;
+	INIT_LIST_HEAD(&dwc->dw_zero_list);
+	dwc->dw_zero_count = 0;
+	dwc->dw_orphaned = 0;
+	dwc->dw_writer_pid = task_pid_nr(current);
+	bh->b_private = dwc;
+	*alloc = 1;
+
+	return dwc;
+}
+
+static void ocfs2_dio_free_write_ctx(struct inode *inode,
+				     struct ocfs2_dio_write_ctxt *dwc)
+{
+	ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list);
+	kfree(dwc);
+}
+
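ocfs2_dio_alloc_write_ctx() above relies on the dio core reusing one struct buffer_head for every get_block call of a single direct i/o, so state parked in bh->b_private survives from call to call and is recovered in the completion handler. A reduced sketch of the pattern, with a hypothetical demo_dio_state standing in for struct ocfs2_dio_write_ctxt:

#include <linux/buffer_head.h>
#include <linux/slab.h>

struct demo_dio_state {
	int first_call;
};

static struct demo_dio_state *demo_get_dio_state(struct buffer_head *bh)
{
	struct demo_dio_state *st = bh->b_private;

	/* The dio core hands the same bh_result to every get_block call
	 * of one direct i/o, so b_private survives between calls. */
	if (st)
		return st;

	st = kzalloc(sizeof(*st), GFP_NOFS);
	if (st) {
		st->first_call = 1;
		bh->b_private = st;
	}
	return st;
}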
+/*
+ * TODO: Make this into a generic get_blocks function.
+ *
+ * From do_direct_io in direct-io.c:
+ *  "So what we do is to permit the ->get_blocks function to populate
+ *   bh.b_size with the size of IO which is permitted at this offset and
+ *   this i_blkbits."
+ *
+ * This function is called directly from get_more_blocks in direct-io.c.
+ *
+ * called like this: dio->get_blocks(dio->inode, fs_startblk,
+ * 					fs_count, map_bh, dio->rw == WRITE);
+ */
+static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
+			       struct buffer_head *bh_result, int create)
+{
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+	struct ocfs2_write_ctxt *wc;
+	struct ocfs2_write_cluster_desc *desc = NULL;
+	struct ocfs2_dio_write_ctxt *dwc = NULL;
+	struct buffer_head *di_bh = NULL;
+	u64 p_blkno;
+	loff_t pos = iblock << inode->i_sb->s_blocksize_bits;
+	unsigned len, total_len = bh_result->b_size;
+	int ret = 0, first_get_block = 0;
+
+	len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
+	len = min(total_len, len);
+
+	mlog(0, "get block of %lu at %llu:%u req %u\n",
+	     inode->i_ino, pos, len, total_len);
+
+	/*
+	 * Because we need to change the file size in ocfs2_dio_end_io_write(),
+	 * or we may need to add the inode to the orphan dir, we cannot take
+	 * the fast path when the file size is going to change.
+	 */
+	if (pos + total_len <= i_size_read(inode)) {
+		down_read(&oi->ip_alloc_sem);
+		/* This is the fast path for re-write. */
+		ret = ocfs2_get_block(inode, iblock, bh_result, create);
+
+		up_read(&oi->ip_alloc_sem);
+
+		if (buffer_mapped(bh_result) &&
+		    !buffer_new(bh_result) &&
+		    ret == 0)
+			goto out;
+
+		/* Clear state set by ocfs2_get_block. */
+		bh_result->b_state = 0;
+	}
+
+	dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
+	if (unlikely(dwc == NULL)) {
+		ret = -ENOMEM;
+		mlog_errno(ret);
+		goto out;
+	}
+
+	if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
+	    ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
+	    !dwc->dw_orphaned) {
+		/*
+		 * When we are going to alloc extents beyond the file size,
+		 * add the inode to the orphan dir first, so we can reclaim
+		 * those clusters if the system crashes during the write.
+		 */
+		ret = ocfs2_add_inode_to_orphan(osb, inode);
+		if (ret < 0) {
+			mlog_errno(ret);
+			goto out;
+		}
+		dwc->dw_orphaned = 1;
+	}
+
+	ret = ocfs2_inode_lock(inode, &di_bh, 1);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	down_write(&oi->ip_alloc_sem);
+
+	if (first_get_block) {
+		if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+			ret = ocfs2_zero_tail(inode, di_bh, pos);
+		else
+			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
+							   total_len, NULL);
+		if (ret < 0) {
+			mlog_errno(ret);
+			goto unlock;
+		}
+	}
+
+	ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
+				       OCFS2_WRITE_DIRECT, NULL,
+				       (void **)&wc, di_bh, NULL);
+	if (ret) {
+		mlog_errno(ret);
+		goto unlock;
+	}
+
+	desc = &wc->w_desc[0];
+
+	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
+	BUG_ON(p_blkno == 0);
+	p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);
+
+	map_bh(bh_result, inode->i_sb, p_blkno);
+	bh_result->b_size = len;
+	if (desc->c_needs_zero)
+		set_buffer_new(bh_result);
+
+	/* The end_io handler may sleep, which must not happen in irq context,
+	 * so defer completion to the dio work queue. */
+	set_buffer_defer_completion(bh_result);
+
+	if (!list_empty(&wc->w_unwritten_list)) {
+		struct ocfs2_unwritten_extent *ue = NULL;
+
+		ue = list_first_entry(&wc->w_unwritten_list,
+				      struct ocfs2_unwritten_extent,
+				      ue_node);
+		BUG_ON(ue->ue_cpos != desc->c_cpos);
+		/* The physical address may be 0, fill it in. */
+		ue->ue_phys = desc->c_phys;
+
+		list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
+		dwc->dw_zero_count++;
+	}
+
+	ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, NULL, wc);
+	BUG_ON(ret != len);
+	ret = 0;
+unlock:
+	up_write(&oi->ip_alloc_sem);
+	ocfs2_inode_unlock(inode, 1);
+	brelse(di_bh);
+out:
+	if (ret < 0)
+		ret = -EIO;
+	return ret;
+}
+
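ocfs2_dio_get_block() above implements the get_block contract used by __blockdev_direct_IO: map up to bh_result->b_size bytes starting at iblock via map_bh(), trim b_size to what was actually mapped, and mark freshly allocated blocks with set_buffer_new() so the dio core knows they need zeroing. A deliberately trivial callback showing the same contract for an imaginary filesystem whose file blocks live 1:1 at a fixed device offset (demo_get_block and data_start are assumptions, not part of ocfs2):

#include <linux/buffer_head.h>
#include <linux/fs.h>

static int demo_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	const sector_t data_start = 2048;	/* assumed fixed data area */
	sector_t size_blocks = i_size_read(inode) >> inode->i_blkbits;

	/* Map the requested range 1:1 onto the device. */
	map_bh(bh_result, inode->i_sb, data_start + iblock);

	/* b_size arrives holding the most the caller can use; a real
	 * filesystem would trim it to the extent it found. Here the
	 * "extent" is the whole file, so it is left as is. */

	/* Blocks past the current size are newly allocated on a write:
	 * tell the dio core so it zeroes any partial head/tail. */
	if (create && iblock >= size_blocks)
		set_buffer_new(bh_result);

	return 0;
}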
+static void ocfs2_dio_end_io_write(struct inode *inode,
+				   struct ocfs2_dio_write_ctxt *dwc,
+				   loff_t offset,
+				   ssize_t bytes)
+{
+	struct ocfs2_cached_dealloc_ctxt dealloc;
+	struct ocfs2_extent_tree et;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+	struct ocfs2_unwritten_extent *ue = NULL;
+	struct buffer_head *di_bh = NULL;
+	struct ocfs2_dinode *di;
+	struct ocfs2_alloc_context *data_ac = NULL;
+	struct ocfs2_alloc_context *meta_ac = NULL;
+	handle_t *handle = NULL;
+	loff_t end = offset + bytes;
+	int ret = 0, credits = 0, locked = 0;
+
+	ocfs2_init_dealloc_ctxt(&dealloc);
+
+	/* We clear unwritten flags, delete the orphan entry and change i_size
+	 * here. If none of these is needed, we can skip all this. */
+	if (list_empty(&dwc->dw_zero_list) &&
+	    end <= i_size_read(inode) &&
+	    !dwc->dw_orphaned)
+		goto out;
+
+	/* ocfs2_file_write_iter already holds i_mutex, so we need not take it
+	 * again if we are running in that context. */
+	if (dwc->dw_writer_pid != task_pid_nr(current)) {
+		mutex_lock(&inode->i_mutex);
+		locked = 1;
+	}
+
+	ret = ocfs2_inode_lock(inode, &di_bh, 1);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	down_write(&oi->ip_alloc_sem);
+
+	/* Delete the orphan entry before acquiring i_mutex. */
+	if (dwc->dw_orphaned) {
+		BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));
+
+		end = end > i_size_read(inode) ? end : 0;
+
+		ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
+				!!end, end);
+		if (ret < 0)
+			mlog_errno(ret);
+	}
+
+	di = (struct ocfs2_dinode *)di_bh;
+
+	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
+
+	ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2,
+				    &data_ac, &meta_ac);
+	if (ret) {
+		mlog_errno(ret);
+		goto unlock;
+	}
+
+	credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);
+
+	handle = ocfs2_start_trans(osb, credits);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		goto unlock;
+	}
+	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+				      OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret) {
+		mlog_errno(ret);
+		goto commit;
+	}
+
+	list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
+		ret = ocfs2_mark_extent_written(inode, &et, handle,
+						ue->ue_cpos, 1,
+						ue->ue_phys,
+						meta_ac, &dealloc);
+		if (ret < 0) {
+			mlog_errno(ret);
+			break;
+		}
+	}
+
+	if (end > i_size_read(inode)) {
+		ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
+		if (ret < 0)
+			mlog_errno(ret);
+	}
+commit:
+	ocfs2_commit_trans(osb, handle);
+unlock:
+	up_write(&oi->ip_alloc_sem);
+	ocfs2_inode_unlock(inode, 1);
+	brelse(di_bh);
+out:
+	if (data_ac)
+		ocfs2_free_alloc_context(data_ac);
+	if (meta_ac)
+		ocfs2_free_alloc_context(meta_ac);
+	ocfs2_run_deallocs(osb, &dealloc);
+	if (locked)
+		mutex_unlock(&inode->i_mutex);
+	ocfs2_dio_free_write_ctx(inode, dwc);
+}
+
+/*
+ * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
+ * particularly interested in the aio/dio case. We use the rw_lock DLM lock
+ * to protect io on one node from truncation on another.
+ */ +static int ocfs2_dio_end_io(struct kiocb *iocb, + loff_t offset, + ssize_t bytes, + void *private) +{ + struct inode *inode = file_inode(iocb->ki_filp); + int level; + + if (bytes <= 0) + return 0; + + /* this io's submitter should not have unlocked this before we could */ + BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); + + if (private) + ocfs2_dio_end_io_write(inode, private, offset, bytes); + + ocfs2_iocb_clear_rw_locked(iocb); + + level = ocfs2_iocb_rw_locked_level(iocb); + ocfs2_rw_unlock(inode, level); + return 0; +} + +static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = file_inode(file)->i_mapping->host; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + loff_t end = offset + iter->count; + get_block_t *get_block; + + /* + * Fallback to buffered I/O if we see an inode without + * extents. + */ + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + return 0; + + /* Fallback to buffered I/O if we do not support append dio. */ + if (end > i_size_read(inode) && !ocfs2_supports_append_dio(osb)) + return 0; + + if (iov_iter_rw(iter) == READ) + get_block = ocfs2_get_block; + else + get_block = ocfs2_dio_get_block; + + return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, + iter, offset, get_block, + ocfs2_dio_end_io, NULL, 0); +} + const struct address_space_operations ocfs2_aops = { .readpage = ocfs2_readpage, .readpages = ocfs2_readpages, diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 24e496d6bdcd..b1c9f28a57b1 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -47,9 +47,14 @@ int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); -int ocfs2_write_begin_nolock(struct file *filp, - struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, +typedef enum { + OCFS2_WRITE_BUFFER = 0, + OCFS2_WRITE_DIRECT, + OCFS2_WRITE_MMAP, +} ocfs2_write_type_t; + +int ocfs2_write_begin_nolock(struct address_space *mapping, + loff_t pos, unsigned len, ocfs2_write_type_t type, struct page **pagep, void **fsdata, struct buffer_head *di_bh, struct page *mmap_page); @@ -79,7 +84,6 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level) enum ocfs2_iocb_lock_bits { OCFS2_IOCB_RW_LOCK = 0, OCFS2_IOCB_RW_LOCK_LEVEL, - OCFS2_IOCB_UNALIGNED_IO, OCFS2_IOCB_NUM_LOCKS }; @@ -88,11 +92,4 @@ enum ocfs2_iocb_lock_bits { #define ocfs2_iocb_rw_locked_level(iocb) \ test_bit(OCFS2_IOCB_RW_LOCK_LEVEL, (unsigned long *)&iocb->private) -#define ocfs2_iocb_set_unaligned_aio(iocb) \ - set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) -#define ocfs2_iocb_clear_unaligned_aio(iocb) \ - clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) -#define ocfs2_iocb_is_unaligned_aio(iocb) \ - test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) - #endif /* OCFS2_FILE_H */ diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index ef6a2ec494de..1934abb6b680 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -417,13 +417,13 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, bio->bi_private = wc; bio->bi_end_io = o2hb_bio_end_io; - vec_start = (cs << bits) % PAGE_CACHE_SIZE; + vec_start = (cs << bits) % PAGE_SIZE; while(cs < max_slots) { current_page = cs / spp; page = reg->hr_slot_data[current_page]; - vec_len = min(PAGE_CACHE_SIZE - vec_start, - (max_slots-cs) * (PAGE_CACHE_SIZE/spp) ); + 
vec_len = min(PAGE_SIZE - vec_start, + (max_slots-cs) * (PAGE_SIZE/spp) ); mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n", current_page, vec_len, vec_start); @@ -431,7 +431,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, len = bio_add_page(bio, page, vec_len, vec_start); if (len != vec_len) break; - cs += vec_len / (PAGE_CACHE_SIZE/spp); + cs += vec_len / (PAGE_SIZE/spp); vec_start = 0; } @@ -1444,8 +1444,8 @@ static void o2hb_region_release(struct config_item *item) debugfs_remove(reg->hr_debug_dir); kfree(reg->hr_db_livenodes); kfree(reg->hr_db_regnum); - kfree(reg->hr_debug_elapsed_time); - kfree(reg->hr_debug_pinned); + kfree(reg->hr_db_elapsed_time); + kfree(reg->hr_db_pinned); spin_lock(&o2hb_live_lock); list_del(®->hr_all_item); @@ -1576,7 +1576,7 @@ static ssize_t o2hb_region_dev_show(struct config_item *item, char *page) static void o2hb_init_region_params(struct o2hb_region *reg) { - reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits; + reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits; reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS; mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n", diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index e36d63ff1783..cdeafb4e7ed6 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -212,6 +212,12 @@ grant: if (lock->lksb->flags & DLM_LKSB_PUT_LVB) memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN); + /* + * Move the lock to the tail because it may be the only lock which has + * an invalid lvb. + */ + list_move_tail(&lock->list, &res->granted); + status = DLM_NORMAL; *call_ast = 1; goto unlock_exit; @@ -262,6 +268,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, struct dlm_lock *lock, int flags, int type) { enum dlm_status status; + u8 old_owner = res->owner; mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); @@ -287,6 +294,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, status = DLM_DENIED; goto bail; } + + if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) { + mlog(0, "last convert request returned DLM_RECOVERING, but " + "owner has already queued and sent ast to me. res %.*s, " + "(cookie=%u:%llu, type=%d, conv=%d)\n", + res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + lock->ml.type, lock->ml.convert_type); + status = DLM_NORMAL; + goto bail; + } + res->state |= DLM_LOCK_RES_IN_PROGRESS; /* move lock to local convert queue */ /* do not alter lock refcount. switching lists. */ @@ -316,11 +336,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; lock->convert_pending = 0; - /* if it failed, move it back to granted queue */ + /* if it failed, move it back to granted queue. 
+ * if master returns DLM_NORMAL and then down before sending ast, + * it may have already been moved to granted queue, reset to + * DLM_RECOVERING and retry convert */ if (status != DLM_NORMAL) { if (status != DLM_NOTQUEUED) dlm_error(status); dlm_revert_pending_convert(res, lock); + } else if ((res->state & DLM_LOCK_RES_RECOVERING) || + (old_owner != res->owner)) { + mlog(0, "res %.*s is in recovering or has been recovered.\n", + res->lockname.len, res->lockname.name); + status = DLM_RECOVERING; } bail: spin_unlock(&res->spinlock); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index cd38488a10fc..f6b313898763 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -2083,7 +2083,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, dlm_lock_get(lock); if (lock->convert_pending) { /* move converting lock back to granted */ - BUG_ON(i != DLM_CONVERTING_LIST); mlog(0, "node died with convert pending " "on %.*s. move back to granted list.\n", res->lockname.len, res->lockname.name); diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 03768bb3aab1..47b3b2d4e775 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -571,8 +571,8 @@ static int dlmfs_fill_super(struct super_block * sb, int silent) { sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = DLMFS_MAGIC; sb->s_op = &dlmfs_ops; sb->s_root = d_make_root(dlmfs_get_root_inode(sb)); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 7cb38fdca229..5308841756be 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -770,14 +770,14 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, { struct address_space *mapping = inode->i_mapping; struct page *page; - unsigned long index = abs_from >> PAGE_CACHE_SHIFT; + unsigned long index = abs_from >> PAGE_SHIFT; handle_t *handle; int ret = 0; unsigned zero_from, zero_to, block_start, block_end; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; BUG_ON(abs_from >= abs_to); - BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); + BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT)); BUG_ON(abs_from & (inode->i_blkbits - 1)); handle = ocfs2_zero_start_ordered_transaction(inode, di_bh); @@ -794,10 +794,10 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, } /* Get the offsets within the page that we want to zero */ - zero_from = abs_from & (PAGE_CACHE_SIZE - 1); - zero_to = abs_to & (PAGE_CACHE_SIZE - 1); + zero_from = abs_from & (PAGE_SIZE - 1); + zero_to = abs_to & (PAGE_SIZE - 1); if (!zero_to) - zero_to = PAGE_CACHE_SIZE; + zero_to = PAGE_SIZE; trace_ocfs2_write_zero_page( (unsigned long long)OCFS2_I(inode)->ip_blkno, @@ -851,7 +851,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, out_unlock: unlock_page(page); - page_cache_release(page); + put_page(page); out_commit_trans: if (handle) ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); @@ -959,7 +959,7 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start, BUG_ON(range_start >= range_end); while (zero_pos < range_end) { - next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE; + next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE; if (next_pos > range_end) next_pos = range_end; rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh); @@ -1381,44 +1381,6 @@ out: return ret; } -/* - * Will look for holes and unwritten extents in the 
range starting at - * pos for count bytes (inclusive). - */ -static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos, - size_t count) -{ - int ret = 0; - unsigned int extent_flags; - u32 cpos, clusters, extent_len, phys_cpos; - struct super_block *sb = inode->i_sb; - - cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits; - clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos; - - while (clusters) { - ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len, - &extent_flags); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - - if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) { - ret = 1; - break; - } - - if (extent_len > clusters) - extent_len = clusters; - - clusters -= extent_len; - cpos += extent_len; - } -out: - return ret; -} - static int ocfs2_write_remove_suid(struct inode *inode) { int ret; @@ -2129,18 +2091,12 @@ out: static int ocfs2_prepare_inode_for_write(struct file *file, loff_t pos, - size_t count, - int appending, - int *direct_io, - int *has_refcount) + size_t count) { int ret = 0, meta_level = 0; struct dentry *dentry = file->f_path.dentry; struct inode *inode = d_inode(dentry); loff_t end; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - int full_coherency = !(osb->s_mount_opt & - OCFS2_MOUNT_COHERENCY_BUFFERED); /* * We start with a read level meta lock and only jump to an ex @@ -2189,10 +2145,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file, pos, count, &meta_level); - if (has_refcount) - *has_refcount = 1; - if (direct_io) - *direct_io = 0; } if (ret < 0) { @@ -2200,67 +2152,12 @@ static int ocfs2_prepare_inode_for_write(struct file *file, goto out_unlock; } - /* - * Skip the O_DIRECT checks if we don't need - * them. - */ - if (!direct_io || !(*direct_io)) - break; - - /* - * There's no sane way to do direct writes to an inode - * with inline data. - */ - if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { - *direct_io = 0; - break; - } - - /* - * Allowing concurrent direct writes means - * i_size changes wouldn't be synchronized, so - * one node could wind up truncating another - * nodes writes. - */ - if (end > i_size_read(inode) && !full_coherency) { - *direct_io = 0; - break; - } - - /* - * Fallback to old way if the feature bit is not set. - */ - if (end > i_size_read(inode) && - !ocfs2_supports_append_dio(osb)) { - *direct_io = 0; - break; - } - - /* - * We don't fill holes during direct io, so - * check for them here. If any are found, the - * caller will have to retake some cluster - * locks and initiate the io as buffered. - */ - ret = ocfs2_check_range_for_holes(inode, pos, count); - if (ret == 1) { - /* - * Fallback to old way if the feature bit is not set. - * Otherwise try dio first and then complete the rest - * request through buffer io. 
- */ - if (!ocfs2_supports_append_dio(osb)) - *direct_io = 0; - ret = 0; - } else if (ret < 0) - mlog_errno(ret); break; } out_unlock: trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno, - pos, appending, count, - direct_io, has_refcount); + pos, count); if (meta_level >= 0) ocfs2_inode_unlock(inode, meta_level); @@ -2272,18 +2169,16 @@ out: static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { - int direct_io, appending, rw_level; - int can_do_direct, has_refcount = 0; + int direct_io, rw_level; ssize_t written = 0; ssize_t ret; - size_t count = iov_iter_count(from), orig_count; + size_t count = iov_iter_count(from); struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int full_coherency = !(osb->s_mount_opt & OCFS2_MOUNT_COHERENCY_BUFFERED); - int unaligned_dio = 0; - int dropped_dio = 0; + void *saved_ki_complete = NULL; int append_write = ((iocb->ki_pos + count) >= i_size_read(inode) ? 1 : 0); @@ -2296,12 +2191,10 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, if (count == 0) return 0; - appending = iocb->ki_flags & IOCB_APPEND ? 1 : 0; direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0; inode_lock(inode); -relock: /* * Concurrent O_DIRECT writes are allowed with * mount_option "coherency=buffered". @@ -2334,7 +2227,6 @@ relock: ocfs2_inode_unlock(inode, 1); } - orig_count = iov_iter_count(from); ret = generic_write_checks(iocb, from); if (ret <= 0) { if (ret) @@ -2343,41 +2235,18 @@ relock: } count = ret; - can_do_direct = direct_io; - ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, appending, - &can_do_direct, &has_refcount); + ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count); if (ret < 0) { mlog_errno(ret); goto out; } - if (direct_io && !is_sync_kiocb(iocb)) - unaligned_dio = ocfs2_is_io_unaligned(inode, count, iocb->ki_pos); - - /* - * We can't complete the direct I/O as requested, fall back to - * buffered I/O. - */ - if (direct_io && !can_do_direct) { - ocfs2_rw_unlock(inode, rw_level); - - rw_level = -1; - - direct_io = 0; - iocb->ki_flags &= ~IOCB_DIRECT; - iov_iter_reexpand(from, orig_count); - dropped_dio = 1; - goto relock; - } - - if (unaligned_dio) { + if (direct_io && !is_sync_kiocb(iocb) && + ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) { /* - * Wait on previous unaligned aio to complete before - * proceeding. + * Make it a sync io if it's an unaligned aio. 
*/ - mutex_lock(&OCFS2_I(inode)->ip_unaligned_aio); - /* Mark the iocb as needing an unlock in ocfs2_dio_end_io */ - ocfs2_iocb_set_unaligned_aio(iocb); + saved_ki_complete = xchg(&iocb->ki_complete, NULL); } /* communicate with ocfs2_dio_end_io */ @@ -2398,14 +2267,13 @@ relock: */ if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) { rw_level = -1; - unaligned_dio = 0; } if (unlikely(written <= 0)) - goto no_sync; + goto out; if (((file->f_flags & O_DSYNC) && !direct_io) || - IS_SYNC(inode) || dropped_dio) { + IS_SYNC(inode)) { ret = filemap_fdatawrite_range(file->f_mapping, iocb->ki_pos - written, iocb->ki_pos - 1); @@ -2424,13 +2292,10 @@ relock: iocb->ki_pos - 1); } -no_sync: - if (unaligned_dio && ocfs2_iocb_is_unaligned_aio(iocb)) { - ocfs2_iocb_clear_unaligned_aio(iocb); - mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio); - } - out: + if (saved_ki_complete) + xchg(&iocb->ki_complete, saved_ki_complete); + if (rw_level != -1) ocfs2_rw_unlock(inode, rw_level); diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c new file mode 100644 index 000000000000..2cabbcf2f28e --- /dev/null +++ b/fs/ocfs2/filecheck.c @@ -0,0 +1,606 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * filecheck.c + * + * Code which implements online file check. + * + * Copyright (C) 2016 SuSE. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/kmod.h> +#include <linux/fs.h> +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/sysctl.h> +#include <cluster/masklog.h> + +#include "ocfs2.h" +#include "ocfs2_fs.h" +#include "stackglue.h" +#include "inode.h" + +#include "filecheck.h" + + +/* File check error strings, + * must correspond with error number in header file. 
+ */ +static const char * const ocfs2_filecheck_errs[] = { + "SUCCESS", + "FAILED", + "INPROGRESS", + "READONLY", + "INJBD", + "INVALIDINO", + "BLOCKECC", + "BLOCKNO", + "VALIDFLAG", + "GENERATION", + "UNSUPPORTED" +}; + +static DEFINE_SPINLOCK(ocfs2_filecheck_sysfs_lock); +static LIST_HEAD(ocfs2_filecheck_sysfs_list); + +struct ocfs2_filecheck { + struct list_head fc_head; /* File check entry list head */ + spinlock_t fc_lock; + unsigned int fc_max; /* Maximum number of entry in list */ + unsigned int fc_size; /* Current entry count in list */ + unsigned int fc_done; /* Finished entry count in list */ +}; + +struct ocfs2_filecheck_sysfs_entry { /* sysfs entry per mounting */ + struct list_head fs_list; + atomic_t fs_count; + struct super_block *fs_sb; + struct kset *fs_devicekset; + struct kset *fs_fcheckkset; + struct ocfs2_filecheck *fs_fcheck; +}; + +#define OCFS2_FILECHECK_MAXSIZE 100 +#define OCFS2_FILECHECK_MINSIZE 10 + +/* File check operation type */ +enum { + OCFS2_FILECHECK_TYPE_CHK = 0, /* Check a file(inode) */ + OCFS2_FILECHECK_TYPE_FIX, /* Fix a file(inode) */ + OCFS2_FILECHECK_TYPE_SET = 100 /* Set entry list maximum size */ +}; + +struct ocfs2_filecheck_entry { + struct list_head fe_list; + unsigned long fe_ino; + unsigned int fe_type; + unsigned int fe_done:1; + unsigned int fe_status:31; +}; + +struct ocfs2_filecheck_args { + unsigned int fa_type; + union { + unsigned long fa_ino; + unsigned int fa_len; + }; +}; + +static const char * +ocfs2_filecheck_error(int errno) +{ + if (!errno) + return ocfs2_filecheck_errs[errno]; + + BUG_ON(errno < OCFS2_FILECHECK_ERR_START || + errno > OCFS2_FILECHECK_ERR_END); + return ocfs2_filecheck_errs[errno - OCFS2_FILECHECK_ERR_START + 1]; +} + +static ssize_t ocfs2_filecheck_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf); +static ssize_t ocfs2_filecheck_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count); +static struct kobj_attribute ocfs2_attr_filecheck_chk = + __ATTR(check, S_IRUSR | S_IWUSR, + ocfs2_filecheck_show, + ocfs2_filecheck_store); +static struct kobj_attribute ocfs2_attr_filecheck_fix = + __ATTR(fix, S_IRUSR | S_IWUSR, + ocfs2_filecheck_show, + ocfs2_filecheck_store); +static struct kobj_attribute ocfs2_attr_filecheck_set = + __ATTR(set, S_IRUSR | S_IWUSR, + ocfs2_filecheck_show, + ocfs2_filecheck_store); + +static int ocfs2_filecheck_sysfs_wait(atomic_t *p) +{ + schedule(); + return 0; +} + +static void +ocfs2_filecheck_sysfs_free(struct ocfs2_filecheck_sysfs_entry *entry) +{ + struct ocfs2_filecheck_entry *p; + + if (!atomic_dec_and_test(&entry->fs_count)) + wait_on_atomic_t(&entry->fs_count, ocfs2_filecheck_sysfs_wait, + TASK_UNINTERRUPTIBLE); + + spin_lock(&entry->fs_fcheck->fc_lock); + while (!list_empty(&entry->fs_fcheck->fc_head)) { + p = list_first_entry(&entry->fs_fcheck->fc_head, + struct ocfs2_filecheck_entry, fe_list); + list_del(&p->fe_list); + BUG_ON(!p->fe_done); /* To free a undone file check entry */ + kfree(p); + } + spin_unlock(&entry->fs_fcheck->fc_lock); + + kset_unregister(entry->fs_fcheckkset); + kset_unregister(entry->fs_devicekset); + kfree(entry->fs_fcheck); + kfree(entry); +} + +static void +ocfs2_filecheck_sysfs_add(struct ocfs2_filecheck_sysfs_entry *entry) +{ + spin_lock(&ocfs2_filecheck_sysfs_lock); + list_add_tail(&entry->fs_list, &ocfs2_filecheck_sysfs_list); + spin_unlock(&ocfs2_filecheck_sysfs_lock); +} + +static int ocfs2_filecheck_sysfs_del(const char *devname) +{ + struct ocfs2_filecheck_sysfs_entry *p; + + 
spin_lock(&ocfs2_filecheck_sysfs_lock); + list_for_each_entry(p, &ocfs2_filecheck_sysfs_list, fs_list) { + if (!strcmp(p->fs_sb->s_id, devname)) { + list_del(&p->fs_list); + spin_unlock(&ocfs2_filecheck_sysfs_lock); + ocfs2_filecheck_sysfs_free(p); + return 0; + } + } + spin_unlock(&ocfs2_filecheck_sysfs_lock); + return 1; +} + +static void +ocfs2_filecheck_sysfs_put(struct ocfs2_filecheck_sysfs_entry *entry) +{ + if (atomic_dec_and_test(&entry->fs_count)) + wake_up_atomic_t(&entry->fs_count); +} + +static struct ocfs2_filecheck_sysfs_entry * +ocfs2_filecheck_sysfs_get(const char *devname) +{ + struct ocfs2_filecheck_sysfs_entry *p = NULL; + + spin_lock(&ocfs2_filecheck_sysfs_lock); + list_for_each_entry(p, &ocfs2_filecheck_sysfs_list, fs_list) { + if (!strcmp(p->fs_sb->s_id, devname)) { + atomic_inc(&p->fs_count); + spin_unlock(&ocfs2_filecheck_sysfs_lock); + return p; + } + } + spin_unlock(&ocfs2_filecheck_sysfs_lock); + return NULL; +} + +int ocfs2_filecheck_create_sysfs(struct super_block *sb) +{ + int ret = 0; + struct kset *device_kset = NULL; + struct kset *fcheck_kset = NULL; + struct ocfs2_filecheck *fcheck = NULL; + struct ocfs2_filecheck_sysfs_entry *entry = NULL; + struct attribute **attrs = NULL; + struct attribute_group attrgp; + + if (!ocfs2_kset) + return -ENOMEM; + + attrs = kmalloc(sizeof(struct attribute *) * 4, GFP_NOFS); + if (!attrs) { + ret = -ENOMEM; + goto error; + } else { + attrs[0] = &ocfs2_attr_filecheck_chk.attr; + attrs[1] = &ocfs2_attr_filecheck_fix.attr; + attrs[2] = &ocfs2_attr_filecheck_set.attr; + attrs[3] = NULL; + memset(&attrgp, 0, sizeof(attrgp)); + attrgp.attrs = attrs; + } + + fcheck = kmalloc(sizeof(struct ocfs2_filecheck), GFP_NOFS); + if (!fcheck) { + ret = -ENOMEM; + goto error; + } else { + INIT_LIST_HEAD(&fcheck->fc_head); + spin_lock_init(&fcheck->fc_lock); + fcheck->fc_max = OCFS2_FILECHECK_MINSIZE; + fcheck->fc_size = 0; + fcheck->fc_done = 0; + } + + if (strlen(sb->s_id) <= 0) { + mlog(ML_ERROR, + "Cannot get device basename when create filecheck sysfs\n"); + ret = -ENODEV; + goto error; + } + + device_kset = kset_create_and_add(sb->s_id, NULL, &ocfs2_kset->kobj); + if (!device_kset) { + ret = -ENOMEM; + goto error; + } + + fcheck_kset = kset_create_and_add("filecheck", NULL, + &device_kset->kobj); + if (!fcheck_kset) { + ret = -ENOMEM; + goto error; + } + + ret = sysfs_create_group(&fcheck_kset->kobj, &attrgp); + if (ret) + goto error; + + entry = kmalloc(sizeof(struct ocfs2_filecheck_sysfs_entry), GFP_NOFS); + if (!entry) { + ret = -ENOMEM; + goto error; + } else { + atomic_set(&entry->fs_count, 1); + entry->fs_sb = sb; + entry->fs_devicekset = device_kset; + entry->fs_fcheckkset = fcheck_kset; + entry->fs_fcheck = fcheck; + ocfs2_filecheck_sysfs_add(entry); + } + + kfree(attrs); + return 0; + +error: + kfree(attrs); + kfree(entry); + kfree(fcheck); + kset_unregister(fcheck_kset); + kset_unregister(device_kset); + return ret; +} + +int ocfs2_filecheck_remove_sysfs(struct super_block *sb) +{ + return ocfs2_filecheck_sysfs_del(sb->s_id); +} + +static int +ocfs2_filecheck_erase_entries(struct ocfs2_filecheck_sysfs_entry *ent, + unsigned int count); +static int +ocfs2_filecheck_adjust_max(struct ocfs2_filecheck_sysfs_entry *ent, + unsigned int len) +{ + int ret; + + if ((len < OCFS2_FILECHECK_MINSIZE) || (len > OCFS2_FILECHECK_MAXSIZE)) + return -EINVAL; + + spin_lock(&ent->fs_fcheck->fc_lock); + if (len < (ent->fs_fcheck->fc_size - ent->fs_fcheck->fc_done)) { + mlog(ML_ERROR, + "Cannot set online file check maximum entry number " + "to %u 
due to too many pending entries(%u)\n", + len, ent->fs_fcheck->fc_size - ent->fs_fcheck->fc_done); + ret = -EBUSY; + } else { + if (len < ent->fs_fcheck->fc_size) + BUG_ON(!ocfs2_filecheck_erase_entries(ent, + ent->fs_fcheck->fc_size - len)); + + ent->fs_fcheck->fc_max = len; + ret = 0; + } + spin_unlock(&ent->fs_fcheck->fc_lock); + + return ret; +} + +#define OCFS2_FILECHECK_ARGS_LEN 24 +static int +ocfs2_filecheck_args_get_long(const char *buf, size_t count, + unsigned long *val) +{ + char buffer[OCFS2_FILECHECK_ARGS_LEN]; + + memcpy(buffer, buf, count); + buffer[count] = '\0'; + + if (kstrtoul(buffer, 0, val)) + return 1; + + return 0; +} + +static int +ocfs2_filecheck_type_parse(const char *name, unsigned int *type) +{ + if (!strncmp(name, "fix", 4)) + *type = OCFS2_FILECHECK_TYPE_FIX; + else if (!strncmp(name, "check", 6)) + *type = OCFS2_FILECHECK_TYPE_CHK; + else if (!strncmp(name, "set", 4)) + *type = OCFS2_FILECHECK_TYPE_SET; + else + return 1; + + return 0; +} + +static int +ocfs2_filecheck_args_parse(const char *name, const char *buf, size_t count, + struct ocfs2_filecheck_args *args) +{ + unsigned long val = 0; + unsigned int type; + + /* too short/long args length */ + if ((count < 1) || (count >= OCFS2_FILECHECK_ARGS_LEN)) + return 1; + + if (ocfs2_filecheck_type_parse(name, &type)) + return 1; + if (ocfs2_filecheck_args_get_long(buf, count, &val)) + return 1; + + if (val <= 0) + return 1; + + args->fa_type = type; + if (type == OCFS2_FILECHECK_TYPE_SET) + args->fa_len = (unsigned int)val; + else + args->fa_ino = val; + + return 0; +} + +static ssize_t ocfs2_filecheck_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + + ssize_t ret = 0, total = 0, remain = PAGE_SIZE; + unsigned int type; + struct ocfs2_filecheck_entry *p; + struct ocfs2_filecheck_sysfs_entry *ent; + + if (ocfs2_filecheck_type_parse(attr->attr.name, &type)) + return -EINVAL; + + ent = ocfs2_filecheck_sysfs_get(kobj->parent->name); + if (!ent) { + mlog(ML_ERROR, + "Cannot get the corresponding entry via device basename %s\n", + kobj->name); + return -ENODEV; + } + + if (type == OCFS2_FILECHECK_TYPE_SET) { + spin_lock(&ent->fs_fcheck->fc_lock); + total = snprintf(buf, remain, "%u\n", ent->fs_fcheck->fc_max); + spin_unlock(&ent->fs_fcheck->fc_lock); + goto exit; + } + + ret = snprintf(buf, remain, "INO\t\tDONE\tERROR\n"); + total += ret; + remain -= ret; + spin_lock(&ent->fs_fcheck->fc_lock); + list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) { + if (p->fe_type != type) + continue; + + ret = snprintf(buf + total, remain, "%lu\t\t%u\t%s\n", + p->fe_ino, p->fe_done, + ocfs2_filecheck_error(p->fe_status)); + if (ret < 0) { + total = ret; + break; + } + if (ret == remain) { + /* snprintf() didn't fit */ + total = -E2BIG; + break; + } + total += ret; + remain -= ret; + } + spin_unlock(&ent->fs_fcheck->fc_lock); + +exit: + ocfs2_filecheck_sysfs_put(ent); + return total; +} + +static int +ocfs2_filecheck_erase_entry(struct ocfs2_filecheck_sysfs_entry *ent) +{ + struct ocfs2_filecheck_entry *p; + + list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) { + if (p->fe_done) { + list_del(&p->fe_list); + kfree(p); + ent->fs_fcheck->fc_size--; + ent->fs_fcheck->fc_done--; + return 1; + } + } + + return 0; +} + +static int +ocfs2_filecheck_erase_entries(struct ocfs2_filecheck_sysfs_entry *ent, + unsigned int count) +{ + unsigned int i = 0; + unsigned int ret = 0; + + while (i++ < count) { + if (ocfs2_filecheck_erase_entry(ent)) + ret++; + else + break; + } + + return (ret == count ? 
1 : 0); +} + +static void +ocfs2_filecheck_done_entry(struct ocfs2_filecheck_sysfs_entry *ent, + struct ocfs2_filecheck_entry *entry) +{ + entry->fe_done = 1; + spin_lock(&ent->fs_fcheck->fc_lock); + ent->fs_fcheck->fc_done++; + spin_unlock(&ent->fs_fcheck->fc_lock); +} + +static unsigned int +ocfs2_filecheck_handle(struct super_block *sb, + unsigned long ino, unsigned int flags) +{ + unsigned int ret = OCFS2_FILECHECK_ERR_SUCCESS; + struct inode *inode = NULL; + int rc; + + inode = ocfs2_iget(OCFS2_SB(sb), ino, flags, 0); + if (IS_ERR(inode)) { + rc = (int)(-(long)inode); + if (rc >= OCFS2_FILECHECK_ERR_START && + rc < OCFS2_FILECHECK_ERR_END) + ret = rc; + else + ret = OCFS2_FILECHECK_ERR_FAILED; + } else + iput(inode); + + return ret; +} + +static void +ocfs2_filecheck_handle_entry(struct ocfs2_filecheck_sysfs_entry *ent, + struct ocfs2_filecheck_entry *entry) +{ + if (entry->fe_type == OCFS2_FILECHECK_TYPE_CHK) + entry->fe_status = ocfs2_filecheck_handle(ent->fs_sb, + entry->fe_ino, OCFS2_FI_FLAG_FILECHECK_CHK); + else if (entry->fe_type == OCFS2_FILECHECK_TYPE_FIX) + entry->fe_status = ocfs2_filecheck_handle(ent->fs_sb, + entry->fe_ino, OCFS2_FI_FLAG_FILECHECK_FIX); + else + entry->fe_status = OCFS2_FILECHECK_ERR_UNSUPPORTED; + + ocfs2_filecheck_done_entry(ent, entry); +} + +static ssize_t ocfs2_filecheck_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct ocfs2_filecheck_args args; + struct ocfs2_filecheck_entry *entry; + struct ocfs2_filecheck_sysfs_entry *ent; + ssize_t ret = 0; + + if (count == 0) + return count; + + if (ocfs2_filecheck_args_parse(attr->attr.name, buf, count, &args)) { + mlog(ML_ERROR, "Invalid arguments for online file check\n"); + return -EINVAL; + } + + ent = ocfs2_filecheck_sysfs_get(kobj->parent->name); + if (!ent) { + mlog(ML_ERROR, + "Cannot get the corresponding entry via device basename %s\n", + kobj->parent->name); + return -ENODEV; + } + + if (args.fa_type == OCFS2_FILECHECK_TYPE_SET) { + ret = ocfs2_filecheck_adjust_max(ent, args.fa_len); + goto exit; + } + + entry = kmalloc(sizeof(struct ocfs2_filecheck_entry), GFP_NOFS); + if (!entry) { + ret = -ENOMEM; + goto exit; + } + + spin_lock(&ent->fs_fcheck->fc_lock); + if ((ent->fs_fcheck->fc_size >= ent->fs_fcheck->fc_max) && + (ent->fs_fcheck->fc_done == 0)) { + mlog(ML_ERROR, + "Cannot do more file check " + "since file check queue(%u) is full now\n", + ent->fs_fcheck->fc_max); + ret = -EBUSY; + kfree(entry); + } else { + if ((ent->fs_fcheck->fc_size >= ent->fs_fcheck->fc_max) && + (ent->fs_fcheck->fc_done > 0)) { + /* Delete the oldest entry which was done, + * make sure the entry size in list does + * not exceed maximum value + */ + BUG_ON(!ocfs2_filecheck_erase_entry(ent)); + } + + entry->fe_ino = args.fa_ino; + entry->fe_type = args.fa_type; + entry->fe_done = 0; + entry->fe_status = OCFS2_FILECHECK_ERR_INPROGRESS; + list_add_tail(&entry->fe_list, &ent->fs_fcheck->fc_head); + ent->fs_fcheck->fc_size++; + } + spin_unlock(&ent->fs_fcheck->fc_lock); + + if (!ret) + ocfs2_filecheck_handle_entry(ent, entry); + +exit: + ocfs2_filecheck_sysfs_put(ent); + return (!ret ? count : ret); +} diff --git a/fs/ocfs2/filecheck.h b/fs/ocfs2/filecheck.h new file mode 100644 index 000000000000..e5cd002a2c09 --- /dev/null +++ b/fs/ocfs2/filecheck.h @@ -0,0 +1,49 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * filecheck.h + * + * Online file check. + * + * Copyright (C) 2016 SuSE. All rights reserved. 
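+ * + * Usage sketch (hedged; the device name "sda1" and inode number 12345 are hypothetical examples, and the paths follow ocfs2_filecheck_create_sysfs() in filecheck.c): + * + *	int fd = open("/sys/fs/ocfs2/sda1/filecheck/check", O_WRONLY); + *	write(fd, "12345", 5);	// queue inode 12345 for an online check + *	close(fd);	// reading the same file later shows INO/DONE/ERROR rows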
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + + +#ifndef FILECHECK_H +#define FILECHECK_H + +#include <linux/types.h> +#include <linux/list.h> + + +/* File check errno */ +enum { + OCFS2_FILECHECK_ERR_SUCCESS = 0, /* Success */ + OCFS2_FILECHECK_ERR_FAILED = 1000, /* Other failure */ + OCFS2_FILECHECK_ERR_INPROGRESS, /* In progress */ + OCFS2_FILECHECK_ERR_READONLY, /* Read only */ + OCFS2_FILECHECK_ERR_INJBD, /* Buffer in jbd */ + OCFS2_FILECHECK_ERR_INVALIDINO, /* Invalid ino */ + OCFS2_FILECHECK_ERR_BLOCKECC, /* Block ecc */ + OCFS2_FILECHECK_ERR_BLOCKNO, /* Block number */ + OCFS2_FILECHECK_ERR_VALIDFLAG, /* Inode valid flag */ + OCFS2_FILECHECK_ERR_GENERATION, /* Inode generation */ + OCFS2_FILECHECK_ERR_UNSUPPORTED /* Unsupported */ +}; + +#define OCFS2_FILECHECK_ERR_START OCFS2_FILECHECK_ERR_FAILED +#define OCFS2_FILECHECK_ERR_END OCFS2_FILECHECK_ERR_UNSUPPORTED + +int ocfs2_filecheck_create_sysfs(struct super_block *sb); +int ocfs2_filecheck_remove_sysfs(struct super_block *sb); + +#endif /* FILECHECK_H */ diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 36294446d960..12f4a9e9800f 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -53,6 +53,7 @@ #include "xattr.h" #include "refcounttree.h" #include "ocfs2_trace.h" +#include "filecheck.h" #include "buffer_head_io.h" @@ -74,6 +75,14 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, struct inode *inode, struct buffer_head *fe_bh); +static int ocfs2_filecheck_read_inode_block_full(struct inode *inode, + struct buffer_head **bh, + int flags, int type); +static int ocfs2_filecheck_validate_inode_block(struct super_block *sb, + struct buffer_head *bh); +static int ocfs2_filecheck_repair_inode_block(struct super_block *sb, + struct buffer_head *bh); + void ocfs2_set_inode_flags(struct inode *inode) { unsigned int flags = OCFS2_I(inode)->ip_attr; @@ -127,6 +136,7 @@ struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno) struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags, int sysfile_type) { + int rc = 0; struct inode *inode = NULL; struct super_block *sb = osb->sb; struct ocfs2_find_inode_args args; @@ -161,12 +171,17 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags, } trace_ocfs2_iget5_locked(inode->i_state); if (inode->i_state & I_NEW) { - ocfs2_read_locked_inode(inode, &args); + rc = ocfs2_read_locked_inode(inode, &args); unlock_new_inode(inode); } if (is_bad_inode(inode)) { iput(inode); - inode = ERR_PTR(-ESTALE); + if ((flags & OCFS2_FI_FLAG_FILECHECK_CHK) || + (flags & OCFS2_FI_FLAG_FILECHECK_FIX)) + /* Return OCFS2_FILECHECK_ERR_XXX related errno */ + inode = ERR_PTR(rc); + else + inode = ERR_PTR(-ESTALE); goto bail; } @@ -410,7 +425,7 @@ static int ocfs2_read_locked_inode(struct inode *inode, struct ocfs2_super *osb; struct ocfs2_dinode *fe; struct buffer_head *bh = NULL; - int status, can_lock; + int status, can_lock, lock_level = 0; u32 generation = 0; status = -EINVAL; @@ -478,7 +493,7 @@ static int ocfs2_read_locked_inode(struct inode *inode, mlog_errno(status); return status; } - status = ocfs2_inode_lock(inode, NULL, 0); + status = 
ocfs2_inode_lock(inode, NULL, lock_level); if (status) { make_bad_inode(inode); mlog_errno(status); @@ -495,16 +510,32 @@ static int ocfs2_read_locked_inode(struct inode *inode, } if (can_lock) { - status = ocfs2_read_inode_block_full(inode, &bh, - OCFS2_BH_IGNORE_CACHE); + if (args->fi_flags & OCFS2_FI_FLAG_FILECHECK_CHK) + status = ocfs2_filecheck_read_inode_block_full(inode, + &bh, OCFS2_BH_IGNORE_CACHE, 0); + else if (args->fi_flags & OCFS2_FI_FLAG_FILECHECK_FIX) + status = ocfs2_filecheck_read_inode_block_full(inode, + &bh, OCFS2_BH_IGNORE_CACHE, 1); + else + status = ocfs2_read_inode_block_full(inode, + &bh, OCFS2_BH_IGNORE_CACHE); } else { status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); /* * If buffer is in jbd, then its checksum may not have been * computed as yet. */ - if (!status && !buffer_jbd(bh)) - status = ocfs2_validate_inode_block(osb->sb, bh); + if (!status && !buffer_jbd(bh)) { + if (args->fi_flags & OCFS2_FI_FLAG_FILECHECK_CHK) + status = ocfs2_filecheck_validate_inode_block( + osb->sb, bh); + else if (args->fi_flags & OCFS2_FI_FLAG_FILECHECK_FIX) + status = ocfs2_filecheck_repair_inode_block( + osb->sb, bh); + else + status = ocfs2_validate_inode_block( + osb->sb, bh); + } } if (status < 0) { mlog_errno(status); @@ -532,11 +563,24 @@ static int ocfs2_read_locked_inode(struct inode *inode, BUG_ON(args->fi_blkno != le64_to_cpu(fe->i_blkno)); + if (buffer_dirty(bh) && !buffer_jbd(bh)) { + if (can_lock) { + ocfs2_inode_unlock(inode, lock_level); + lock_level = 1; + ocfs2_inode_lock(inode, NULL, lock_level); + } + status = ocfs2_write_block(osb, bh, INODE_CACHE(inode)); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + status = 0; bail: if (can_lock) - ocfs2_inode_unlock(inode, 0); + ocfs2_inode_unlock(inode, lock_level); if (status < 0) make_bad_inode(inode); @@ -1126,6 +1170,9 @@ static void ocfs2_clear_inode(struct inode *inode) mlog_bug_on_msg(!list_empty(&oi->ip_io_markers), "Clear inode of %llu, inode has io markers\n", (unsigned long long)oi->ip_blkno); + mlog_bug_on_msg(!list_empty(&oi->ip_unwritten_list), + "Clear inode of %llu, inode has unwritten extents\n", + (unsigned long long)oi->ip_blkno); ocfs2_extent_map_trunc(inode, 0); @@ -1397,6 +1444,169 @@ bail: return rc; } +static int ocfs2_filecheck_validate_inode_block(struct super_block *sb, + struct buffer_head *bh) +{ + int rc = 0; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; + + trace_ocfs2_filecheck_validate_inode_block( + (unsigned long long)bh->b_blocknr); + + BUG_ON(!buffer_uptodate(bh)); + + /* + * Call ocfs2_validate_meta_ecc() first since it has an ecc repair + * function, but we should not return error immediately when ecc + * validation fails, because the reason is quite likely an invalid + * inode number input.
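+ * + * (For illustration: ocfs2_iget() hands these negative codes back as ERR_PTR(rc), and ocfs2_filecheck_handle() in filecheck.c recovers the positive value again, roughly: + * + *	rc = (int)(-(long)inode);	// undo the ERR_PTR encoding + *	if (rc >= OCFS2_FILECHECK_ERR_START && rc < OCFS2_FILECHECK_ERR_END) + *		ret = rc;	// a filecheck-specific status + *	else + *		ret = OCFS2_FILECHECK_ERR_FAILED; + * + * so the -OCFS2_FILECHECK_ERR_* values returned here surface as rows in the sysfs result list.)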
+ */ + rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check); + if (rc) { + mlog(ML_ERROR, + "Filecheck: checksum failed for dinode %llu\n", + (unsigned long long)bh->b_blocknr); + rc = -OCFS2_FILECHECK_ERR_BLOCKECC; + } + + if (!OCFS2_IS_VALID_DINODE(di)) { + mlog(ML_ERROR, + "Filecheck: invalid dinode #%llu: signature = %.*s\n", + (unsigned long long)bh->b_blocknr, 7, di->i_signature); + rc = -OCFS2_FILECHECK_ERR_INVALIDINO; + goto bail; + } else if (rc) + goto bail; + + if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) { + mlog(ML_ERROR, + "Filecheck: invalid dinode #%llu: i_blkno is %llu\n", + (unsigned long long)bh->b_blocknr, + (unsigned long long)le64_to_cpu(di->i_blkno)); + rc = -OCFS2_FILECHECK_ERR_BLOCKNO; + goto bail; + } + + if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) { + mlog(ML_ERROR, + "Filecheck: invalid dinode #%llu: OCFS2_VALID_FL " + "not set\n", + (unsigned long long)bh->b_blocknr); + rc = -OCFS2_FILECHECK_ERR_VALIDFLAG; + goto bail; + } + + if (le32_to_cpu(di->i_fs_generation) != + OCFS2_SB(sb)->fs_generation) { + mlog(ML_ERROR, + "Filecheck: invalid dinode #%llu: fs_generation is %u\n", + (unsigned long long)bh->b_blocknr, + le32_to_cpu(di->i_fs_generation)); + rc = -OCFS2_FILECHECK_ERR_GENERATION; + goto bail; + } + +bail: + return rc; +} + +static int ocfs2_filecheck_repair_inode_block(struct super_block *sb, + struct buffer_head *bh) +{ + int changed = 0; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; + + if (!ocfs2_filecheck_validate_inode_block(sb, bh)) + return 0; + + trace_ocfs2_filecheck_repair_inode_block( + (unsigned long long)bh->b_blocknr); + + if (ocfs2_is_hard_readonly(OCFS2_SB(sb)) || + ocfs2_is_soft_readonly(OCFS2_SB(sb))) { + mlog(ML_ERROR, + "Filecheck: cannot repair dinode #%llu " + "on readonly filesystem\n", + (unsigned long long)bh->b_blocknr); + return -OCFS2_FILECHECK_ERR_READONLY; + } + + if (buffer_jbd(bh)) { + mlog(ML_ERROR, + "Filecheck: cannot repair dinode #%llu, " + "its buffer is in jbd\n", + (unsigned long long)bh->b_blocknr); + return -OCFS2_FILECHECK_ERR_INJBD; + } + + if (!OCFS2_IS_VALID_DINODE(di)) { + /* Cannot fix invalid inode block */ + return -OCFS2_FILECHECK_ERR_INVALIDINO; + } + + if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) { + /* Cannot just add VALID_FL flag back as a fix, + * need more things to check here. 
+ */ + return -OCFS2_FILECHECK_ERR_VALIDFLAG; + } + + if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) { + di->i_blkno = cpu_to_le64(bh->b_blocknr); + changed = 1; + mlog(ML_ERROR, + "Filecheck: reset dinode #%llu: i_blkno to %llu\n", + (unsigned long long)bh->b_blocknr, + (unsigned long long)le64_to_cpu(di->i_blkno)); + } + + if (le32_to_cpu(di->i_fs_generation) != + OCFS2_SB(sb)->fs_generation) { + di->i_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation); + changed = 1; + mlog(ML_ERROR, + "Filecheck: reset dinode #%llu: fs_generation to %u\n", + (unsigned long long)bh->b_blocknr, + le32_to_cpu(di->i_fs_generation)); + } + + if (changed || ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check)) { + ocfs2_compute_meta_ecc(sb, bh->b_data, &di->i_check); + mark_buffer_dirty(bh); + mlog(ML_ERROR, + "Filecheck: reset dinode #%llu: compute meta ecc\n", + (unsigned long long)bh->b_blocknr); + } + + return 0; +} + +static int +ocfs2_filecheck_read_inode_block_full(struct inode *inode, + struct buffer_head **bh, + int flags, int type) +{ + int rc; + struct buffer_head *tmp = *bh; + + if (!type) /* Check inode block */ + rc = ocfs2_read_blocks(INODE_CACHE(inode), + OCFS2_I(inode)->ip_blkno, + 1, &tmp, flags, + ocfs2_filecheck_validate_inode_block); + else /* Repair inode block */ + rc = ocfs2_read_blocks(INODE_CACHE(inode), + OCFS2_I(inode)->ip_blkno, + 1, &tmp, flags, + ocfs2_filecheck_repair_inode_block); + + /* If ocfs2_read_blocks() got us a new bh, pass it up. */ + if (!rc && !*bh) + *bh = tmp; + + return rc; +} + int ocfs2_read_inode_block_full(struct inode *inode, struct buffer_head **bh, int flags) { diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index aac8b86f312e..d8f3fc8d2551 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -43,9 +43,6 @@ struct ocfs2_inode_info /* protects extended attribute changes on this inode */ struct rw_semaphore ip_xattr_sem; - /* Number of outstanding AIO's which are not page aligned */ - struct mutex ip_unaligned_aio; - /* These fields are protected by ip_lock */ spinlock_t ip_lock; u32 ip_open_count; @@ -57,6 +54,9 @@ struct ocfs2_inode_info u32 ip_flags; /* see below */ u32 ip_attr; /* inode attributes */ + /* Record unwritten extents during direct io. */ + struct list_head ip_unwritten_list; + /* protected by recovery_lock. */ struct inode *ip_next_orphan; @@ -139,6 +139,9 @@ int ocfs2_drop_inode(struct inode *inode); /* Flags for ocfs2_iget() */ #define OCFS2_FI_FLAG_SYSFILE 0x1 #define OCFS2_FI_FLAG_ORPHAN_RECOVERY 0x2 +#define OCFS2_FI_FLAG_FILECHECK_CHK 0x4 +#define OCFS2_FI_FLAG_FILECHECK_FIX 0x8 + struct inode *ocfs2_ilookup(struct super_block *sb, u64 feoff); struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff, unsigned flags, int sysfile_type); diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 61b833b721d8..e607419cdfa4 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -231,7 +231,7 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb) /* At this point, we know that no more recovery threads can be * launched, so wait for any recovery completion work to * complete. 
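* * (The flush below now targets the per-mount workqueue, osb->ocfs2_wq, rather than a global one; a sketch of the pattern this series adopts, with osb the owning ocfs2_super: * *	queue_work(osb->ocfs2_wq, &journal->j_recovery_work); *	flush_workqueue(osb->ocfs2_wq);	// waits only for this mount's work * )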
*/ - flush_workqueue(ocfs2_wq); + flush_workqueue(osb->ocfs2_wq); /* * Now that recovery is shut down, and the osb is about to be @@ -1326,7 +1326,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, spin_lock(&journal->j_lock); list_add_tail(&item->lri_list, &journal->j_la_cleanups); - queue_work(ocfs2_wq, &journal->j_recovery_work); + queue_work(journal->j_osb->ocfs2_wq, &journal->j_recovery_work); spin_unlock(&journal->j_lock); } @@ -1968,7 +1968,7 @@ static void ocfs2_orphan_scan_work(struct work_struct *work) mutex_lock(&os->os_lock); ocfs2_queue_orphan_scan(osb); if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) - queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work, + queue_delayed_work(osb->ocfs2_wq, &os->os_orphan_scan_work, ocfs2_orphan_scan_timeout()); mutex_unlock(&os->os_lock); } @@ -2008,7 +2008,7 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb) atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); else { atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE); - queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work, + queue_delayed_work(osb->ocfs2_wq, &os->os_orphan_scan_work, ocfs2_orphan_scan_timeout()); } } diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 7d62c43a2c3e..fe0d1f9571bb 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -386,7 +386,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) struct ocfs2_dinode *alloc = NULL; cancel_delayed_work(&osb->la_enable_wq); - flush_workqueue(ocfs2_wq); + flush_workqueue(osb->ocfs2_wq); if (osb->local_alloc_state == OCFS2_LA_UNUSED) goto out; @@ -1085,7 +1085,7 @@ static int ocfs2_recalc_la_window(struct ocfs2_super *osb, } else { osb->local_alloc_state = OCFS2_LA_DISABLED; } - queue_delayed_work(ocfs2_wq, &osb->la_enable_wq, + queue_delayed_work(osb->ocfs2_wq, &osb->la_enable_wq, OCFS2_LA_ENABLE_INTERVAL); goto out_unlock; } diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 77ebc2bc1cca..71545ad4628c 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -65,13 +65,13 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; loff_t pos = page_offset(page); - unsigned int len = PAGE_CACHE_SIZE; + unsigned int len = PAGE_SIZE; pgoff_t last_index; struct page *locked_page = NULL; void *fsdata; loff_t size = i_size_read(inode); - last_index = (size - 1) >> PAGE_CACHE_SHIFT; + last_index = (size - 1) >> PAGE_SHIFT; /* * There are cases that lead to the page no longer belonging to the @@ -102,10 +102,10 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, * because the "write" would invalidate their data. */ if (page->index == last_index) - len = ((size - 1) & ~PAGE_CACHE_MASK) + 1; + len = ((size - 1) & ~PAGE_MASK) + 1; - ret = ocfs2_write_begin_nolock(file, mapping, pos, len, 0, &locked_page, - &fsdata, di_bh, page); + ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, + &locked_page, &fsdata, di_bh, page); if (ret) { if (ret != -ENOSPC) mlog_errno(ret); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 7a0126267847..e63af7ddfe68 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -464,6 +464,14 @@ struct ocfs2_super struct ocfs2_refcount_tree *osb_ref_tree_lru; struct mutex system_file_mutex; + + /* + * OCFS2 needs to schedule several different types of work which + * require cluster locking, disk I/O, recovery waits, etc.
Since these + * types of work tend to be heavy we avoid using the kernel events + * workqueue and schedule on our own. + */ + struct workqueue_struct *ocfs2_wq; }; #define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info) @@ -814,10 +822,10 @@ static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb, u32 clusters = pg_index; unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; - if (unlikely(PAGE_CACHE_SHIFT > cbits)) - clusters = pg_index << (PAGE_CACHE_SHIFT - cbits); - else if (PAGE_CACHE_SHIFT < cbits) - clusters = pg_index >> (cbits - PAGE_CACHE_SHIFT); + if (unlikely(PAGE_SHIFT > cbits)) + clusters = pg_index << (PAGE_SHIFT - cbits); + else if (PAGE_SHIFT < cbits) + clusters = pg_index >> (cbits - PAGE_SHIFT); return clusters; } @@ -831,10 +839,10 @@ static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb, unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; pgoff_t index = clusters; - if (PAGE_CACHE_SHIFT > cbits) { - index = (pgoff_t)clusters >> (PAGE_CACHE_SHIFT - cbits); - } else if (PAGE_CACHE_SHIFT < cbits) { - index = (pgoff_t)clusters << (cbits - PAGE_CACHE_SHIFT); + if (PAGE_SHIFT > cbits) { + index = (pgoff_t)clusters >> (PAGE_SHIFT - cbits); + } else if (PAGE_SHIFT < cbits) { + index = (pgoff_t)clusters << (cbits - PAGE_SHIFT); } return index; @@ -845,8 +853,8 @@ static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb) unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; unsigned int pages_per_cluster = 1; - if (PAGE_CACHE_SHIFT < cbits) - pages_per_cluster = 1 << (cbits - PAGE_CACHE_SHIFT); + if (PAGE_SHIFT < cbits) + pages_per_cluster = 1 << (cbits - PAGE_SHIFT); return pages_per_cluster; } diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h index a52a2dbc064e..f8f5fc5e6c05 100644 --- a/fs/ocfs2/ocfs2_trace.h +++ b/fs/ocfs2/ocfs2_trace.h @@ -1450,28 +1450,20 @@ DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_remove_inode_range); TRACE_EVENT(ocfs2_prepare_inode_for_write, TP_PROTO(unsigned long long ino, unsigned long long saved_pos, - int appending, unsigned long count, - int *direct_io, int *has_refcount), - TP_ARGS(ino, saved_pos, appending, count, direct_io, has_refcount), + unsigned long count), + TP_ARGS(ino, saved_pos, count), TP_STRUCT__entry( __field(unsigned long long, ino) __field(unsigned long long, saved_pos) - __field(int, appending) __field(unsigned long, count) - __field(int, direct_io) - __field(int, has_refcount) ), TP_fast_assign( __entry->ino = ino; __entry->saved_pos = saved_pos; - __entry->appending = appending; __entry->count = count; - __entry->direct_io = direct_io ? *direct_io : -1; - __entry->has_refcount = has_refcount ? 
*has_refcount : -1; ), - TP_printk("%llu %llu %d %lu %d %d", __entry->ino, - __entry->saved_pos, __entry->appending, __entry->count, - __entry->direct_io, __entry->has_refcount) + TP_printk("%llu %llu %lu", __entry->ino, + __entry->saved_pos, __entry->count) ); DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret); @@ -1540,6 +1532,8 @@ DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_read_locked_inode); DEFINE_OCFS2_INT_INT_EVENT(ocfs2_check_orphan_recovery_state); DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_inode_block); +DEFINE_OCFS2_ULL_EVENT(ocfs2_filecheck_validate_inode_block); +DEFINE_OCFS2_ULL_EVENT(ocfs2_filecheck_repair_inode_block); TRACE_EVENT(ocfs2_inode_is_valid_to_delete, TP_PROTO(void *task, void *dc_task, unsigned long long ino, diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 91bc674203ed..ab6a6cdcf91c 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -726,7 +726,7 @@ static int ocfs2_release_dquot(struct dquot *dquot) dqgrab(dquot); /* First entry on list -> queue work */ if (llist_add(&OCFS2_DQUOT(dquot)->list, &osb->dquot_drop_list)) - queue_work(ocfs2_wq, &osb->dquot_drop_work); + queue_work(osb->ocfs2_wq, &osb->dquot_drop_work); goto out; } status = ocfs2_lock_global_qf(oinfo, 1); @@ -867,6 +867,10 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid) int status = 0; trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type); + if (!sb_has_quota_loaded(sb, type)) { + status = -ESRCH; + goto out; + } status = ocfs2_lock_global_qf(info, 0); if (status < 0) goto out; @@ -878,8 +882,11 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid) out_global: ocfs2_unlock_global_qf(info, 0); out: - /* Avoid logging ENOENT since it just means there isn't next ID */ - if (status && status != -ENOENT) + /* + * Avoid logging ENOENT since it just means there isn't next ID and + * ESRCH which means quota isn't enabled for the filesystem. + */ + if (status && status != -ENOENT && status != -ESRCH) mlog_errno(status); return status; } diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 3eff031aaf26..744d5d90c363 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2937,16 +2937,16 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, end = i_size_read(inode); while (offset < end) { - page_index = offset >> PAGE_CACHE_SHIFT; - map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; + page_index = offset >> PAGE_SHIFT; + map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; if (map_end > end) map_end = end; /* from, to is the offset within the page. */ - from = offset & (PAGE_CACHE_SIZE - 1); - to = PAGE_CACHE_SIZE; - if (map_end & (PAGE_CACHE_SIZE - 1)) - to = map_end & (PAGE_CACHE_SIZE - 1); + from = offset & (PAGE_SIZE - 1); + to = PAGE_SIZE; + if (map_end & (PAGE_SIZE - 1)) + to = map_end & (PAGE_SIZE - 1); page = find_or_create_page(mapping, page_index, GFP_NOFS); if (!page) { @@ -2956,10 +2956,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, } /* - * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page + * In case PAGE_SIZE <= CLUSTER_SIZE, This page * can't be dirtied before we CoW it out. 
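* * (Worked example, hedged: with 4K pages (PAGE_SHIFT == 12) and a 64K cluster size (cbits == 16), ocfs2_pages_per_cluster() in ocfs2.h yields * *	pages_per_cluster = 1 << (cbits - PAGE_SHIFT);	// 1 << 4 = 16 * * so one copied cluster walks sixteen iterations of this loop.)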
*/ - if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) + if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) BUG_ON(PageDirty(page)); if (!PageUptodate(page)) { @@ -2987,7 +2987,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, mark_page_accessed(page); unlock: unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; offset = map_end; if (ret) @@ -3165,8 +3165,8 @@ int ocfs2_cow_sync_writeback(struct super_block *sb, } while (offset < end) { - page_index = offset >> PAGE_CACHE_SHIFT; - map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; + page_index = offset >> PAGE_SHIFT; + map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; if (map_end > end) map_end = end; @@ -3182,7 +3182,7 @@ int ocfs2_cow_sync_writeback(struct super_block *sb, mark_page_accessed(page); unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; offset = map_end; if (ret) diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index 576b9a04873f..18451e0fab81 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -196,7 +196,7 @@ static int update_backups(struct inode * inode, u32 clusters, char *data) for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) { blkno = ocfs2_backup_super_blkno(inode->i_sb, i); cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno); - if (cluster > clusters) + if (cluster >= clusters) break; ret = ocfs2_read_blocks_sync(osb, blkno, 1, &backup); diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 5d965e83bd43..13219ed73e1d 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -629,7 +629,8 @@ static struct attribute_group ocfs2_attr_group = { .attrs = ocfs2_attrs, }; -static struct kset *ocfs2_kset; +struct kset *ocfs2_kset; +EXPORT_SYMBOL_GPL(ocfs2_kset); static void ocfs2_sysfs_exit(void) { diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h index 66334a30cea8..f2dce10fae54 100644 --- a/fs/ocfs2/stackglue.h +++ b/fs/ocfs2/stackglue.h @@ -298,4 +298,6 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); +extern struct kset *ocfs2_kset; + #endif /* STACKGLUE_H */ diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 302854ee0985..d7cae3327de5 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -74,17 +74,12 @@ #include "suballoc.h" #include "buffer_head_io.h" +#include "filecheck.h" static struct kmem_cache *ocfs2_inode_cachep; struct kmem_cache *ocfs2_dquot_cachep; struct kmem_cache *ocfs2_qf_chunk_cachep; -/* OCFS2 needs to schedule several different types of work which - * require cluster locking, disk I/O, recovery waits, etc. Since these - * types of work tend to be heavy we avoid using the kernel events - * workqueue and schedule on our own. */ -struct workqueue_struct *ocfs2_wq = NULL; - static struct dentry *ocfs2_debugfs_root; MODULE_AUTHOR("Oracle"); @@ -610,8 +605,8 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits, /* * We might be limited by page cache size. 
*/ - if (bytes > PAGE_CACHE_SIZE) { - bytes = PAGE_CACHE_SIZE; + if (bytes > PAGE_SIZE) { + bytes = PAGE_SIZE; trim = 1; /* * Shift by 31 here so that we don't get larger than @@ -1205,6 +1200,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) /* Start this when the mount is almost sure of being successful */ ocfs2_orphan_scan_start(osb); + /* Create the filecheck sysfs file /sys/fs/ocfs2/<devname>/filecheck */ + ocfs2_filecheck_create_sysfs(sb); + return status; read_super_error: @@ -1609,33 +1607,25 @@ static int __init ocfs2_init(void) if (status < 0) goto out2; - ocfs2_wq = create_singlethread_workqueue("ocfs2_wq"); - if (!ocfs2_wq) { - status = -ENOMEM; - goto out3; - } - ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL); if (!ocfs2_debugfs_root) { status = -ENOMEM; mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n"); - goto out4; + goto out3; } ocfs2_set_locking_protocol(); status = register_quota_format(&ocfs2_quota_format); if (status < 0) - goto out4; + goto out3; status = register_filesystem(&ocfs2_fs_type); if (!status) return 0; unregister_quota_format(&ocfs2_quota_format); -out4: - destroy_workqueue(ocfs2_wq); - debugfs_remove(ocfs2_debugfs_root); out3: + debugfs_remove(ocfs2_debugfs_root); ocfs2_free_mem_caches(); out2: exit_ocfs2_uptodate_cache(); @@ -1646,11 +1636,6 @@ out1: static void __exit ocfs2_exit(void) { - if (ocfs2_wq) { - flush_workqueue(ocfs2_wq); - destroy_workqueue(ocfs2_wq); - } - unregister_quota_format(&ocfs2_quota_format); debugfs_remove(ocfs2_debugfs_root); @@ -1668,6 +1653,7 @@ static void ocfs2_put_super(struct super_block *sb) ocfs2_sync_blockdev(sb); ocfs2_dismount_volume(sb, 0); + ocfs2_filecheck_remove_sysfs(sb); } static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf) @@ -1740,8 +1726,8 @@ static void ocfs2_inode_init_once(void *data) spin_lock_init(&oi->ip_lock); ocfs2_extent_map_init(&oi->vfs_inode); INIT_LIST_HEAD(&oi->ip_io_markers); + INIT_LIST_HEAD(&oi->ip_unwritten_list); oi->ip_dir_start_lookup = 0; - mutex_init(&oi->ip_unaligned_aio); init_rwsem(&oi->ip_alloc_sem); init_rwsem(&oi->ip_xattr_sem); mutex_init(&oi->ip_io_mutex); @@ -2344,6 +2330,12 @@ static int ocfs2_initialize_super(struct super_block *sb, } cleancache_init_shared_fs(sb); + osb->ocfs2_wq = create_singlethread_workqueue("ocfs2_wq"); + if (!osb->ocfs2_wq) { + status = -ENOMEM; + mlog_errno(status); + } + bail: return status; } @@ -2531,6 +2523,12 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb) { /* This function assumes that the caller has the main osb resource */ + /* ocfs2_initialize_super() has already created this workqueue */ + if (osb->ocfs2_wq) { + flush_workqueue(osb->ocfs2_wq); + destroy_workqueue(osb->ocfs2_wq); + } + ocfs2_free_slot_info(osb); kfree(osb->osb_orphan_wipes); diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h index b477d0b1c7b6..b023e4f3d740 100644 --- a/fs/ocfs2/super.h +++ b/fs/ocfs2/super.h @@ -26,8 +26,6 @@ #ifndef OCFS2_SUPER_H #define OCFS2_SUPER_H -extern struct workqueue_struct *ocfs2_wq; - int ocfs2_publish_get_mount_state(struct ocfs2_super *osb, int node_num); diff --git a/fs/open.c b/fs/open.c index 55bdc75e2172..17cb6b1dab75 100644 --- a/fs/open.c +++ b/fs/open.c @@ -992,14 +992,12 @@ struct file *filp_open(const char *filename, int flags, umode_t mode) EXPORT_SYMBOL(filp_open); struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt, - const char *filename, int flags) + const char *filename, int flags, umode_t mode) { struct open_flags op; - int err = 
build_open_flags(flags, 0, &op); + int err = build_open_flags(flags, mode, &op); if (err) return ERR_PTR(err); - if (flags & O_CREAT) - return ERR_PTR(-EINVAL); return do_file_open_root(dentry, mnt, filename, &op); } EXPORT_SYMBOL(file_open_root); diff --git a/fs/orangefs/Kconfig b/fs/orangefs/Kconfig new file mode 100644 index 000000000000..1554c02489de --- /dev/null +++ b/fs/orangefs/Kconfig @@ -0,0 +1,6 @@ +config ORANGEFS_FS + tristate "ORANGEFS (Powered by PVFS) support" + select FS_POSIX_ACL + help + Orange is a parallel file system designed for use on high end + computing (HEC) systems. diff --git a/fs/orangefs/Makefile b/fs/orangefs/Makefile new file mode 100644 index 000000000000..a9d6a968fe6d --- /dev/null +++ b/fs/orangefs/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the ORANGEFS filesystem. +# + +obj-$(CONFIG_ORANGEFS_FS) += orangefs.o + +orangefs-objs := acl.o file.o orangefs-cache.o orangefs-utils.o xattr.o \ + dcache.o inode.o orangefs-sysfs.o orangefs-mod.o super.o \ + devorangefs-req.o namei.o symlink.o dir.o orangefs-bufmap.o \ + orangefs-debugfs.o waitqueue.o diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c new file mode 100644 index 000000000000..03f89dbb2512 --- /dev/null +++ b/fs/orangefs/acl.c @@ -0,0 +1,175 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" +#include <linux/posix_acl_xattr.h> +#include <linux/fs_struct.h> + +struct posix_acl *orangefs_get_acl(struct inode *inode, int type) +{ + struct posix_acl *acl; + int ret; + char *key = NULL, *value = NULL; + + switch (type) { + case ACL_TYPE_ACCESS: + key = ORANGEFS_XATTR_NAME_ACL_ACCESS; + break; + case ACL_TYPE_DEFAULT: + key = ORANGEFS_XATTR_NAME_ACL_DEFAULT; + break; + default: + gossip_err("orangefs_get_acl: bogus value of type %d\n", type); + return ERR_PTR(-EINVAL); + } + /* + * Rather than incurring a network call just to determine the exact + * length of the attribute, I just allocate a max length to save on + * the network call. Conceivably, we could pass NULL to + * orangefs_inode_getxattr() to probe the length of the value, but + * I don't do that for now. + */ + value = kmalloc(ORANGEFS_MAX_XATTR_VALUELEN, GFP_KERNEL); + if (value == NULL) + return ERR_PTR(-ENOMEM); + + gossip_debug(GOSSIP_ACL_DEBUG, + "inode %pU, key %s, type %d\n", + get_khandle_from_ino(inode), + key, + type); + ret = orangefs_inode_getxattr(inode, + "", + key, + value, + ORANGEFS_MAX_XATTR_VALUELEN); + /* if the key exists, convert it to an in-memory rep */ + if (ret > 0) { + acl = posix_acl_from_xattr(&init_user_ns, value, ret); + } else if (ret == -ENODATA || ret == -ENOSYS) { + acl = NULL; + } else { + gossip_err("inode %pU retrieving acl's failed with error %d\n", + get_khandle_from_ino(inode), + ret); + acl = ERR_PTR(ret); + } + /* kfree(NULL) is safe, so don't worry if value ever got used */ + kfree(value); + return acl; +} + +int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + int error = 0; + void *value = NULL; + size_t size = 0; + const char *name = NULL; + + switch (type) { + case ACL_TYPE_ACCESS: + name = ORANGEFS_XATTR_NAME_ACL_ACCESS; + if (acl) { + umode_t mode = inode->i_mode; + /* + * can we represent this with the traditional file + * mode permission bits? 
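+ * + * (A note on the call below: posix_acl_equiv_mode() returns a negative errno on failure, 0 when the ACL is fully representable as mode bits — in which case acl is dropped to NULL and the xattr removed — and a positive value when extended entries remain, i.e.: + * + *	error = posix_acl_equiv_mode(acl, &mode);	// <0 err, 0 equivalent, >0 keep xattr + * )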
+ */ + error = posix_acl_equiv_mode(acl, &mode); + if (error < 0) { + gossip_err("%s: posix_acl_equiv_mode err: %d\n", + __func__, + error); + return error; + } + + if (inode->i_mode != mode) + SetModeFlag(orangefs_inode); + inode->i_mode = mode; + mark_inode_dirty_sync(inode); + if (error == 0) + acl = NULL; + } + break; + case ACL_TYPE_DEFAULT: + name = ORANGEFS_XATTR_NAME_ACL_DEFAULT; + break; + default: + gossip_err("%s: invalid type %d!\n", __func__, type); + return -EINVAL; + } + + gossip_debug(GOSSIP_ACL_DEBUG, + "%s: inode %pU, key %s type %d\n", + __func__, get_khandle_from_ino(inode), + name, + type); + + if (acl) { + size = posix_acl_xattr_size(acl->a_count); + value = kmalloc(size, GFP_KERNEL); + if (!value) + return -ENOMEM; + + error = posix_acl_to_xattr(&init_user_ns, acl, value, size); + if (error < 0) + goto out; + } + + gossip_debug(GOSSIP_ACL_DEBUG, + "%s: name %s, value %p, size %zd, acl %p\n", + __func__, name, value, size, acl); + /* + * Go ahead and set the extended attribute now. NOTE: If acl + * was NULL, then value will be NULL and size will be 0, and + * that will translate to a removexattr. However, we don't want + * removexattr to complain if the attribute does not exist. + */ + error = orangefs_inode_setxattr(inode, "", name, value, size, 0); + +out: + kfree(value); + if (!error) + set_cached_acl(inode, type, acl); + return error; +} + +int orangefs_init_acl(struct inode *inode, struct inode *dir) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct posix_acl *default_acl, *acl; + umode_t mode = inode->i_mode; + int error = 0; + + ClearModeFlag(orangefs_inode); + + error = posix_acl_create(dir, &mode, &default_acl, &acl); + if (error) + return error; + + if (default_acl) { + error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); + posix_acl_release(default_acl); + } + + if (acl) { + if (!error) + error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS); + posix_acl_release(acl); + } + + /* If mode of the inode was changed, then do a forcible ->setattr */ + if (mode != inode->i_mode) { + SetModeFlag(orangefs_inode); + inode->i_mode = mode; + orangefs_flush_inode(inode); + } + + return error; +} diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c new file mode 100644 index 000000000000..5dfc4f3cfe68 --- /dev/null +++ b/fs/orangefs/dcache.c @@ -0,0 +1,138 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* + * Implementation of dentry (directory cache) functions. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" + +/* Returns 1 if dentry can still be trusted, else 0. 
*/ +static int orangefs_revalidate_lookup(struct dentry *dentry) +{ + struct dentry *parent_dentry = dget_parent(dentry); + struct inode *parent_inode = parent_dentry->d_inode; + struct orangefs_inode_s *parent = ORANGEFS_I(parent_inode); + struct inode *inode = dentry->d_inode; + struct orangefs_kernel_op_s *new_op; + int ret = 0; + int err = 0; + + gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: attempting lookup.\n", __func__); + + new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP); + if (!new_op) + goto out_put_parent; + + new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW; + new_op->upcall.req.lookup.parent_refn = parent->refn; + strncpy(new_op->upcall.req.lookup.d_name, + dentry->d_name.name, + ORANGEFS_NAME_MAX); + + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s:%s:%d interrupt flag [%d]\n", + __FILE__, + __func__, + __LINE__, + get_interruptible_flag(parent_inode)); + + err = service_operation(new_op, "orangefs_lookup", + get_interruptible_flag(parent_inode)); + + /* Positive dentry: reject if error or not the same inode. */ + if (inode) { + if (err) { + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s:%s:%d lookup failure.\n", + __FILE__, __func__, __LINE__); + goto out_drop; + } + if (!match_handle(new_op->downcall.resp.lookup.refn.khandle, + inode)) { + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s:%s:%d no match.\n", + __FILE__, __func__, __LINE__); + goto out_drop; + } + + /* Negative dentry: reject if success or error other than ENOENT. */ + } else { + gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: negative dentry.\n", + __func__); + if (!err || err != -ENOENT) { + if (new_op->downcall.status != 0) + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s:%s:%d lookup failure.\n", + __FILE__, __func__, __LINE__); + goto out_drop; + } + } + + ret = 1; +out_release_op: + op_release(new_op); +out_put_parent: + dput(parent_dentry); + return ret; +out_drop: + gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d revalidate failed\n", + __FILE__, __func__, __LINE__); + goto out_release_op; +} + +/* + * Verify that dentry is valid. + * + * Should return 1 if dentry can still be trusted, else 0. + */ +static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags) +{ + int ret; + + if (flags & LOOKUP_RCU) + return -ECHILD; + + gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: called on dentry %p.\n", + __func__, dentry); + + /* skip root handle lookups. */ + if (dentry->d_inode && is_root_handle(dentry->d_inode)) + return 1; + + /* + * If this passes, the positive dentry still exists or the negative + * dentry still does not exist. + */ + if (!orangefs_revalidate_lookup(dentry)) + return 0; + + /* We do not need to continue with negative dentries. */ + if (!dentry->d_inode) + goto out; + + /* Now we must perform a getattr to validate the inode contents. 
*/ + + ret = orangefs_inode_check_changed(dentry->d_inode); + if (ret < 0) { + gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d getattr failure.\n", + __FILE__, __func__, __LINE__); + return 0; + } + if (ret == 0) + return 0; + +out: + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s: negative dentry or positive dentry and inode valid.\n", + __func__); + return 1; +} + +const struct dentry_operations orangefs_dentry_operations = { + .d_revalidate = orangefs_d_revalidate, +}; diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c new file mode 100644 index 000000000000..db170beba797 --- /dev/null +++ b/fs/orangefs/devorangefs-req.c @@ -0,0 +1,943 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * Changes by Acxiom Corporation to add protocol version to kernel + * communication, Copyright Acxiom Corporation, 2005. + * + * See COPYING in top-level directory. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-dev-proto.h" +#include "orangefs-bufmap.h" + +#include <linux/debugfs.h> +#include <linux/slab.h> + +/* this file implements the /dev/pvfs2-req device node */ + +static int open_access_count; + +#define DUMP_DEVICE_ERROR() \ +do { \ + gossip_err("*****************************************************\n");\ + gossip_err("ORANGEFS Device Error: You cannot open the device file "); \ + gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \ + "are no ", ORANGEFS_REQDEVICE_NAME); \ + gossip_err("instances of a program using this device\ncurrently " \ + "running. (You must verify this!)\n"); \ + gossip_err("For example, you can use the lsof program as follows:\n");\ + gossip_err("'lsof | grep %s' (run this as root)\n", \ + ORANGEFS_REQDEVICE_NAME); \ + gossip_err(" open_access_count = %d\n", open_access_count); \ + gossip_err("*****************************************************\n");\ +} while (0) + +static int hash_func(__u64 tag, int table_size) +{ + return do_div(tag, (unsigned int)table_size); +} + +static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op) +{ + int index = hash_func(op->tag, hash_table_size); + + list_add_tail(&op->list, &htable_ops_in_progress[index]); +} + +/* + * find the op with this tag and remove it from the in progress + * hash table. 
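+ * + * (The bucket index comes from hash_func() above, which leans on do_div(): it divides the 64-bit tag in place and returns the remainder, so effectively + * + *	index = tag % table_size;	// remainder picks the hash bucket + * )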
+ */ +static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag) +{ + struct orangefs_kernel_op_s *op, *next; + int index; + + index = hash_func(tag, hash_table_size); + + spin_lock(&htable_ops_in_progress_lock); + list_for_each_entry_safe(op, + next, + &htable_ops_in_progress[index], + list) { + if (op->tag == tag && !op_state_purged(op) && + !op_state_given_up(op)) { + list_del_init(&op->list); + spin_unlock(&htable_ops_in_progress_lock); + return op; + } + } + + spin_unlock(&htable_ops_in_progress_lock); + return NULL; +} + +/* Returns whether any filesystems are still pending remount */ +static int mark_all_pending_mounts(void) +{ + int unmounted = 1; + struct orangefs_sb_info_s *orangefs_sb = NULL; + + spin_lock(&orangefs_superblocks_lock); + list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) { + /* All of these file systems require a remount */ + orangefs_sb->mount_pending = 1; + unmounted = 0; + } + spin_unlock(&orangefs_superblocks_lock); + return unmounted; +} + +/* + * Determine if a given file system needs to be remounted or not + * Returns -1 on error + * 0 if already mounted + * 1 if needs remount + */ +static int fs_mount_pending(__s32 fsid) +{ + int mount_pending = -1; + struct orangefs_sb_info_s *orangefs_sb = NULL; + + spin_lock(&orangefs_superblocks_lock); + list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) { + if (orangefs_sb->fs_id == fsid) { + mount_pending = orangefs_sb->mount_pending; + break; + } + } + spin_unlock(&orangefs_superblocks_lock); + return mount_pending; +} + +static int orangefs_devreq_open(struct inode *inode, struct file *file) +{ + int ret = -EINVAL; + + if (!(file->f_flags & O_NONBLOCK)) { + gossip_err("%s: device cannot be opened in blocking mode\n", + __func__); + goto out; + } + ret = -EACCES; + gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n"); + mutex_lock(&devreq_mutex); + + if (open_access_count == 0) { + open_access_count = 1; + ret = 0; + } else { + DUMP_DEVICE_ERROR(); + } + mutex_unlock(&devreq_mutex); + +out: + + gossip_debug(GOSSIP_DEV_DEBUG, + "pvfs2-client-core: open device complete (ret = %d)\n", + ret); + return ret; +} + +/* Function for read() callers into the device */ +static ssize_t orangefs_devreq_read(struct file *file, + char __user *buf, + size_t count, loff_t *offset) +{ + struct orangefs_kernel_op_s *op, *temp; + __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION; + static __s32 magic = ORANGEFS_DEVREQ_MAGIC; + struct orangefs_kernel_op_s *cur_op = NULL; + unsigned long ret; + + /* We do not support blocking IO. */ + if (!(file->f_flags & O_NONBLOCK)) { + gossip_err("%s: blocking read from client-core.\n", + __func__); + return -EINVAL; + } + + /* + * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then + * always read with that size buffer. + */ + if (count != MAX_DEV_REQ_UPSIZE) { + gossip_err("orangefs: client-core tried to read wrong size\n"); + return -EINVAL; + } + +restart: + /* Get next op (if any) from top of list. */ + spin_lock(&orangefs_request_list_lock); + list_for_each_entry_safe(op, temp, &orangefs_request_list, list) { + __s32 fsid; + /* This lock is held past the end of the loop when we break. */ + spin_lock(&op->lock); + if (unlikely(op_state_purged(op) || op_state_given_up(op))) { + spin_unlock(&op->lock); + continue; + } + + fsid = fsid_of_op(op); + if (fsid != ORANGEFS_FS_ID_NULL) { + int ret; + /* Skip ops whose filesystem needs to be mounted. 
*/ + ret = fs_mount_pending(fsid); + if (ret == 1) { + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: mount pending, skipping op tag " + "%llu %s\n", + __func__, + llu(op->tag), + get_opname_string(op)); + spin_unlock(&op->lock); + continue; + /* + * Skip ops whose filesystem we don't know about unless + * it is being mounted. + */ + /* XXX: is there a better way to detect this? */ + } else if (ret == -1 && + !(op->upcall.type == + ORANGEFS_VFS_OP_FS_MOUNT || + op->upcall.type == + ORANGEFS_VFS_OP_GETATTR)) { + gossip_debug(GOSSIP_DEV_DEBUG, + "orangefs: skipping op tag %llu %s\n", + llu(op->tag), get_opname_string(op)); + gossip_err( + "orangefs: ERROR: fs_mount_pending %d\n", + fsid); + spin_unlock(&op->lock); + continue; + } + } + /* + * Either this op does not pertain to a filesystem, is mounting + * a filesystem, or pertains to a mounted filesystem. Let it + * through. + */ + cur_op = op; + break; + } + + /* + * At this point we either have a valid op and can continue or have not + * found an op and must ask the client to try again later. + */ + if (!cur_op) { + spin_unlock(&orangefs_request_list_lock); + return -EAGAIN; + } + + gossip_debug(GOSSIP_DEV_DEBUG, "%s: reading op tag %llu %s\n", + __func__, + llu(cur_op->tag), + get_opname_string(cur_op)); + + /* + * Such an op should never be on the list in the first place. If so, we + * will abort. + */ + if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) { + gossip_err("orangefs: ERROR: Current op already queued.\n"); + list_del_init(&cur_op->list); + spin_unlock(&cur_op->lock); + spin_unlock(&orangefs_request_list_lock); + return -EAGAIN; + } + + list_del_init(&cur_op->list); + spin_unlock(&orangefs_request_list_lock); + + spin_unlock(&cur_op->lock); + + /* Push the upcall out. */ + ret = copy_to_user(buf, &proto_ver, sizeof(__s32)); + if (ret != 0) + goto error; + ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32)); + if (ret != 0) + goto error; + ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64)); + if (ret != 0) + goto error; + ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall, + sizeof(struct orangefs_upcall_s)); + if (ret != 0) + goto error; + + spin_lock(&htable_ops_in_progress_lock); + spin_lock(&cur_op->lock); + if (unlikely(op_state_given_up(cur_op))) { + spin_unlock(&cur_op->lock); + spin_unlock(&htable_ops_in_progress_lock); + complete(&cur_op->waitq); + goto restart; + } + + /* + * Set the operation to be in progress and move it between lists since + * it has been sent to the client. + */ + set_op_state_inprogress(cur_op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: 1 op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(cur_op), + cur_op->op_state, + current->comm); + orangefs_devreq_add_op(cur_op); + spin_unlock(&cur_op->lock); + spin_unlock(&htable_ops_in_progress_lock); + + /* The client only asks to read one size buffer. */ + return MAX_DEV_REQ_UPSIZE; +error: + /* + * We were unable to copy the op data to the client. Put the op back in + * list. If client has crashed, the op will be purged later when the + * device is released. 
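+ * + * (For reference, the layout pushed by the copy_to_user() calls above is, in order: + * + *	__s32 proto_ver; __s32 magic; __u64 tag; + *	struct orangefs_upcall_s upcall; + * + * within a MAX_DEV_REQ_UPSIZE buffer — the one size the client is allowed to read with.)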
+ */ + gossip_err("orangefs: Failed to copy data to user space\n"); + spin_lock(&orangefs_request_list_lock); + spin_lock(&cur_op->lock); + if (likely(!op_state_given_up(cur_op))) { + set_op_state_waiting(cur_op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: 2 op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(cur_op), + cur_op->op_state, + current->comm); + list_add(&cur_op->list, &orangefs_request_list); + spin_unlock(&cur_op->lock); + } else { + spin_unlock(&cur_op->lock); + complete(&cur_op->waitq); + } + spin_unlock(&orangefs_request_list_lock); + return -EFAULT; +} + +/* + * Function for writev() callers into the device. + * + * Userspace should have written: + * - __u32 version + * - __u32 magic + * - __u64 tag + * - struct orangefs_downcall_s + * - trailer buffer (in the case of READDIR operations) + */ +static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb, + struct iov_iter *iter) +{ + ssize_t ret; + struct orangefs_kernel_op_s *op = NULL; + struct { + __u32 version; + __u32 magic; + __u64 tag; + } head; + int total = ret = iov_iter_count(iter); + int n; + int downcall_size = sizeof(struct orangefs_downcall_s); + int head_size = sizeof(head); + + gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n", + __func__, + total, + ret); + + if (total < MAX_DEV_REQ_DOWNSIZE) { + gossip_err("%s: total:%d: must be at least:%u:\n", + __func__, + total, + (unsigned int) MAX_DEV_REQ_DOWNSIZE); + return -EFAULT; + } + + n = copy_from_iter(&head, head_size, iter); + if (n < head_size) { + gossip_err("%s: failed to copy head.\n", __func__); + return -EFAULT; + } + + if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) { + gossip_err("%s: userspace claims version" + "%d, minimum version required: %d.\n", + __func__, + head.version, + ORANGEFS_MINIMUM_USERSPACE_VERSION); + return -EPROTO; + } + + if (head.magic != ORANGEFS_DEVREQ_MAGIC) { + gossip_err("Error: Device magic number does not match.\n"); + return -EPROTO; + } + + /* remove the op from the in progress hash table */ + op = orangefs_devreq_remove_op(head.tag); + if (!op) { + gossip_err("WARNING: No one's waiting for tag %llu\n", + llu(head.tag)); + return ret; + } + + n = copy_from_iter(&op->downcall, downcall_size, iter); + if (n != downcall_size) { + gossip_err("%s: failed to copy downcall.\n", __func__); + goto Efault; + } + + if (op->downcall.status) + goto wakeup; + + /* + * We've successfully peeled off the head and the downcall. + * Something has gone awry if total doesn't equal the + * sum of head_size, downcall_size and trailer_size. + */ + if ((head_size + downcall_size + op->downcall.trailer_size) != total) { + gossip_err("%s: funky write, head_size:%d" + ": downcall_size:%d: trailer_size:%lld" + ": total size:%d:\n", + __func__, + head_size, + downcall_size, + op->downcall.trailer_size, + total); + goto Efault; + } + + /* Only READDIR operations should have trailers. */ + if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) && + (op->downcall.trailer_size != 0)) { + gossip_err("%s: %x operation with trailer.", + __func__, + op->downcall.type); + goto Efault; + } + + /* READDIR operations should always have trailers. 
+ */ + if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) && + (op->downcall.trailer_size == 0)) { + gossip_err("%s: %x operation with no trailer.", + __func__, + op->downcall.type); + goto Efault; + } + + if (op->downcall.type != ORANGEFS_VFS_OP_READDIR) + goto wakeup; + + op->downcall.trailer_buf = + vmalloc(op->downcall.trailer_size); + if (op->downcall.trailer_buf == NULL) { + gossip_err("%s: failed trailer vmalloc.\n", + __func__); + goto Enomem; + } + memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size); + n = copy_from_iter(op->downcall.trailer_buf, + op->downcall.trailer_size, + iter); + if (n != op->downcall.trailer_size) { + gossip_err("%s: failed to copy trailer.\n", __func__); + vfree(op->downcall.trailer_buf); + goto Efault; + } + +wakeup: + /* + * Return to vfs waitqueue, and back to service_operation + * through wait_for_matching_downcall. + */ + spin_lock(&op->lock); + if (unlikely(op_is_cancel(op))) { + spin_unlock(&op->lock); + put_cancel(op); + } else if (unlikely(op_state_given_up(op))) { + spin_unlock(&op->lock); + complete(&op->waitq); + } else { + set_op_state_serviced(op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(op), + op->op_state, + current->comm); + spin_unlock(&op->lock); + } + return ret; + +Efault: + op->downcall.status = -(ORANGEFS_ERROR_BIT | 9); + ret = -EFAULT; + goto wakeup; + +Enomem: + op->downcall.status = -(ORANGEFS_ERROR_BIT | 8); + ret = -ENOMEM; + goto wakeup; +} + +/* + * NOTE: gets called when the last reference to this device is dropped. + * Using the open_access_count variable, we enforce a reference count + * on this file so that it can be opened by only one process at a time. + * The devreq_mutex is used to make sure all i/o has completed + * before we call orangefs_bufmap_finalize, and to handle similar + * tricky situations. + */ +static int orangefs_devreq_release(struct inode *inode, struct file *file) +{ + int unmounted = 0; + + gossip_debug(GOSSIP_DEV_DEBUG, + "%s:pvfs2-client-core: exiting, closing device\n", + __func__); + + mutex_lock(&devreq_mutex); + orangefs_bufmap_finalize(); + + open_access_count = -1; + + unmounted = mark_all_pending_mounts(); + gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n", + (unmounted ? "UNMOUNTED" : "MOUNTED")); + + purge_waiting_ops(); + purge_inprogress_ops(); + + orangefs_bufmap_run_down(); + + gossip_debug(GOSSIP_DEV_DEBUG, + "pvfs2-client-core: device close complete\n"); + open_access_count = 0; + mutex_unlock(&devreq_mutex); + return 0; +} + +int is_daemon_in_service(void) +{ + int in_service; + + /* + * This function checks whether client-core is alive, + * based on the access count we maintain on the device. + */ + mutex_lock(&devreq_mutex); + in_service = open_access_count == 1 ? 0 : -EIO; + mutex_unlock(&devreq_mutex); + return in_service; +} + +bool __is_daemon_in_service(void) +{ + return open_access_count == 1; +} + +static inline long check_ioctl_command(unsigned int command) +{ + /* Check for valid ioctl codes */ + if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) { + gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? 
[cmd %x, magic %x != %x]\n", + command, + _IOC_TYPE(command), + ORANGEFS_DEV_MAGIC); + return -EINVAL; + } + /* and valid ioctl commands */ + if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) { + gossip_err("Invalid ioctl command number [%d >= %d]\n", + _IOC_NR(command), ORANGEFS_DEV_MAXNR); + return -ENOIOCTLCMD; + } + return 0; +} + +static long dispatch_ioctl_command(unsigned int command, unsigned long arg) +{ + static __s32 magic = ORANGEFS_DEVREQ_MAGIC; + static __s32 max_up_size = MAX_DEV_REQ_UPSIZE; + static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE; + struct ORANGEFS_dev_map_desc user_desc; + int ret = 0; + struct dev_mask_info_s mask_info = { 0 }; + struct dev_mask2_info_s mask2_info = { 0, 0 }; + int upstream_kmod = 1; + struct orangefs_sb_info_s *orangefs_sb; + + /* mtmoore: add locking here */ + + switch (command) { + case ORANGEFS_DEV_GET_MAGIC: + return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ? + -EIO : + 0); + case ORANGEFS_DEV_GET_MAX_UPSIZE: + return ((put_user(max_up_size, + (__s32 __user *) arg) == -EFAULT) ? + -EIO : + 0); + case ORANGEFS_DEV_GET_MAX_DOWNSIZE: + return ((put_user(max_down_size, + (__s32 __user *) arg) == -EFAULT) ? + -EIO : + 0); + case ORANGEFS_DEV_MAP: + ret = copy_from_user(&user_desc, + (struct ORANGEFS_dev_map_desc __user *) + arg, + sizeof(struct ORANGEFS_dev_map_desc)); + /* WTF -EIO and not -EFAULT? */ + return ret ? -EIO : orangefs_bufmap_initialize(&user_desc); + case ORANGEFS_DEV_REMOUNT_ALL: + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: got ORANGEFS_DEV_REMOUNT_ALL\n", + __func__); + + /* + * Remount all mounted orangefs volumes to regain the lost + * dynamic mount tables (if any) -- NOTE: this is done + * without keeping the superblock list locked due to the + * upcall/downcall waiting. Also, the request mutex is + * used to ensure that no operations will be serviced until + * all of the remounts are serviced (so that ops issued + * between remounts do not fail). + */ + ret = mutex_lock_interruptible(&request_mutex); + if (ret < 0) + return ret; + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: priority remount in progress\n", + __func__); + spin_lock(&orangefs_superblocks_lock); + list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) { + /* + * We have to drop the spinlock, so entries can be + * removed. They can't be freed, though, so we just + * keep the forward pointers and zero the back ones - + * that way we can get to the rest of the list. 
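+ * + * (Concretely, an entry unlinked while the lock was dropped is recognized by its zeroed back pointer, which is exactly what the check below tests: + * + *	if (!orangefs_sb->list.prev) + *		continue;	// superblock left the list; skip it + * )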
+ */
+ if (!orangefs_sb->list.prev)
+ continue;
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "%s: Remounting SB %p\n",
+ __func__,
+ orangefs_sb);
+
+ spin_unlock(&orangefs_superblocks_lock);
+ ret = orangefs_remount(orangefs_sb);
+ spin_lock(&orangefs_superblocks_lock);
+ if (ret) {
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "SB %p remount failed\n",
+ orangefs_sb);
+ break;
+ }
+ }
+ spin_unlock(&orangefs_superblocks_lock);
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "%s: priority remount complete\n",
+ __func__);
+ mutex_unlock(&request_mutex);
+ return ret;
+
+ case ORANGEFS_DEV_UPSTREAM:
+ ret = copy_to_user((void __user *)arg,
+ &upstream_kmod,
+ sizeof(upstream_kmod));
+
+ if (ret != 0)
+ return -EIO;
+ else
+ return ret;
+
+ case ORANGEFS_DEV_CLIENT_MASK:
+ ret = copy_from_user(&mask2_info,
+ (void __user *)arg,
+ sizeof(struct dev_mask2_info_s));
+
+ if (ret != 0)
+ return -EIO;
+
+ client_debug_mask.mask1 = mask2_info.mask1_value;
+ client_debug_mask.mask2 = mask2_info.mask2_value;
+
+ pr_info("%s: client debug mask has been received "
+ ":%llx: :%llx:\n",
+ __func__,
+ (unsigned long long)client_debug_mask.mask1,
+ (unsigned long long)client_debug_mask.mask2);
+
+ return ret;
+
+ case ORANGEFS_DEV_CLIENT_STRING:
+ ret = copy_from_user(&client_debug_array_string,
+ (void __user *)arg,
+ ORANGEFS_MAX_DEBUG_STRING_LEN);
+ /*
+ * The real client-core makes an effort to ensure that the
+ * strings it sends actually fit in this buffer. We're going
+ * to use string functions on what we got anyway, so make
+ * this extra effort to keep from flowing out of the buffer
+ * when we use them, even if what we ended up with here is
+ * somehow garbage.
+ */
+ client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
+ '\0';
+
+ if (ret != 0) {
+ pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
+ __func__);
+ return -EIO;
+ }
+
+ pr_info("%s: client debug array string has been received.\n",
+ __func__);
+
+ if (!help_string_initialized) {
+
+ /* Free the "we don't know yet" default string... */
+ kfree(debug_help_string);
+
+ /* build a proper debug help string */
+ if (orangefs_prepare_debugfs_help_string(0)) {
+ gossip_err("%s: no debug help string\n",
+ __func__);
+ return -EIO;
+ }
+
+ /* Replace the boilerplate boot-time debug-help file. */
+ debugfs_remove(help_file_dentry);
+
+ help_file_dentry =
+ debugfs_create_file(
+ ORANGEFS_KMOD_DEBUG_HELP_FILE,
+ 0444,
+ debug_dir,
+ debug_help_string,
+ &debug_help_fops);
+
+ if (!help_file_dentry) {
+ gossip_err("%s: debugfs_create_file failed for"
+ " :%s:!\n",
+ __func__,
+ ORANGEFS_KMOD_DEBUG_HELP_FILE);
+ return -EIO;
+ }
+ }
+
+ debug_mask_to_string(&client_debug_mask, 1);
+
+ debugfs_remove(client_debug_dentry);
+
+ orangefs_client_debug_init();
+
+ help_string_initialized++;
+
+ return ret;
+
+ case ORANGEFS_DEV_DEBUG:
+ ret = copy_from_user(&mask_info,
+ (void __user *)arg,
+ sizeof(mask_info));
+
+ if (ret != 0)
+ return -EIO;
+
+ if (mask_info.mask_type == KERNEL_MASK) {
+ if ((mask_info.mask_value == 0)
+ && (kernel_mask_set_mod_init)) {
+ /*
+ * the kernel debug mask was set when the
+ * kernel module was loaded; don't override
+ * it if the client-core was started without
+ * a value for ORANGEFS_KMODMASK.
+ */
+ return 0;
+ }
+ debug_mask_to_string(&mask_info.mask_value,
+ mask_info.mask_type);
+ gossip_debug_mask = mask_info.mask_value;
+ pr_info("%s: kernel debug mask has been modified to "
+ ":%s: :%llx:\n",
+ __func__,
+ kernel_debug_string,
+ (unsigned long long)gossip_debug_mask);
+ } else if (mask_info.mask_type == CLIENT_MASK) {
+ debug_mask_to_string(&mask_info.mask_value,
+ mask_info.mask_type);
+ pr_info("%s: client debug mask has been modified to "
+ ":%s: :%llx:\n",
+ __func__,
+ client_debug_string,
+ llu(mask_info.mask_value));
+ } else {
+ gossip_lerr("Invalid mask type....\n");
+ return -EINVAL;
+ }
+
+ return ret;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return -ENOIOCTLCMD;
+}
+
+static long orangefs_devreq_ioctl(struct file *file,
+ unsigned int command, unsigned long arg)
+{
+ long ret;
+
+ /* Check for properly constructed commands */
+ ret = check_ioctl_command(command);
+ if (ret < 0)
+ return (int)ret;
+
+ return (int)dispatch_ioctl_command(command, arg);
+}
+
+#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
+
+/* Compat structure for the ORANGEFS_DEV_MAP ioctl */
+struct ORANGEFS_dev_map_desc32 {
+ compat_uptr_t ptr;
+ __s32 total_size;
+ __s32 size;
+ __s32 count;
+};
+
+static unsigned long translate_dev_map26(unsigned long args, long *error)
+{
+ struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
+ /*
+ * Depending on the architecture, allocate some space on the
+ * user-call-stack based on our expected layout.
+ */
+ struct ORANGEFS_dev_map_desc __user *p =
+ compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+
+ *error = 0;
+ /* get the ptr from the 32 bit user-space */
+ if (get_user(addr, &p32->ptr))
+ goto err;
+ /* try to put that into a 64-bit layout */
+ if (put_user(compat_ptr(addr), &p->ptr))
+ goto err;
+ /* copy the remaining fields */
+ if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
+ goto err;
+ if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
+ goto err;
+ if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
+ goto err;
+ return (unsigned long)p;
+err:
+ *error = -EFAULT;
+ return 0;
+}
+
+/*
+ * ioctl handler for 32 bit user-space apps, used when the kernel
+ * module is compiled as a 64 bit one
+ */
+static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long args)
+{
+ long ret;
+ unsigned long arg = args;
+
+ /* Check for properly constructed commands */
+ ret = check_ioctl_command(cmd);
+ if (ret < 0)
+ return ret;
+ if (cmd == ORANGEFS_DEV_MAP) {
+ /*
+ * convert the arguments to what we expect internally
+ * in kernel space
+ */
+ arg = translate_dev_map26(args, &ret);
+ if (ret < 0) {
+ gossip_err("Could not translate dev map\n");
+ return ret;
+ }
+ }
+ /* no other ioctl requires translation */
+ return dispatch_ioctl_command(cmd, arg);
+}
+
+#endif /* CONFIG_COMPAT is in .config */
+
+/* the assigned character device major number */
+static int orangefs_dev_major;
+
+/*
+ * Initialize orangefs device specific state:
+ * Must be called at module load time only
+ */
+int orangefs_dev_init(void)
+{
+ /* register orangefs-req device */
+ orangefs_dev_major = register_chrdev(0,
+ ORANGEFS_REQDEVICE_NAME,
+ &orangefs_devreq_file_operations);
+ if (orangefs_dev_major < 0) {
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "Failed to register /dev/%s (error %d)\n",
+ ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
+ return orangefs_dev_major;
+ }
+
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "*** /dev/%s character device registered ***\n",
+ ORANGEFS_REQDEVICE_NAME);
+
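+ /*
+ * With a dynamically assigned major, the device node may need to
+ * be created by hand (or by udev); the hint printed next is only
+ * illustrative, e.g. "mknod /dev/pvfs2-req c 249 0" if 249 happened
+ * to be the major number we were handed above.
+ */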
gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n", + ORANGEFS_REQDEVICE_NAME, orangefs_dev_major); + return 0; +} + +void orangefs_dev_cleanup(void) +{ + unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME); + gossip_debug(GOSSIP_DEV_DEBUG, + "*** /dev/%s character device unregistered ***\n", + ORANGEFS_REQDEVICE_NAME); +} + +static unsigned int orangefs_devreq_poll(struct file *file, + struct poll_table_struct *poll_table) +{ + int poll_revent_mask = 0; + + poll_wait(file, &orangefs_request_list_waitq, poll_table); + + if (!list_empty(&orangefs_request_list)) + poll_revent_mask |= POLL_IN; + return poll_revent_mask; +} + +const struct file_operations orangefs_devreq_file_operations = { + .owner = THIS_MODULE, + .read = orangefs_devreq_read, + .write_iter = orangefs_devreq_write_iter, + .open = orangefs_devreq_open, + .release = orangefs_devreq_release, + .unlocked_ioctl = orangefs_devreq_ioctl, + +#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */ + .compat_ioctl = orangefs_devreq_compat_ioctl, +#endif + .poll = orangefs_devreq_poll +}; diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c new file mode 100644 index 000000000000..ba7dec40771e --- /dev/null +++ b/fs/orangefs/dir.c @@ -0,0 +1,398 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" + +/* + * decode routine used by kmod to deal with the blob sent from + * userspace for readdirs. The blob contains zero or more of these + * sub-blobs: + * __u32 - represents length of the character string that follows. + * string - between 1 and ORANGEFS_NAME_MAX bytes long. + * padding - (if needed) to cause the __u32 plus the string to be + * eight byte aligned. + * khandle - sizeof(khandle) bytes. + */ +static long decode_dirents(char *ptr, size_t size, + struct orangefs_readdir_response_s *readdir) +{ + int i; + struct orangefs_readdir_response_s *rd = + (struct orangefs_readdir_response_s *) ptr; + char *buf = ptr; + int khandle_size = sizeof(struct orangefs_khandle); + size_t offset = offsetof(struct orangefs_readdir_response_s, + dirent_array); + /* 8 reflects eight byte alignment */ + int smallest_blob = khandle_size + 8; + __u32 len; + int aligned_len; + int sizeof_u32 = sizeof(__u32); + long ret; + + gossip_debug(GOSSIP_DIR_DEBUG, "%s: size:%zu:\n", __func__, size); + + /* size is = offset on empty dirs, > offset on non-empty dirs... 
*/ + if (size < offset) { + gossip_err("%s: size:%zu: offset:%zu:\n", + __func__, + size, + offset); + ret = -EINVAL; + goto out; + } + + if ((size == offset) && (readdir->orangefs_dirent_outcount != 0)) { + gossip_err("%s: size:%zu: dirent_outcount:%d:\n", + __func__, + size, + readdir->orangefs_dirent_outcount); + ret = -EINVAL; + goto out; + } + + readdir->token = rd->token; + readdir->orangefs_dirent_outcount = rd->orangefs_dirent_outcount; + readdir->dirent_array = kcalloc(readdir->orangefs_dirent_outcount, + sizeof(*readdir->dirent_array), + GFP_KERNEL); + if (readdir->dirent_array == NULL) { + gossip_err("%s: kcalloc failed.\n", __func__); + ret = -ENOMEM; + goto out; + } + + buf += offset; + size -= offset; + + for (i = 0; i < readdir->orangefs_dirent_outcount; i++) { + if (size < smallest_blob) { + gossip_err("%s: size:%zu: smallest_blob:%d:\n", + __func__, + size, + smallest_blob); + ret = -EINVAL; + goto free; + } + + len = *(__u32 *)buf; + if ((len < 1) || (len > ORANGEFS_NAME_MAX)) { + gossip_err("%s: len:%d:\n", __func__, len); + ret = -EINVAL; + goto free; + } + + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: size:%zu: len:%d:\n", + __func__, + size, + len); + + readdir->dirent_array[i].d_name = buf + sizeof_u32; + readdir->dirent_array[i].d_length = len; + + /* + * Calculate "aligned" length of this string and its + * associated __u32 descriptor. + */ + aligned_len = ((sizeof_u32 + len + 1) + 7) & ~7; + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: aligned_len:%d:\n", + __func__, + aligned_len); + + /* + * The end of the blob should coincide with the end + * of the last sub-blob. + */ + if (size < aligned_len + khandle_size) { + gossip_err("%s: ran off the end of the blob.\n", + __func__); + ret = -EINVAL; + goto free; + } + size -= aligned_len + khandle_size; + + buf += aligned_len; + + readdir->dirent_array[i].khandle = + *(struct orangefs_khandle *) buf; + buf += khandle_size; + } + ret = buf - ptr; + gossip_debug(GOSSIP_DIR_DEBUG, "%s: returning:%ld:\n", __func__, ret); + goto out; + +free: + kfree(readdir->dirent_array); + readdir->dirent_array = NULL; + +out: + return ret; +} + +/* + * Read directory entries from an instance of an open directory. + */ +static int orangefs_readdir(struct file *file, struct dir_context *ctx) +{ + int ret = 0; + int buffer_index; + /* + * ptoken supports Orangefs' distributed directory logic, added + * in 2.9.2. + */ + __u64 *ptoken = file->private_data; + __u64 pos = 0; + ino_t ino = 0; + struct dentry *dentry = file->f_path.dentry; + struct orangefs_kernel_op_s *new_op = NULL; + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode); + int buffer_full = 0; + struct orangefs_readdir_response_s readdir_response; + void *dents_buf; + int i = 0; + int len = 0; + ino_t current_ino = 0; + char *current_entry = NULL; + long bytes_decoded; + + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: ctx->pos:%lld, ptoken = %llu\n", + __func__, + lld(ctx->pos), + llu(*ptoken)); + + pos = (__u64) ctx->pos; + + /* are we done? */ + if (pos == ORANGEFS_READDIR_END) { + gossip_debug(GOSSIP_DIR_DEBUG, + "Skipping to termination path\n"); + return 0; + } + + gossip_debug(GOSSIP_DIR_DEBUG, + "orangefs_readdir called on %s (pos=%llu)\n", + dentry->d_name.name, llu(pos)); + + memset(&readdir_response, 0, sizeof(readdir_response)); + + new_op = op_alloc(ORANGEFS_VFS_OP_READDIR); + if (!new_op) + return -ENOMEM; + + /* + * Only the indices are shared. No memory is actually shared, but the + * mechanism is used. 
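+ * That is, readdir only borrows a slot index from the shared
+ * buffer table so the client-core can tell requests apart; the
+ * dirent payload itself comes back through the downcall trailer
+ * rather than through the mapped pages.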
+ */ + new_op->uses_shared_memory = 1; + new_op->upcall.req.readdir.refn = orangefs_inode->refn; + new_op->upcall.req.readdir.max_dirent_count = + ORANGEFS_MAX_DIRENT_COUNT_READDIR; + + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: upcall.req.readdir.refn.khandle: %pU\n", + __func__, + &new_op->upcall.req.readdir.refn.khandle); + + new_op->upcall.req.readdir.token = *ptoken; + +get_new_buffer_index: + buffer_index = orangefs_readdir_index_get(); + if (buffer_index < 0) { + ret = buffer_index; + gossip_lerr("orangefs_readdir: orangefs_readdir_index_get() failure (%d)\n", + ret); + goto out_free_op; + } + new_op->upcall.req.readdir.buf_index = buffer_index; + + ret = service_operation(new_op, + "orangefs_readdir", + get_interruptible_flag(dentry->d_inode)); + + gossip_debug(GOSSIP_DIR_DEBUG, + "Readdir downcall status is %d. ret:%d\n", + new_op->downcall.status, + ret); + + orangefs_readdir_index_put(buffer_index); + + if (ret == -EAGAIN && op_state_purged(new_op)) { + /* Client-core indices are invalid after it restarted. */ + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: Getting new buffer_index for retry of readdir..\n", + __func__); + goto get_new_buffer_index; + } + + if (ret == -EIO && op_state_purged(new_op)) { + gossip_err("%s: Client is down. Aborting readdir call.\n", + __func__); + goto out_free_op; + } + + if (ret < 0 || new_op->downcall.status != 0) { + gossip_debug(GOSSIP_DIR_DEBUG, + "Readdir request failed. Status:%d\n", + new_op->downcall.status); + if (ret >= 0) + ret = new_op->downcall.status; + goto out_free_op; + } + + dents_buf = new_op->downcall.trailer_buf; + if (dents_buf == NULL) { + gossip_err("Invalid NULL buffer in readdir response\n"); + ret = -ENOMEM; + goto out_free_op; + } + + bytes_decoded = decode_dirents(dents_buf, new_op->downcall.trailer_size, + &readdir_response); + if (bytes_decoded < 0) { + ret = bytes_decoded; + gossip_err("Could not decode readdir from buffer %d\n", ret); + goto out_vfree; + } + + if (bytes_decoded != new_op->downcall.trailer_size) { + gossip_err("orangefs_readdir: # bytes decoded (%ld) " + "!= trailer size (%ld)\n", + bytes_decoded, + (long)new_op->downcall.trailer_size); + ret = -EINVAL; + goto out_destroy_handle; + } + + /* + * orangefs doesn't actually store dot and dot-dot, but + * we need to have them represented. + */ + if (pos == 0) { + ino = get_ino_from_khandle(dentry->d_inode); + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: calling dir_emit of \".\" with pos = %llu\n", + __func__, + llu(pos)); + ret = dir_emit(ctx, ".", 1, ino, DT_DIR); + pos += 1; + } + + if (pos == 1) { + ino = get_parent_ino_from_dentry(dentry); + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: calling dir_emit of \"..\" with pos = %llu\n", + __func__, + llu(pos)); + ret = dir_emit(ctx, "..", 2, ino, DT_DIR); + pos += 1; + } + + /* + * we stored ORANGEFS_ITERATE_NEXT in ctx->pos last time around + * to prevent "finding" dot and dot-dot on any iteration + * other than the first. 
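+ * ctx->pos thus walks a small state machine: 0 emits ".", 1 emits
+ * "..", small integers index into the current dirent batch,
+ * ORANGEFS_ITERATE_NEXT requests another batch, and
+ * ORANGEFS_READDIR_END terminates the walk.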
+ */ + if (ctx->pos == ORANGEFS_ITERATE_NEXT) + ctx->pos = 0; + + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: dirent_outcount:%d:\n", + __func__, + readdir_response.orangefs_dirent_outcount); + for (i = ctx->pos; + i < readdir_response.orangefs_dirent_outcount; + i++) { + len = readdir_response.dirent_array[i].d_length; + current_entry = readdir_response.dirent_array[i].d_name; + current_ino = orangefs_khandle_to_ino( + &readdir_response.dirent_array[i].khandle); + + gossip_debug(GOSSIP_DIR_DEBUG, + "calling dir_emit for %s with len %d" + ", ctx->pos %ld\n", + current_entry, + len, + (unsigned long)ctx->pos); + /* + * type is unknown. We don't return object type + * in the dirent_array. This leaves getdents + * clueless about type. + */ + ret = + dir_emit(ctx, current_entry, len, current_ino, DT_UNKNOWN); + if (!ret) + break; + ctx->pos++; + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: ctx->pos:%lld\n", + __func__, + lld(ctx->pos)); + + } + + /* + * we ran all the way through the last batch, set up for + * getting another batch... + */ + if (ret) { + *ptoken = readdir_response.token; + ctx->pos = ORANGEFS_ITERATE_NEXT; + } + + /* + * Did we hit the end of the directory? + */ + if (readdir_response.token == ORANGEFS_READDIR_END && + !buffer_full) { + gossip_debug(GOSSIP_DIR_DEBUG, + "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n"); + ctx->pos = ORANGEFS_READDIR_END; + } + +out_destroy_handle: + /* kfree(NULL) is safe */ + kfree(readdir_response.dirent_array); +out_vfree: + gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", dents_buf); + vfree(dents_buf); +out_free_op: + op_release(new_op); + gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret); + return ret; +} + +static int orangefs_dir_open(struct inode *inode, struct file *file) +{ + __u64 *ptoken; + + file->private_data = kmalloc(sizeof(__u64), GFP_KERNEL); + if (!file->private_data) + return -ENOMEM; + + ptoken = file->private_data; + *ptoken = ORANGEFS_READDIR_START; + return 0; +} + +static int orangefs_dir_release(struct inode *inode, struct file *file) +{ + orangefs_flush_inode(inode); + kfree(file->private_data); + return 0; +} + +/** ORANGEFS implementation of VFS directory operations */ +const struct file_operations orangefs_dir_operations = { + .read = generic_read_dir, + .iterate = orangefs_readdir, + .open = orangefs_dir_open, + .release = orangefs_dir_release, +}; diff --git a/fs/orangefs/downcall.h b/fs/orangefs/downcall.h new file mode 100644 index 000000000000..66b99210f1f9 --- /dev/null +++ b/fs/orangefs/downcall.h @@ -0,0 +1,133 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* + * Definitions of downcalls used in Linux kernel module. 
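+ * A downcall is the reply half of an upcall: the client-core fills
+ * in type, status, and the resp union member that matches the
+ * request, e.g. an ORANGEFS_VFS_OP_LOOKUP upcall is answered
+ * through resp.lookup.refn.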
+ */
+
+#ifndef __DOWNCALL_H
+#define __DOWNCALL_H
+
+/*
+ * Sanitized the device-client core interaction
+ * for clean 32-64 bit usage
+ */
+struct orangefs_io_response {
+ __s64 amt_complete;
+};
+
+struct orangefs_lookup_response {
+ struct orangefs_object_kref refn;
+};
+
+struct orangefs_create_response {
+ struct orangefs_object_kref refn;
+};
+
+struct orangefs_symlink_response {
+ struct orangefs_object_kref refn;
+};
+
+struct orangefs_getattr_response {
+ struct ORANGEFS_sys_attr_s attributes;
+ char link_target[ORANGEFS_NAME_MAX];
+};
+
+struct orangefs_mkdir_response {
+ struct orangefs_object_kref refn;
+};
+
+/*
+ * duplication of some system interface structures so that I don't have
+ * to allocate extra memory
+ */
+struct orangefs_dirent {
+ char *d_name;
+ int d_length;
+ struct orangefs_khandle khandle;
+};
+
+struct orangefs_statfs_response {
+ __s64 block_size;
+ __s64 blocks_total;
+ __s64 blocks_avail;
+ __s64 files_total;
+ __s64 files_avail;
+};
+
+struct orangefs_fs_mount_response {
+ __s32 fs_id;
+ __s32 id;
+ struct orangefs_khandle root_khandle;
+};
+
+/* the getxattr response is the attribute value */
+struct orangefs_getxattr_response {
+ __s32 val_sz;
+ __s32 __pad1;
+ char val[ORANGEFS_MAX_XATTR_VALUELEN];
+};
+
+/* the listxattr response is an array of attribute names */
+struct orangefs_listxattr_response {
+ __s32 returned_count;
+ __s32 __pad1;
+ __u64 token;
+ char key[ORANGEFS_MAX_XATTR_LISTLEN * ORANGEFS_MAX_XATTR_NAMELEN];
+ __s32 keylen;
+ __s32 __pad2;
+ __s32 lengths[ORANGEFS_MAX_XATTR_LISTLEN];
+};
+
+struct orangefs_param_response {
+ __s64 value;
+};
+
+#define PERF_COUNT_BUF_SIZE 4096
+struct orangefs_perf_count_response {
+ char buffer[PERF_COUNT_BUF_SIZE];
+};
+
+#define FS_KEY_BUF_SIZE 4096
+struct orangefs_fs_key_response {
+ __s32 fs_keylen;
+ __s32 __pad1;
+ char fs_key[FS_KEY_BUF_SIZE];
+};
+
+struct orangefs_downcall_s {
+ __s32 type;
+ __s32 status;
+ /* currently trailer is used only by readdir */
+ __s64 trailer_size;
+ char *trailer_buf;
+
+ union {
+ struct orangefs_io_response io;
+ struct orangefs_lookup_response lookup;
+ struct orangefs_create_response create;
+ struct orangefs_symlink_response sym;
+ struct orangefs_getattr_response getattr;
+ struct orangefs_mkdir_response mkdir;
+ struct orangefs_statfs_response statfs;
+ struct orangefs_fs_mount_response fs_mount;
+ struct orangefs_getxattr_response getxattr;
+ struct orangefs_listxattr_response listxattr;
+ struct orangefs_param_response param;
+ struct orangefs_perf_count_response perf_count;
+ struct orangefs_fs_key_response fs_key;
+ } resp;
+};
+
+struct orangefs_readdir_response_s {
+ __u64 token;
+ __u64 directory_version;
+ __u32 __pad2;
+ __u32 orangefs_dirent_outcount;
+ struct orangefs_dirent *dirent_array;
+};
+
+#endif /* __DOWNCALL_H */
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
new file mode 100644
index 000000000000..ae92795ed965
--- /dev/null
+++ b/fs/orangefs/file.c
@@ -0,0 +1,717 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ * Linux VFS file operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+/*
+ * Copy to client-core's address space from the buffers specified
+ * by the iovec up to total_size bytes.
+ * NOTE: the iovec can contain either addresses, which may
+ * themselves be kernel-space or user-space addresses,
+ * or pointers to struct pages.
+ */
+static int precopy_buffers(int buffer_index,
+ struct iov_iter *iter,
+ size_t total_size)
+{
+ int ret = 0;
+ /*
+ * copy data from application/kernel by pulling it out
+ * of the iovec.
+ */
+
+ if (total_size) {
+ ret = orangefs_bufmap_copy_from_iovec(iter,
+ buffer_index,
+ total_size);
+ if (ret < 0)
+ gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
+ __func__,
+ (long)ret);
+ }
+ return ret;
+}
+
+/*
+ * Copy from client-core's address space to the buffers specified
+ * by the iovec up to total_size bytes.
+ * NOTE: the iovec can contain either addresses, which may
+ * themselves be kernel-space or user-space addresses,
+ * or pointers to struct pages.
+ */
+static int postcopy_buffers(int buffer_index,
+ struct iov_iter *iter,
+ size_t total_size)
+{
+ int ret = 0;
+ /*
+ * copy data to application/kernel by pushing it out to
+ * the iovec. NOTE: target buffers can be addresses or
+ * struct page pointers.
+ */
+ if (total_size) {
+ ret = orangefs_bufmap_copy_to_iovec(iter,
+ buffer_index,
+ total_size);
+ if (ret < 0)
+ gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
+ __func__,
+ (long)ret);
+ }
+ return ret;
+}
+
+/*
+ * Post and wait for the I/O upcall to finish
+ */
+static ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
+ loff_t *offset, struct iov_iter *iter,
+ size_t total_size, loff_t readahead_size)
+{
+ struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+ struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
+ struct orangefs_kernel_op_s *new_op = NULL;
+ struct iov_iter saved = *iter;
+ int buffer_index = -1;
+ ssize_t ret;
+
+ new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
+ if (!new_op)
+ return -ENOMEM;
+
+ /* synchronous I/O */
+ new_op->upcall.req.io.readahead_size = readahead_size;
+ new_op->upcall.req.io.io_type = type;
+ new_op->upcall.req.io.refn = orangefs_inode->refn;
+
+populate_shared_memory:
+ /* get a shared buffer index */
+ buffer_index = orangefs_bufmap_get();
+ if (buffer_index < 0) {
+ ret = buffer_index;
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s: orangefs_bufmap_get failure (%zd)\n",
+ __func__, ret);
+ goto out;
+ }
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): GET op %p -> buffer_index %d\n",
+ __func__,
+ handle,
+ new_op,
+ buffer_index);
+
+ new_op->uses_shared_memory = 1;
+ new_op->upcall.req.io.buf_index = buffer_index;
+ new_op->upcall.req.io.count = total_size;
+ new_op->upcall.req.io.offset = *offset;
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): offset: %llu total_size: %zd\n",
+ __func__,
+ handle,
+ llu(*offset),
+ total_size);
+ /*
+ * Stage 1: copy the buffers into client-core's address space
+ * precopy_buffers only pertains to writes.
+ */
+ if (type == ORANGEFS_IO_WRITE) {
+ ret = precopy_buffers(buffer_index,
+ iter,
+ total_size);
+ if (ret < 0)
+ goto out;
+ }
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): Calling post_io_request with tag (%llu)\n",
+ __func__,
+ handle,
+ llu(new_op->tag));
+
+ /* Stage 2: Service the I/O operation */
+ ret = service_operation(new_op,
+ type == ORANGEFS_IO_WRITE ?
+ "file_write" : + "file_read", + get_interruptible_flag(inode)); + + /* + * If service_operation() returns -EAGAIN #and# the operation was + * purged from orangefs_request_list or htable_ops_in_progress, then + * we know that the client was restarted, causing the shared memory + * area to be wiped clean. To restart a write operation in this + * case, we must re-copy the data from the user's iovec to a NEW + * shared memory location. To restart a read operation, we must get + * a new shared memory location. + */ + if (ret == -EAGAIN && op_state_purged(new_op)) { + orangefs_bufmap_put(buffer_index); + buffer_index = -1; + if (type == ORANGEFS_IO_WRITE) + *iter = saved; + gossip_debug(GOSSIP_FILE_DEBUG, + "%s:going to repopulate_shared_memory.\n", + __func__); + goto populate_shared_memory; + } + + if (ret < 0) { + if (ret == -EINTR) { + /* + * We can't return EINTR if any data was written, + * it's not POSIX. It is minimally acceptable + * to give a partial write, the way NFS does. + * + * It would be optimal to return all or nothing, + * but if a userspace write is bigger than + * an IO buffer, and the interrupt occurs + * between buffer writes, that would not be + * possible. + */ + switch (new_op->op_state - OP_VFS_STATE_GIVEN_UP) { + /* + * If the op was waiting when the interrupt + * occurred, then the client-core did not + * trigger the write. + */ + case OP_VFS_STATE_WAITING: + if (*offset == 0) + ret = -EINTR; + else + ret = 0; + break; + /* + * If the op was in progress when the interrupt + * occurred, then the client-core was able to + * trigger the write. + */ + case OP_VFS_STATE_INPROGR: + ret = total_size; + break; + default: + gossip_err("%s: unexpected op state :%d:.\n", + __func__, + new_op->op_state); + ret = 0; + break; + } + gossip_debug(GOSSIP_FILE_DEBUG, + "%s: got EINTR, state:%d: %p\n", + __func__, + new_op->op_state, + new_op); + } else { + gossip_err("%s: error in %s handle %pU, returning %zd\n", + __func__, + type == ORANGEFS_IO_READ ? + "read from" : "write to", + handle, ret); + } + if (orangefs_cancel_op_in_progress(new_op)) + return ret; + + goto out; + } + + /* + * Stage 3: Post copy buffers from client-core's address space + * postcopy_buffers only pertains to reads. + */ + if (type == ORANGEFS_IO_READ) { + ret = postcopy_buffers(buffer_index, + iter, + new_op->downcall.resp.io.amt_complete); + if (ret < 0) + goto out; + } + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): Amount %s, returned by the sys-io call:%d\n", + __func__, + handle, + type == ORANGEFS_IO_READ ? "read" : "written", + (int)new_op->downcall.resp.io.amt_complete); + + ret = new_op->downcall.resp.io.amt_complete; + +out: + if (buffer_index >= 0) { + orangefs_bufmap_put(buffer_index); + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): PUT buffer_index %d\n", + __func__, handle, buffer_index); + buffer_index = -1; + } + op_release(new_op); + return ret; +} + +/* + * Common entry point for read/write/readv/writev + * This function will dispatch it to either the direct I/O + * or buffered I/O path depending on the mount options and/or + * augmented/extended metadata attached to the file. + * Note: File extended attributes override any mount options. 
+ */
+static ssize_t do_readv_writev(enum ORANGEFS_io_type type, struct file *file,
+ loff_t *offset, struct iov_iter *iter)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+ struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
+ size_t count = iov_iter_count(iter);
+ ssize_t total_count = 0;
+ ssize_t ret = -EINVAL;
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
+ __func__,
+ handle,
+ (int)count);
+
+ if (type == ORANGEFS_IO_WRITE) {
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): proceeding with offset : %llu, "
+ "size %d\n",
+ __func__,
+ handle,
+ llu(*offset),
+ (int)count);
+ }
+
+ if (count == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ while (iov_iter_count(iter)) {
+ size_t each_count = iov_iter_count(iter);
+ size_t amt_complete;
+
+ /* how much to transfer in this loop iteration */
+ if (each_count > orangefs_bufmap_size_query())
+ each_count = orangefs_bufmap_size_query();
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): size of each_count(%d)\n",
+ __func__,
+ handle,
+ (int)each_count);
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): BEFORE wait_for_io: offset is %d\n",
+ __func__,
+ handle,
+ (int)*offset);
+
+ ret = wait_for_direct_io(type, inode, offset, iter,
+ each_count, 0);
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): return from wait_for_io:%d\n",
+ __func__,
+ handle,
+ (int)ret);
+
+ if (ret < 0)
+ goto out;
+
+ *offset += ret;
+ total_count += ret;
+ amt_complete = ret;
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): AFTER wait_for_io: offset is %d\n",
+ __func__,
+ handle,
+ (int)*offset);
+
+ /*
+ * if we got a short I/O operation,
+ * fall out and return what we got so far
+ */
+ if (amt_complete < each_count)
+ break;
+ } /* end while */
+
+out:
+ if (total_count > 0)
+ ret = total_count;
+ if (ret > 0) {
+ if (type == ORANGEFS_IO_READ) {
+ file_accessed(file);
+ } else {
+ SetMtimeFlag(orangefs_inode);
+ inode->i_mtime = CURRENT_TIME;
+ mark_inode_dirty_sync(inode);
+ }
+ }
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): Value(%d) returned.\n",
+ __func__,
+ handle,
+ (int)ret);
+
+ return ret;
+}
+
+/*
+ * Read data from a specified offset in a file (referenced by inode).
+ * Data may be placed either in a user or kernel buffer.
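+ * Unlike do_readv_writev() above, this is a single shot bounded by
+ * the shared buffer size; the readpage path uses it with i_size as
+ * a readahead hint.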
+ */ +ssize_t orangefs_inode_read(struct inode *inode, + struct iov_iter *iter, + loff_t *offset, + loff_t readahead_size) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + size_t count = iov_iter_count(iter); + size_t bufmap_size; + ssize_t ret = -EINVAL; + + g_orangefs_stats.reads++; + + bufmap_size = orangefs_bufmap_size_query(); + if (count > bufmap_size) { + gossip_debug(GOSSIP_FILE_DEBUG, + "%s: count is too large (%zd/%zd)!\n", + __func__, count, bufmap_size); + return -EINVAL; + } + + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU) %zd@%llu\n", + __func__, + &orangefs_inode->refn.khandle, + count, + llu(*offset)); + + ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, offset, iter, + count, readahead_size); + if (ret > 0) + *offset += ret; + + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): Value(%zd) returned.\n", + __func__, + &orangefs_inode->refn.khandle, + ret); + + return ret; +} + +static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + struct file *file = iocb->ki_filp; + loff_t pos = *(&iocb->ki_pos); + ssize_t rc = 0; + + BUG_ON(iocb->private); + + gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n"); + + g_orangefs_stats.reads++; + + rc = do_readv_writev(ORANGEFS_IO_READ, file, &pos, iter); + iocb->ki_pos = pos; + + return rc; +} + +static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + struct file *file = iocb->ki_filp; + loff_t pos; + ssize_t rc; + + BUG_ON(iocb->private); + + gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n"); + + mutex_lock(&file->f_mapping->host->i_mutex); + + /* Make sure generic_write_checks sees an up to date inode size. */ + if (file->f_flags & O_APPEND) { + rc = orangefs_inode_getattr(file->f_mapping->host, 0, 1); + if (rc == -ESTALE) + rc = -EIO; + if (rc) { + gossip_err("%s: orangefs_inode_getattr failed, " + "rc:%zd:.\n", __func__, rc); + goto out; + } + } + + if (file->f_pos > i_size_read(file->f_mapping->host)) + orangefs_i_size_write(file->f_mapping->host, file->f_pos); + + rc = generic_write_checks(iocb, iter); + + if (rc <= 0) { + gossip_err("%s: generic_write_checks failed, rc:%zd:.\n", + __func__, rc); + goto out; + } + + /* + * if we are appending, generic_write_checks would have updated + * pos to the end of the file, so we will wait till now to set + * pos... + */ + pos = *(&iocb->ki_pos); + + rc = do_readv_writev(ORANGEFS_IO_WRITE, + file, + &pos, + iter); + if (rc < 0) { + gossip_err("%s: do_readv_writev failed, rc:%zd:.\n", + __func__, rc); + goto out; + } + + iocb->ki_pos = pos; + g_orangefs_stats.writes++; + +out: + + mutex_unlock(&file->f_mapping->host->i_mutex); + return rc; +} + +/* + * Perform a miscellaneous operation on a file. 
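+ * For illustration, userspace reaches this the way chattr(1)-style
+ * tools would (hypothetical snippet, error handling omitted):
+ *
+ *	int flags;
+ *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
+ *	flags |= FS_NOATIME_FL;
+ *	ioctl(fd, FS_IOC_SETFLAGS, &flags);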
+ */ +static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret = -ENOTTY; + __u64 val = 0; + unsigned long uval; + + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_ioctl: called with cmd %d\n", + cmd); + + /* + * we understand some general ioctls on files, such as the immutable + * and append flags + */ + if (cmd == FS_IOC_GETFLAGS) { + val = 0; + ret = orangefs_inode_getxattr(file_inode(file), + ORANGEFS_XATTR_NAME_DEFAULT_PREFIX, + "user.pvfs2.meta_hint", + &val, sizeof(val)); + if (ret < 0 && ret != -ENODATA) + return ret; + else if (ret == -ENODATA) + val = 0; + uval = val; + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_ioctl: FS_IOC_GETFLAGS: %llu\n", + (unsigned long long)uval); + return put_user(uval, (int __user *)arg); + } else if (cmd == FS_IOC_SETFLAGS) { + ret = 0; + if (get_user(uval, (int __user *)arg)) + return -EFAULT; + /* + * ORANGEFS_MIRROR_FL is set internally when the mirroring mode + * is turned on for a file. The user is not allowed to turn + * on this bit, but the bit is present if the user first gets + * the flags and then updates the flags with some new + * settings. So, we ignore it in the following edit. bligon. + */ + if ((uval & ~ORANGEFS_MIRROR_FL) & + (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) { + gossip_err("orangefs_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n"); + return -EINVAL; + } + val = uval; + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_ioctl: FS_IOC_SETFLAGS: %llu\n", + (unsigned long long)val); + ret = orangefs_inode_setxattr(file_inode(file), + ORANGEFS_XATTR_NAME_DEFAULT_PREFIX, + "user.pvfs2.meta_hint", + &val, sizeof(val), 0); + } + + return ret; +} + +/* + * Memory map a region of a file. + */ +static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma) +{ + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_file_mmap: called on %s\n", + (file ? + (char *)file->f_path.dentry->d_name.name : + (char *)"Unknown")); + + /* set the sequential readahead hint */ + vma->vm_flags |= VM_SEQ_READ; + vma->vm_flags &= ~VM_RAND_READ; + + /* Use readonly mmap since we cannot support writable maps. */ + return generic_file_readonly_mmap(file, vma); +} + +#define mapping_nrpages(idata) ((idata)->nrpages) + +/* + * Called to notify the module that there are no more references to + * this file (i.e. no processes have it open). + * + * \note Not called when each file is closed. + */ +static int orangefs_file_release(struct inode *inode, struct file *file) +{ + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_file_release: called on %s\n", + file->f_path.dentry->d_name.name); + + orangefs_flush_inode(inode); + + /* + * remove all associated inode pages from the page cache and mmap + * readahead cache (if any); this forces an expensive refresh of + * data for the next caller of mmap (or 'get_block' accesses) + */ + if (file->f_path.dentry->d_inode && + file->f_path.dentry->d_inode->i_mapping && + mapping_nrpages(&file->f_path.dentry->d_inode->i_data)) + truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping, + 0); + return 0; +} + +/* + * Push all data for a specific file onto permanent storage. 
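+ * The sequence below: write back and wait on the dirty page cache
+ * range, ask the client-core to sync server-side state with an
+ * ORANGEFS_VFS_OP_FSYNC upcall, then flush our own inode state.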
+ */
+static int orangefs_fsync(struct file *file,
+ loff_t start,
+ loff_t end,
+ int datasync)
+{
+ int ret = -EINVAL;
+ struct orangefs_inode_s *orangefs_inode =
+ ORANGEFS_I(file->f_path.dentry->d_inode);
+ struct orangefs_kernel_op_s *new_op = NULL;
+
+ /* required call */
+ filemap_write_and_wait_range(file->f_mapping, start, end);
+
+ new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC);
+ if (!new_op)
+ return -ENOMEM;
+ new_op->upcall.req.fsync.refn = orangefs_inode->refn;
+
+ ret = service_operation(new_op,
+ "orangefs_fsync",
+ get_interruptible_flag(file->f_path.dentry->d_inode));
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "orangefs_fsync got return value of %d\n",
+ ret);
+
+ op_release(new_op);
+
+ orangefs_flush_inode(file->f_path.dentry->d_inode);
+ return ret;
+}
+
+/*
+ * Change the file pointer position for an instance of an open file.
+ *
+ * \note If .llseek is overridden, we must acquire lock as described in
+ * Documentation/filesystems/Locking.
+ *
+ * Future upgrade could support SEEK_DATA and SEEK_HOLE but would
+ * require many changes to the FS
+ */
+static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin)
+{
+ int ret = -EINVAL;
+ struct inode *inode = file_inode(file);
+
+ if (origin == SEEK_END) {
+ /*
+ * revalidate the inode's file size.
+ * NOTE: We are only interested in file size here,
+ * so we set mask accordingly.
+ */
+ ret = orangefs_inode_getattr(file->f_mapping->host, 0, 1);
+ if (ret == -ESTALE)
+ ret = -EIO;
+ if (ret) {
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s:%s:%d calling make bad inode\n",
+ __FILE__,
+ __func__,
+ __LINE__);
+ return ret;
+ }
+ }
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "orangefs_file_llseek: offset is %ld | origin is %d"
+ " | inode size is %lu\n",
+ (long)offset,
+ origin,
+ (unsigned long)i_size_read(inode));
+
+ return generic_file_llseek(file, offset, origin);
+}
+
+/*
+ * Support local locks (locks that only this kernel knows about)
+ * if Orangefs was mounted -o local_lock.
+ */
+static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+ int rc = -EINVAL;
+
+ if (ORANGEFS_SB(filp->f_inode->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) {
+ if (cmd == F_GETLK) {
+ rc = 0;
+ posix_test_lock(filp, fl);
+ } else {
+ rc = posix_lock_file(filp, fl, NULL);
+ }
+ }
+
+ return rc;
+}
+
+/** ORANGEFS implementation of VFS file operations */
+const struct file_operations orangefs_file_operations = {
+ .llseek = orangefs_file_llseek,
+ .read_iter = orangefs_file_read_iter,
+ .write_iter = orangefs_file_write_iter,
+ .lock = orangefs_lock,
+ .unlocked_ioctl = orangefs_ioctl,
+ .mmap = orangefs_file_mmap,
+ .open = generic_file_open,
+ .release = orangefs_file_release,
+ .fsync = orangefs_fsync,
+};
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
new file mode 100644
index 000000000000..0166faabf8f2
--- /dev/null
+++ b/fs/orangefs/inode.c
@@ -0,0 +1,475 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ * Linux VFS inode operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+static int read_one_page(struct page *page)
+{
+ int ret;
+ int max_block;
+ ssize_t bytes_read = 0;
+ struct inode *inode = page->mapping->host;
+ const __u32 blocksize = PAGE_SIZE; /* inode->i_blksize */
+ const __u32 blockbits = PAGE_SHIFT; /* inode->i_blkbits */
+ struct iov_iter to;
+ struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
+
+ iov_iter_bvec(&to, ITER_BVEC | READ, &bv, 1, PAGE_SIZE);
+
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "orangefs_readpage called with page %p\n",
+ page);
+
+ max_block = ((inode->i_size / blocksize) + 1);
+
+ if (page->index < max_block) {
+ loff_t blockptr_offset = (((loff_t) page->index) << blockbits);
+
+ bytes_read = orangefs_inode_read(inode,
+ &to,
+ &blockptr_offset,
+ inode->i_size);
+ }
+ /* this will only zero remaining unread portions of the page data */
+ iov_iter_zero(~0U, &to);
+ /* takes care of potential aliasing */
+ flush_dcache_page(page);
+ if (bytes_read < 0) {
+ ret = bytes_read;
+ SetPageError(page);
+ } else {
+ SetPageUptodate(page);
+ if (PageError(page))
+ ClearPageError(page);
+ ret = 0;
+ }
+ /* unlock the page after the ->readpage() routine completes */
+ unlock_page(page);
+ return ret;
+}
+
+static int orangefs_readpage(struct file *file, struct page *page)
+{
+ return read_one_page(page);
+}
+
+static int orangefs_readpages(struct file *file,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned nr_pages)
+{
+ int page_idx;
+ int ret;
+
+ gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_readpages called\n");
+
+ for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+ struct page *page;
+
+ page = list_entry(pages->prev, struct page, lru);
+ list_del(&page->lru);
+ if (!add_to_page_cache(page,
+ mapping,
+ page->index,
+ GFP_KERNEL)) {
+ ret = read_one_page(page);
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "added page to cache, read_one_page returned: %d\n",
+ ret);
+ } else {
+ put_page(page);
+ }
+ }
+ BUG_ON(!list_empty(pages));
+ return 0;
+}
+
+static void orangefs_invalidatepage(struct page *page,
+ unsigned int offset,
+ unsigned int length)
+{
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "orangefs_invalidatepage called on page %p "
+ "(offset is %u)\n",
+ page,
+ offset);
+
+ ClearPageUptodate(page);
+ ClearPageMappedToDisk(page);
+ return;
+
+}
+
+static int orangefs_releasepage(struct page *page, gfp_t foo)
+{
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "orangefs_releasepage called on page %p\n",
+ page);
+ return 0;
+}
+
+/*
+ * Having a direct_IO entry point in the address_space_operations
+ * struct causes the kernel to allow us to use O_DIRECT on
+ * open. Nothing will ever call this thing, but in the future we
+ * will need to be able to use O_DIRECT on open in order to support
+ * AIO. Modeled after NFS, they do this too.
+ */ +/* + * static ssize_t orangefs_direct_IO(int rw, + * struct kiocb *iocb, + * struct iov_iter *iter, + * loff_t offset) + *{ + * gossip_debug(GOSSIP_INODE_DEBUG, + * "orangefs_direct_IO: %s\n", + * iocb->ki_filp->f_path.dentry->d_name.name); + * + * return -EINVAL; + *} + */ + +struct backing_dev_info orangefs_backing_dev_info = { + .name = "orangefs", + .ra_pages = 0, + .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, +}; + +/** ORANGEFS2 implementation of address space operations */ +const struct address_space_operations orangefs_address_operations = { + .readpage = orangefs_readpage, + .readpages = orangefs_readpages, + .invalidatepage = orangefs_invalidatepage, + .releasepage = orangefs_releasepage, +/* .direct_IO = orangefs_direct_IO */ +}; + +static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op; + loff_t orig_size; + int ret = -EINVAL; + + gossip_debug(GOSSIP_INODE_DEBUG, + "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n", + __func__, + get_khandle_from_ino(inode), + &orangefs_inode->refn.khandle, + orangefs_inode->refn.fs_id, + iattr->ia_size); + + /* Ensure that we have a up to date size, so we know if it changed. */ + ret = orangefs_inode_getattr(inode, 0, 1); + if (ret == -ESTALE) + ret = -EIO; + if (ret) { + gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n", + __func__, ret); + return ret; + } + orig_size = i_size_read(inode); + + truncate_setsize(inode, iattr->ia_size); + + new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE); + if (!new_op) + return -ENOMEM; + + new_op->upcall.req.truncate.refn = orangefs_inode->refn; + new_op->upcall.req.truncate.size = (__s64) iattr->ia_size; + + ret = service_operation(new_op, __func__, + get_interruptible_flag(inode)); + + /* + * the truncate has no downcall members to retrieve, but + * the status value tells us if it went through ok or not + */ + gossip_debug(GOSSIP_INODE_DEBUG, + "orangefs: orangefs_truncate got return value of %d\n", + ret); + + op_release(new_op); + + if (ret != 0) + return ret; + + /* + * Only change the c/mtime if we are changing the size or we are + * explicitly asked to change it. This handles the semantic difference + * between truncate() and ftruncate() as implemented in the VFS. + * + * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a + * special case where we need to update the times despite not having + * these flags set. For all other operations the VFS set these flags + * explicitly if it wants a timestamp update. + */ + if (orig_size != i_size_read(inode) && + !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) { + iattr->ia_ctime = iattr->ia_mtime = + current_fs_time(inode->i_sb); + iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME; + } + + return ret; +} + +/* + * Change attributes of an object referenced by dentry. 
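+ * Size changes go first (orangefs_setattr_size() issues a truncate
+ * upcall), then the remaining attributes are copied into the inode
+ * and pushed out via orangefs_inode_setattr(); a mode change on a
+ * file carrying ACLs finishes with posix_acl_chmod().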
+ */
+int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ int ret = -EINVAL;
+ struct inode *inode = dentry->d_inode;
+
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "orangefs_setattr: called on %s\n",
+ dentry->d_name.name);
+
+ ret = inode_change_ok(inode, iattr);
+ if (ret)
+ goto out;
+
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(inode)) {
+ ret = orangefs_setattr_size(inode, iattr);
+ if (ret)
+ goto out;
+ }
+
+ setattr_copy(inode, iattr);
+ mark_inode_dirty(inode);
+
+ ret = orangefs_inode_setattr(inode, iattr);
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "orangefs_setattr: inode_setattr returned %d\n",
+ ret);
+
+ if (!ret && (iattr->ia_valid & ATTR_MODE))
+ /* change mode on a file that has ACLs */
+ ret = posix_acl_chmod(inode, inode->i_mode);
+
+out:
+ gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n", ret);
+ return ret;
+}
+
+/*
+ * Obtain attributes of an object given a dentry
+ */
+int orangefs_getattr(struct vfsmount *mnt,
+ struct dentry *dentry,
+ struct kstat *kstat)
+{
+ int ret = -ENOENT;
+ struct inode *inode = dentry->d_inode;
+ struct orangefs_inode_s *orangefs_inode = NULL;
+
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "orangefs_getattr: called on %s\n",
+ dentry->d_name.name);
+
+ ret = orangefs_inode_getattr(inode, 0, 1);
+ if (ret == 0) {
+ generic_fillattr(inode, kstat);
+
+ /* override block size reported to stat */
+ orangefs_inode = ORANGEFS_I(inode);
+ kstat->blksize = orangefs_inode->blksize;
+ }
+ return ret;
+}
+
+int orangefs_permission(struct inode *inode, int mask)
+{
+ int ret;
+
+ if (mask & MAY_NOT_BLOCK)
+ return -ECHILD;
+
+ gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__);
+
+ /* Make sure the permission (and other common attrs) are up to date. */
+ ret = orangefs_inode_getattr(inode, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ return generic_permission(inode, mask);
+}
+
+/* ORANGEFS implementation of VFS inode operations for files */
+struct inode_operations orangefs_file_inode_operations = {
+ .get_acl = orangefs_get_acl,
+ .set_acl = orangefs_set_acl,
+ .setattr = orangefs_setattr,
+ .getattr = orangefs_getattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = orangefs_listxattr,
+ .removexattr = generic_removexattr,
+ .permission = orangefs_permission,
+};
+
+static int orangefs_init_iops(struct inode *inode)
+{
+ inode->i_mapping->a_ops = &orangefs_address_operations;
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_op = &orangefs_file_inode_operations;
+ inode->i_fop = &orangefs_file_operations;
+ inode->i_blkbits = PAGE_SHIFT;
+ break;
+ case S_IFLNK:
+ inode->i_op = &orangefs_symlink_inode_operations;
+ break;
+ case S_IFDIR:
+ inode->i_op = &orangefs_dir_inode_operations;
+ inode->i_fop = &orangefs_dir_operations;
+ break;
+ default:
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "%s: unsupported mode\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Given an ORANGEFS object identifier (fsid, handle), convert it into
+ * an ino_t type that will be used as a hash-index from where the handle
+ * will be searched for in the VFS hash table of inodes.
+ */
+static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref)
+{
+ if (!ref)
+ return 0;
+ return orangefs_khandle_to_ino(&(ref->khandle));
+}
+
+/*
+ * Called to set up an inode from iget5_locked.
+ */ +static int orangefs_set_inode(struct inode *inode, void *data) +{ + struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data; + ORANGEFS_I(inode)->refn.fs_id = ref->fs_id; + ORANGEFS_I(inode)->refn.khandle = ref->khandle; + return 0; +} + +/* + * Called to determine if handles match. + */ +static int orangefs_test_inode(struct inode *inode, void *data) +{ + struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data; + struct orangefs_inode_s *orangefs_inode = NULL; + + orangefs_inode = ORANGEFS_I(inode); + return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle), &(ref->khandle)) + && orangefs_inode->refn.fs_id == ref->fs_id); +} + +/* + * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS + * file handle. + * + * @sb: the file system super block instance. + * @ref: The ORANGEFS object for which we are trying to locate an inode structure. + */ +struct inode *orangefs_iget(struct super_block *sb, struct orangefs_object_kref *ref) +{ + struct inode *inode = NULL; + unsigned long hash; + int error; + + hash = orangefs_handle_hash(ref); + inode = iget5_locked(sb, hash, orangefs_test_inode, orangefs_set_inode, ref); + if (!inode || !(inode->i_state & I_NEW)) + return inode; + + error = orangefs_inode_getattr(inode, 1, 0); + if (error) { + iget_failed(inode); + return ERR_PTR(error); + } + + inode->i_ino = hash; /* needed for stat etc */ + orangefs_init_iops(inode); + unlock_new_inode(inode); + + gossip_debug(GOSSIP_INODE_DEBUG, + "iget handle %pU, fsid %d hash %ld i_ino %lu\n", + &ref->khandle, + ref->fs_id, + hash, + inode->i_ino); + + return inode; +} + +/* + * Allocate an inode for a newly created file and insert it into the inode hash. + */ +struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir, + int mode, dev_t dev, struct orangefs_object_kref *ref) +{ + unsigned long hash = orangefs_handle_hash(ref); + struct inode *inode; + int error; + + gossip_debug(GOSSIP_INODE_DEBUG, + "%s:(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n", + __func__, + sb, + MAJOR(dev), + MINOR(dev), + mode); + + inode = new_inode(sb); + if (!inode) + return NULL; + + orangefs_set_inode(inode, ref); + inode->i_ino = hash; /* needed for stat etc */ + + error = orangefs_inode_getattr(inode, 1, 0); + if (error) + goto out_iput; + + orangefs_init_iops(inode); + + inode->i_mode = mode; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + inode->i_size = PAGE_SIZE; + inode->i_rdev = dev; + + error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref); + if (error < 0) + goto out_iput; + + gossip_debug(GOSSIP_INODE_DEBUG, + "Initializing ACL's for inode %pU\n", + get_khandle_from_ino(inode)); + orangefs_init_acl(inode, dir); + return inode; + +out_iput: + iput(inode); + return ERR_PTR(error); +} diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c new file mode 100644 index 000000000000..5a60c508af4e --- /dev/null +++ b/fs/orangefs/namei.c @@ -0,0 +1,462 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* + * Linux VFS namei operations. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" + +/* + * Get a newly allocated inode to go with a negative dentry. 
+ */
+static int orangefs_create(struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode,
+ bool exclusive)
+{
+ struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+ struct orangefs_kernel_op_s *new_op;
+ struct inode *inode;
+ int ret;
+
+ gossip_debug(GOSSIP_NAME_DEBUG, "%s: %s\n",
+ __func__,
+ dentry->d_name.name);
+
+ new_op = op_alloc(ORANGEFS_VFS_OP_CREATE);
+ if (!new_op)
+ return -ENOMEM;
+
+ new_op->upcall.req.create.parent_refn = parent->refn;
+
+ fill_default_sys_attrs(new_op->upcall.req.create.attributes,
+ ORANGEFS_TYPE_METAFILE, mode);
+
+ strncpy(new_op->upcall.req.create.d_name,
+ dentry->d_name.name, ORANGEFS_NAME_MAX);
+
+ ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "%s: %s: handle:%pU: fsid:%d: new_op:%p: ret:%d:\n",
+ __func__,
+ dentry->d_name.name,
+ &new_op->downcall.resp.create.refn.khandle,
+ new_op->downcall.resp.create.refn.fs_id,
+ new_op,
+ ret);
+
+ if (ret < 0)
+ goto out;
+
+ inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0,
+ &new_op->downcall.resp.create.refn);
+ if (IS_ERR(inode)) {
+ gossip_err("%s: Failed to allocate inode for file :%s:\n",
+ __func__,
+ dentry->d_name.name);
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "%s: Assigned inode :%pU: for file :%s:\n",
+ __func__,
+ get_khandle_from_ino(inode),
+ dentry->d_name.name);
+
+ d_instantiate(dentry, inode);
+ unlock_new_inode(inode);
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "%s: dentry instantiated for %s\n",
+ __func__,
+ dentry->d_name.name);
+
+ SetMtimeFlag(parent);
+ dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+ mark_inode_dirty_sync(dir);
+ ret = 0;
+out:
+ op_release(new_op);
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "%s: %s: returning %d\n",
+ __func__,
+ dentry->d_name.name,
+ ret);
+ return ret;
+}
+
+/*
+ * Attempt to resolve an object name (dentry->d_name), parent handle, and
+ * fsid into a handle for the object.
+ */
+static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+ struct orangefs_kernel_op_s *new_op;
+ struct inode *inode;
+ struct dentry *res;
+ int ret = -EINVAL;
+
+ /*
+ * in theory we could skip a lookup here (if the intent is to
+ * create) in order to avoid a potentially failed lookup, but
+ * skipping it could miss a valid entry and try to create a file
+ * that already exists (e.g.
the vfs already handles checking for + * -EEXIST on O_EXCL opens, which is broken if we skip this lookup + * in the create path) + */ + gossip_debug(GOSSIP_NAME_DEBUG, "%s called on %s\n", + __func__, dentry->d_name.name); + + if (dentry->d_name.len > (ORANGEFS_NAME_MAX - 1)) + return ERR_PTR(-ENAMETOOLONG); + + new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP); + if (!new_op) + return ERR_PTR(-ENOMEM); + + new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW; + + gossip_debug(GOSSIP_NAME_DEBUG, "%s:%s:%d using parent %pU\n", + __FILE__, + __func__, + __LINE__, + &parent->refn.khandle); + new_op->upcall.req.lookup.parent_refn = parent->refn; + + strncpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name, + ORANGEFS_NAME_MAX); + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: doing lookup on %s under %pU,%d\n", + __func__, + new_op->upcall.req.lookup.d_name, + &new_op->upcall.req.lookup.parent_refn.khandle, + new_op->upcall.req.lookup.parent_refn.fs_id); + + ret = service_operation(new_op, __func__, get_interruptible_flag(dir)); + + gossip_debug(GOSSIP_NAME_DEBUG, + "Lookup Got %pU, fsid %d (ret=%d)\n", + &new_op->downcall.resp.lookup.refn.khandle, + new_op->downcall.resp.lookup.refn.fs_id, + ret); + + if (ret < 0) { + if (ret == -ENOENT) { + /* + * if no inode was found, add a negative dentry to + * dcache anyway; if we don't, we don't hold expected + * lookup semantics and we most noticeably break + * during directory renames. + * + * however, if the operation failed or exited, do not + * add the dentry (e.g. in the case that a touch is + * issued on a file that already exists that was + * interrupted during this lookup -- no need to add + * another negative dentry for an existing file) + */ + + gossip_debug(GOSSIP_NAME_DEBUG, + "orangefs_lookup: Adding *negative* dentry " + "%p for %s\n", + dentry, + dentry->d_name.name); + + d_add(dentry, NULL); + res = NULL; + goto out; + } + + /* must be a non-recoverable error */ + res = ERR_PTR(ret); + goto out; + } + + inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn); + if (IS_ERR(inode)) { + gossip_debug(GOSSIP_NAME_DEBUG, + "error %ld from iget\n", PTR_ERR(inode)); + res = ERR_CAST(inode); + goto out; + } + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s:%s:%d " + "Found good inode [%lu] with count [%d]\n", + __FILE__, + __func__, + __LINE__, + inode->i_ino, + (int)atomic_read(&inode->i_count)); + + /* update dentry/inode pair into dcache */ + res = d_splice_alias(inode, dentry); + + gossip_debug(GOSSIP_NAME_DEBUG, + "Lookup success (inode ct = %d)\n", + (int)atomic_read(&inode->i_count)); +out: + op_release(new_op); + return res; +} + +/* return 0 on success; non-zero otherwise */ +static int orangefs_unlink(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + struct orangefs_inode_s *parent = ORANGEFS_I(dir); + struct orangefs_kernel_op_s *new_op; + int ret; + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: called on %s\n" + " (inode %pU): Parent is %pU | fs_id %d\n", + __func__, + dentry->d_name.name, + get_khandle_from_ino(inode), + &parent->refn.khandle, + parent->refn.fs_id); + + new_op = op_alloc(ORANGEFS_VFS_OP_REMOVE); + if (!new_op) + return -ENOMEM; + + new_op->upcall.req.remove.parent_refn = parent->refn; + strncpy(new_op->upcall.req.remove.d_name, dentry->d_name.name, + ORANGEFS_NAME_MAX); + + ret = service_operation(new_op, "orangefs_unlink", + get_interruptible_flag(inode)); + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: service_operation returned:%d:\n", + __func__, + ret); + + 
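+ /*
+ * On success, mirror the server-side remove locally below: drop
+ * the victim's link count and dirty the parent so the updated
+ * mtime/ctime makes it back to the server.
+ */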
op_release(new_op); + + if (!ret) { + drop_nlink(inode); + + SetMtimeFlag(parent); + dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb); + mark_inode_dirty_sync(dir); + } + return ret; +} + +static int orangefs_symlink(struct inode *dir, + struct dentry *dentry, + const char *symname) +{ + struct orangefs_inode_s *parent = ORANGEFS_I(dir); + struct orangefs_kernel_op_s *new_op; + struct inode *inode; + int mode = 0755; + int ret; + + gossip_debug(GOSSIP_NAME_DEBUG, "%s: called\n", __func__); + + if (!symname) + return -EINVAL; + + if (strlen(symname) + 1 > ORANGEFS_NAME_MAX) + return -ENAMETOOLONG; + + new_op = op_alloc(ORANGEFS_VFS_OP_SYMLINK); + if (!new_op) + return -ENOMEM; + + new_op->upcall.req.sym.parent_refn = parent->refn; + + fill_default_sys_attrs(new_op->upcall.req.sym.attributes, + ORANGEFS_TYPE_SYMLINK, + mode); + + strncpy(new_op->upcall.req.sym.entry_name, + dentry->d_name.name, + ORANGEFS_NAME_MAX); + strncpy(new_op->upcall.req.sym.target, symname, ORANGEFS_NAME_MAX); + + ret = service_operation(new_op, __func__, get_interruptible_flag(dir)); + + gossip_debug(GOSSIP_NAME_DEBUG, + "Symlink Got ORANGEFS handle %pU on fsid %d (ret=%d)\n", + &new_op->downcall.resp.sym.refn.khandle, + new_op->downcall.resp.sym.refn.fs_id, ret); + + if (ret < 0) { + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: failed with error code %d\n", + __func__, ret); + goto out; + } + + inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0, + &new_op->downcall.resp.sym.refn); + if (IS_ERR(inode)) { + gossip_err + ("*** Failed to allocate orangefs symlink inode\n"); + ret = PTR_ERR(inode); + goto out; + } + + gossip_debug(GOSSIP_NAME_DEBUG, + "Assigned symlink inode new number of %pU\n", + get_khandle_from_ino(inode)); + + d_instantiate(dentry, inode); + unlock_new_inode(inode); + + gossip_debug(GOSSIP_NAME_DEBUG, + "Inode (Symlink) %pU -> %s\n", + get_khandle_from_ino(inode), + dentry->d_name.name); + + SetMtimeFlag(parent); + dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb); + mark_inode_dirty_sync(dir); + ret = 0; +out: + op_release(new_op); + return ret; +} + +static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +{ + struct orangefs_inode_s *parent = ORANGEFS_I(dir); + struct orangefs_kernel_op_s *new_op; + struct inode *inode; + int ret; + + new_op = op_alloc(ORANGEFS_VFS_OP_MKDIR); + if (!new_op) + return -ENOMEM; + + new_op->upcall.req.mkdir.parent_refn = parent->refn; + + fill_default_sys_attrs(new_op->upcall.req.mkdir.attributes, + ORANGEFS_TYPE_DIRECTORY, mode); + + strncpy(new_op->upcall.req.mkdir.d_name, + dentry->d_name.name, ORANGEFS_NAME_MAX); + + ret = service_operation(new_op, __func__, get_interruptible_flag(dir)); + + gossip_debug(GOSSIP_NAME_DEBUG, + "Mkdir Got ORANGEFS handle %pU on fsid %d\n", + &new_op->downcall.resp.mkdir.refn.khandle, + new_op->downcall.resp.mkdir.refn.fs_id); + + if (ret < 0) { + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: failed with error code %d\n", + __func__, ret); + goto out; + } + + inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0, + &new_op->downcall.resp.mkdir.refn); + if (IS_ERR(inode)) { + gossip_err("*** Failed to allocate orangefs dir inode\n"); + ret = PTR_ERR(inode); + goto out; + } + + gossip_debug(GOSSIP_NAME_DEBUG, + "Assigned dir inode new number of %pU\n", + get_khandle_from_ino(inode)); + + d_instantiate(dentry, inode); + unlock_new_inode(inode); + + gossip_debug(GOSSIP_NAME_DEBUG, + "Inode (Directory) %pU -> %s\n", + get_khandle_from_ino(inode), + dentry->d_name.name); + + /* + * NOTE: we have 
no good way to keep nlink consistent for directories + * across clients; keep constant at 1. + */ + SetMtimeFlag(parent); + dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb); + mark_inode_dirty_sync(dir); +out: + op_release(new_op); + return ret; +} + +static int orangefs_rename(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry) +{ + struct orangefs_kernel_op_s *new_op; + int ret; + + gossip_debug(GOSSIP_NAME_DEBUG, + "orangefs_rename: called (%s/%s => %s/%s) ct=%d\n", + old_dentry->d_parent->d_name.name, + old_dentry->d_name.name, + new_dentry->d_parent->d_name.name, + new_dentry->d_name.name, + d_count(new_dentry)); + + new_op = op_alloc(ORANGEFS_VFS_OP_RENAME); + if (!new_op) + return -ENOMEM; + + new_op->upcall.req.rename.old_parent_refn = ORANGEFS_I(old_dir)->refn; + new_op->upcall.req.rename.new_parent_refn = ORANGEFS_I(new_dir)->refn; + + strncpy(new_op->upcall.req.rename.d_old_name, + old_dentry->d_name.name, + ORANGEFS_NAME_MAX); + strncpy(new_op->upcall.req.rename.d_new_name, + new_dentry->d_name.name, + ORANGEFS_NAME_MAX); + + ret = service_operation(new_op, + "orangefs_rename", + get_interruptible_flag(old_dentry->d_inode)); + + gossip_debug(GOSSIP_NAME_DEBUG, + "orangefs_rename: got downcall status %d\n", + ret); + + if (new_dentry->d_inode) + new_dentry->d_inode->i_ctime = CURRENT_TIME; + + op_release(new_op); + return ret; +} + +/* ORANGEFS implementation of VFS inode operations for directories */ +struct inode_operations orangefs_dir_inode_operations = { + .lookup = orangefs_lookup, + .get_acl = orangefs_get_acl, + .set_acl = orangefs_set_acl, + .create = orangefs_create, + .unlink = orangefs_unlink, + .symlink = orangefs_symlink, + .mkdir = orangefs_mkdir, + .rmdir = orangefs_unlink, + .rename = orangefs_rename, + .setattr = orangefs_setattr, + .getattr = orangefs_getattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .removexattr = generic_removexattr, + .listxattr = orangefs_listxattr, + .permission = orangefs_permission, +}; diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c new file mode 100644 index 000000000000..75375e90a63f --- /dev/null +++ b/fs/orangefs/orangefs-bufmap.c @@ -0,0 +1,556 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. 
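+ *
+ * This file implements the shared-memory buffer interface: the
+ * userspace client-core hands the kernel one large page-aligned
+ * buffer (orangefs_bufmap_initialize() below), whose pages are
+ * pinned and carved into desc_count descriptors of desc_size bytes
+ * each. A small "slot map" arbitrates which descriptors are free:
+ * c counts free slots, c == -1 means no map is installed, and
+ * mark_killed() subtracts count + 1 so that c only returns to -1
+ * once every outstanding slot has been put back, which is exactly
+ * the state run_down() waits for.
+ *
+ * I/O paths use the slots roughly like this (an illustrative
+ * sketch; error handling omitted):
+ *
+ *	slot = orangefs_bufmap_get();
+ *	orangefs_bufmap_copy_from_iovec(iter, slot, size);
+ *	... hand slot to the client-core via an upcall ...
+ *	orangefs_bufmap_put(slot);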
+ */ +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" + +struct slot_map { + int c; + wait_queue_head_t q; + int count; + unsigned long *map; +}; + +static struct slot_map rw_map = { + .c = -1, + .q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q) +}; +static struct slot_map readdir_map = { + .c = -1, + .q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q) +}; + + +static void install(struct slot_map *m, int count, unsigned long *map) +{ + spin_lock(&m->q.lock); + m->c = m->count = count; + m->map = map; + wake_up_all_locked(&m->q); + spin_unlock(&m->q.lock); +} + +static void mark_killed(struct slot_map *m) +{ + spin_lock(&m->q.lock); + m->c -= m->count + 1; + spin_unlock(&m->q.lock); +} + +static void run_down(struct slot_map *m) +{ + DEFINE_WAIT(wait); + spin_lock(&m->q.lock); + if (m->c != -1) { + for (;;) { + if (likely(list_empty(&wait.task_list))) + __add_wait_queue_tail(&m->q, &wait); + set_current_state(TASK_UNINTERRUPTIBLE); + + if (m->c == -1) + break; + + spin_unlock(&m->q.lock); + schedule(); + spin_lock(&m->q.lock); + } + __remove_wait_queue(&m->q, &wait); + __set_current_state(TASK_RUNNING); + } + m->map = NULL; + spin_unlock(&m->q.lock); +} + +static void put(struct slot_map *m, int slot) +{ + int v; + spin_lock(&m->q.lock); + __clear_bit(slot, m->map); + v = ++m->c; + if (unlikely(v == 1)) /* no free slots -> one free slot */ + wake_up_locked(&m->q); + else if (unlikely(v == -1)) /* finished dying */ + wake_up_all_locked(&m->q); + spin_unlock(&m->q.lock); +} + +static int wait_for_free(struct slot_map *m) +{ + long left = slot_timeout_secs * HZ; + DEFINE_WAIT(wait); + + do { + long n = left, t; + if (likely(list_empty(&wait.task_list))) + __add_wait_queue_tail_exclusive(&m->q, &wait); + set_current_state(TASK_INTERRUPTIBLE); + + if (m->c > 0) + break; + + if (m->c < 0) { + /* we are waiting for map to be installed */ + /* it would better be there soon, or we go away */ + if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ) + n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ; + } + spin_unlock(&m->q.lock); + t = schedule_timeout(n); + spin_lock(&m->q.lock); + if (unlikely(!t) && n != left && m->c < 0) + left = t; + else + left = t + (left - n); + if (unlikely(signal_pending(current))) + left = -EINTR; + } while (left > 0); + + if (!list_empty(&wait.task_list)) + list_del(&wait.task_list); + else if (left <= 0 && waitqueue_active(&m->q)) + __wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL); + __set_current_state(TASK_RUNNING); + + if (likely(left > 0)) + return 0; + + return left < 0 ? 
-EINTR : -ETIMEDOUT; +} + +static int get(struct slot_map *m) +{ + int res = 0; + spin_lock(&m->q.lock); + if (unlikely(m->c <= 0)) + res = wait_for_free(m); + if (likely(!res)) { + m->c--; + res = find_first_zero_bit(m->map, m->count); + __set_bit(res, m->map); + } + spin_unlock(&m->q.lock); + return res; +} + +/* used to describe mapped buffers */ +struct orangefs_bufmap_desc { + void *uaddr; /* user space address pointer */ + struct page **page_array; /* array of mapped pages */ + int array_count; /* size of above arrays */ + struct list_head list_link; +}; + +static struct orangefs_bufmap { + int desc_size; + int desc_shift; + int desc_count; + int total_size; + int page_count; + + struct page **page_array; + struct orangefs_bufmap_desc *desc_array; + + /* array to track usage of buffer descriptors */ + unsigned long *buffer_index_array; + + /* array to track usage of buffer descriptors for readdir */ +#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG) + unsigned long readdir_index_array[N]; +#undef N +} *__orangefs_bufmap; + +static DEFINE_SPINLOCK(orangefs_bufmap_lock); + +static void +orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap) +{ + int i; + + for (i = 0; i < bufmap->page_count; i++) + put_page(bufmap->page_array[i]); +} + +static void +orangefs_bufmap_free(struct orangefs_bufmap *bufmap) +{ + kfree(bufmap->page_array); + kfree(bufmap->desc_array); + kfree(bufmap->buffer_index_array); + kfree(bufmap); +} + +/* + * XXX: Can the size and shift change while the caller gives up the + * XXX: lock between calling this and doing something useful? + */ + +int orangefs_bufmap_size_query(void) +{ + struct orangefs_bufmap *bufmap; + int size = 0; + spin_lock(&orangefs_bufmap_lock); + bufmap = __orangefs_bufmap; + if (bufmap) + size = bufmap->desc_size; + spin_unlock(&orangefs_bufmap_lock); + return size; +} + +int orangefs_bufmap_shift_query(void) +{ + struct orangefs_bufmap *bufmap; + int shift = 0; + spin_lock(&orangefs_bufmap_lock); + bufmap = __orangefs_bufmap; + if (bufmap) + shift = bufmap->desc_shift; + spin_unlock(&orangefs_bufmap_lock); + return shift; +} + +static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq); +static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq); + +/* + * orangefs_get_bufmap_init + * + * If bufmap_init is 1, then the shared memory system, including the + * buffer_index_array, is available. Otherwise, it is not. + * + * returns the value of bufmap_init + */ +int orangefs_get_bufmap_init(void) +{ + return __orangefs_bufmap ? 
1 : 0; +} + + +static struct orangefs_bufmap * +orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc) +{ + struct orangefs_bufmap *bufmap; + + bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL); + if (!bufmap) + goto out; + + bufmap->total_size = user_desc->total_size; + bufmap->desc_count = user_desc->count; + bufmap->desc_size = user_desc->size; + bufmap->desc_shift = ilog2(bufmap->desc_size); + + bufmap->buffer_index_array = + kcalloc(BITS_TO_LONGS(bufmap->desc_count), sizeof(long), GFP_KERNEL); + if (!bufmap->buffer_index_array) { + gossip_err("orangefs: could not allocate %d buffer indices\n", + bufmap->desc_count); + goto out_free_bufmap; + } + + bufmap->desc_array = + kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc), + GFP_KERNEL); + if (!bufmap->desc_array) { + gossip_err("orangefs: could not allocate %d descriptors\n", + bufmap->desc_count); + goto out_free_index_array; + } + + bufmap->page_count = bufmap->total_size / PAGE_SIZE; + + /* allocate storage to track our page mappings */ + bufmap->page_array = + kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL); + if (!bufmap->page_array) + goto out_free_desc_array; + + return bufmap; + +out_free_desc_array: + kfree(bufmap->desc_array); +out_free_index_array: + kfree(bufmap->buffer_index_array); +out_free_bufmap: + kfree(bufmap); +out: + return NULL; +} + +static int +orangefs_bufmap_map(struct orangefs_bufmap *bufmap, + struct ORANGEFS_dev_map_desc *user_desc) +{ + int pages_per_desc = bufmap->desc_size / PAGE_SIZE; + int offset = 0, ret, i; + + /* map the pages */ + ret = get_user_pages_fast((unsigned long)user_desc->ptr, + bufmap->page_count, 1, bufmap->page_array); + + if (ret < 0) + return ret; + + if (ret != bufmap->page_count) { + gossip_err("orangefs error: asked for %d pages, only got %d.\n", + bufmap->page_count, ret); + + for (i = 0; i < ret; i++) { + SetPageError(bufmap->page_array[i]); + put_page(bufmap->page_array[i]); + } + return -ENOMEM; + } + + /* + * ideally we want to get kernel space pointers for each page, but + * we can't kmap that many pages at once if highmem is being used. + * so instead, we just kmap/kunmap the page address each time the + * kaddr is needed. + */ + for (i = 0; i < bufmap->page_count; i++) + flush_dcache_page(bufmap->page_array[i]); + + /* build a list of available descriptors */ + for (offset = 0, i = 0; i < bufmap->desc_count; i++) { + bufmap->desc_array[i].page_array = &bufmap->page_array[offset]; + bufmap->desc_array[i].array_count = pages_per_desc; + bufmap->desc_array[i].uaddr = + (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE)); + offset += pages_per_desc; + } + + return 0; +} + +/* + * orangefs_bufmap_initialize() + * + * initializes the mapped buffer interface + * + * returns 0 on success, -errno on failure + */ +int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc) +{ + struct orangefs_bufmap *bufmap; + int ret = -EINVAL; + + gossip_debug(GOSSIP_BUFMAP_DEBUG, + "orangefs_bufmap_initialize: called (ptr (" + "%p) sz (%d) cnt(%d).\n", + user_desc->ptr, + user_desc->size, + user_desc->count); + + /* + * sanity check alignment and size of buffer that caller wants to + * work with + */ + if (PAGE_ALIGN((unsigned long)user_desc->ptr) != + (unsigned long)user_desc->ptr) { + gossip_err("orangefs error: memory alignment (front). 
%p\n", + user_desc->ptr); + goto out; + } + + if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size)) + != (unsigned long)(user_desc->ptr + user_desc->total_size)) { + gossip_err("orangefs error: memory alignment (back).(%p + %d)\n", + user_desc->ptr, + user_desc->total_size); + goto out; + } + + if (user_desc->total_size != (user_desc->size * user_desc->count)) { + gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n", + user_desc->total_size, + user_desc->size, + user_desc->count); + goto out; + } + + if ((user_desc->size % PAGE_SIZE) != 0) { + gossip_err("orangefs error: bufmap size not page size divisible (%d).\n", + user_desc->size); + goto out; + } + + ret = -ENOMEM; + bufmap = orangefs_bufmap_alloc(user_desc); + if (!bufmap) + goto out; + + ret = orangefs_bufmap_map(bufmap, user_desc); + if (ret) + goto out_free_bufmap; + + + spin_lock(&orangefs_bufmap_lock); + if (__orangefs_bufmap) { + spin_unlock(&orangefs_bufmap_lock); + gossip_err("orangefs: error: bufmap already initialized.\n"); + ret = -EINVAL; + goto out_unmap_bufmap; + } + __orangefs_bufmap = bufmap; + install(&rw_map, + bufmap->desc_count, + bufmap->buffer_index_array); + install(&readdir_map, + ORANGEFS_READDIR_DEFAULT_DESC_COUNT, + bufmap->readdir_index_array); + spin_unlock(&orangefs_bufmap_lock); + + gossip_debug(GOSSIP_BUFMAP_DEBUG, + "orangefs_bufmap_initialize: exiting normally\n"); + return 0; + +out_unmap_bufmap: + orangefs_bufmap_unmap(bufmap); +out_free_bufmap: + orangefs_bufmap_free(bufmap); +out: + return ret; +} + +/* + * orangefs_bufmap_finalize() + * + * shuts down the mapped buffer interface and releases any resources + * associated with it + * + * no return value + */ +void orangefs_bufmap_finalize(void) +{ + struct orangefs_bufmap *bufmap = __orangefs_bufmap; + if (!bufmap) + return; + gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n"); + mark_killed(&rw_map); + mark_killed(&readdir_map); + gossip_debug(GOSSIP_BUFMAP_DEBUG, + "orangefs_bufmap_finalize: exiting normally\n"); +} + +void orangefs_bufmap_run_down(void) +{ + struct orangefs_bufmap *bufmap = __orangefs_bufmap; + if (!bufmap) + return; + run_down(&rw_map); + run_down(&readdir_map); + spin_lock(&orangefs_bufmap_lock); + __orangefs_bufmap = NULL; + spin_unlock(&orangefs_bufmap_lock); + orangefs_bufmap_unmap(bufmap); + orangefs_bufmap_free(bufmap); +} + +/* + * orangefs_bufmap_get() + * + * gets a free mapped buffer descriptor, will sleep until one becomes + * available if necessary + * + * returns slot on success, -errno on failure + */ +int orangefs_bufmap_get(void) +{ + return get(&rw_map); +} + +/* + * orangefs_bufmap_put() + * + * returns a mapped buffer descriptor to the collection + * + * no return value + */ +void orangefs_bufmap_put(int buffer_index) +{ + put(&rw_map, buffer_index); +} + +/* + * orangefs_readdir_index_get() + * + * gets a free descriptor, will sleep until one becomes + * available if necessary. + * Although the readdir buffers are not mapped into kernel space + * we could do that at a later point of time. Regardless, these + * indices are used by the client-core. + * + * returns slot on success, -errno on failure + */ +int orangefs_readdir_index_get(void) +{ + return get(&readdir_map); +} + +void orangefs_readdir_index_put(int buffer_index) +{ + put(&readdir_map, buffer_index); +} + +/* + * we've been handed an iovec, we need to copy it to + * the shared memory descriptor at "buffer_index". 
+ */ +int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter, + int buffer_index, + size_t size) +{ + struct orangefs_bufmap_desc *to; + int i; + + gossip_debug(GOSSIP_BUFMAP_DEBUG, + "%s: buffer_index:%d: size:%zu:\n", + __func__, buffer_index, size); + + to = &__orangefs_bufmap->desc_array[buffer_index]; + for (i = 0; size; i++) { + struct page *page = to->page_array[i]; + size_t n = size; + if (n > PAGE_SIZE) + n = PAGE_SIZE; + n = copy_page_from_iter(page, 0, n, iter); + if (!n) + return -EFAULT; + size -= n; + } + return 0; + +} + +/* + * we've been handed an iovec, we need to fill it from + * the shared memory descriptor at "buffer_index". + */ +int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter, + int buffer_index, + size_t size) +{ + struct orangefs_bufmap_desc *from; + int i; + + from = &__orangefs_bufmap->desc_array[buffer_index]; + gossip_debug(GOSSIP_BUFMAP_DEBUG, + "%s: buffer_index:%d: size:%zu:\n", + __func__, buffer_index, size); + + + for (i = 0; size; i++) { + struct page *page = from->page_array[i]; + size_t n = size; + if (n > PAGE_SIZE) + n = PAGE_SIZE; + n = copy_page_to_iter(page, 0, n, iter); + if (!n) + return -EFAULT; + size -= n; + } + return 0; +} diff --git a/fs/orangefs/orangefs-bufmap.h b/fs/orangefs/orangefs-bufmap.h new file mode 100644 index 000000000000..71f64f4057b5 --- /dev/null +++ b/fs/orangefs/orangefs-bufmap.h @@ -0,0 +1,36 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +#ifndef __ORANGEFS_BUFMAP_H +#define __ORANGEFS_BUFMAP_H + +int orangefs_bufmap_size_query(void); + +int orangefs_bufmap_shift_query(void); + +int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc); + +void orangefs_bufmap_finalize(void); + +void orangefs_bufmap_run_down(void); + +int orangefs_bufmap_get(void); + +void orangefs_bufmap_put(int buffer_index); + +int orangefs_readdir_index_get(void); + +void orangefs_readdir_index_put(int buffer_index); + +int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter, + int buffer_index, + size_t size); + +int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter, + int buffer_index, + size_t size); + +#endif /* __ORANGEFS_BUFMAP_H */ diff --git a/fs/orangefs/orangefs-cache.c b/fs/orangefs/orangefs-cache.c new file mode 100644 index 000000000000..900a2e38e11b --- /dev/null +++ b/fs/orangefs/orangefs-cache.c @@ -0,0 +1,161 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. 
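+ *
+ * This file provides the slab cache behind op_alloc()/op_release()
+ * and the tags (orangefs_new_tag()) that match each upcall with its
+ * downcall. Users throughout the VFS code follow one pattern, shown
+ * here as a rough sketch (see orangefs_create() in namei.c for a
+ * complete example):
+ *
+ *	new_op = op_alloc(type);
+ *	if (!new_op)
+ *		return -ENOMEM;
+ *	... fill in new_op->upcall.req ...
+ *	ret = service_operation(new_op, __func__, flags);
+ *	... inspect new_op->downcall.resp ...
+ *	op_release(new_op);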
+ */ + +#include "protocol.h" +#include "orangefs-kernel.h" + +/* tags assigned to kernel upcall operations */ +static __u64 next_tag_value; +static DEFINE_SPINLOCK(next_tag_value_lock); + +/* the orangefs memory caches */ + +/* a cache for orangefs upcall/downcall operations */ +static struct kmem_cache *op_cache; + +int op_cache_initialize(void) +{ + op_cache = kmem_cache_create("orangefs_op_cache", + sizeof(struct orangefs_kernel_op_s), + 0, + ORANGEFS_CACHE_CREATE_FLAGS, + NULL); + + if (!op_cache) { + gossip_err("Cannot create orangefs_op_cache\n"); + return -ENOMEM; + } + + /* initialize our atomic tag counter */ + spin_lock(&next_tag_value_lock); + next_tag_value = 100; + spin_unlock(&next_tag_value_lock); + return 0; +} + +int op_cache_finalize(void) +{ + kmem_cache_destroy(op_cache); + return 0; +} + +char *get_opname_string(struct orangefs_kernel_op_s *new_op) +{ + if (new_op) { + __s32 type = new_op->upcall.type; + + if (type == ORANGEFS_VFS_OP_FILE_IO) + return "OP_FILE_IO"; + else if (type == ORANGEFS_VFS_OP_LOOKUP) + return "OP_LOOKUP"; + else if (type == ORANGEFS_VFS_OP_CREATE) + return "OP_CREATE"; + else if (type == ORANGEFS_VFS_OP_GETATTR) + return "OP_GETATTR"; + else if (type == ORANGEFS_VFS_OP_REMOVE) + return "OP_REMOVE"; + else if (type == ORANGEFS_VFS_OP_MKDIR) + return "OP_MKDIR"; + else if (type == ORANGEFS_VFS_OP_READDIR) + return "OP_READDIR"; + else if (type == ORANGEFS_VFS_OP_READDIRPLUS) + return "OP_READDIRPLUS"; + else if (type == ORANGEFS_VFS_OP_SETATTR) + return "OP_SETATTR"; + else if (type == ORANGEFS_VFS_OP_SYMLINK) + return "OP_SYMLINK"; + else if (type == ORANGEFS_VFS_OP_RENAME) + return "OP_RENAME"; + else if (type == ORANGEFS_VFS_OP_STATFS) + return "OP_STATFS"; + else if (type == ORANGEFS_VFS_OP_TRUNCATE) + return "OP_TRUNCATE"; + else if (type == ORANGEFS_VFS_OP_MMAP_RA_FLUSH) + return "OP_MMAP_RA_FLUSH"; + else if (type == ORANGEFS_VFS_OP_FS_MOUNT) + return "OP_FS_MOUNT"; + else if (type == ORANGEFS_VFS_OP_FS_UMOUNT) + return "OP_FS_UMOUNT"; + else if (type == ORANGEFS_VFS_OP_GETXATTR) + return "OP_GETXATTR"; + else if (type == ORANGEFS_VFS_OP_SETXATTR) + return "OP_SETXATTR"; + else if (type == ORANGEFS_VFS_OP_LISTXATTR) + return "OP_LISTXATTR"; + else if (type == ORANGEFS_VFS_OP_REMOVEXATTR) + return "OP_REMOVEXATTR"; + else if (type == ORANGEFS_VFS_OP_PARAM) + return "OP_PARAM"; + else if (type == ORANGEFS_VFS_OP_PERF_COUNT) + return "OP_PERF_COUNT"; + else if (type == ORANGEFS_VFS_OP_CANCEL) + return "OP_CANCEL"; + else if (type == ORANGEFS_VFS_OP_FSYNC) + return "OP_FSYNC"; + else if (type == ORANGEFS_VFS_OP_FSKEY) + return "OP_FSKEY"; + } + return "OP_UNKNOWN?"; +} + +void orangefs_new_tag(struct orangefs_kernel_op_s *op) +{ + spin_lock(&next_tag_value_lock); + op->tag = next_tag_value++; + if (next_tag_value == 0) + next_tag_value = 100; + spin_unlock(&next_tag_value_lock); +} + +struct orangefs_kernel_op_s *op_alloc(__s32 type) +{ + struct orangefs_kernel_op_s *new_op = NULL; + + new_op = kmem_cache_zalloc(op_cache, GFP_KERNEL); + if (new_op) { + INIT_LIST_HEAD(&new_op->list); + spin_lock_init(&new_op->lock); + init_completion(&new_op->waitq); + + new_op->upcall.type = ORANGEFS_VFS_OP_INVALID; + new_op->downcall.type = ORANGEFS_VFS_OP_INVALID; + new_op->downcall.status = -1; + + new_op->op_state = OP_VFS_STATE_UNKNOWN; + + /* initialize the op specific tag and upcall credentials */ + orangefs_new_tag(new_op); + new_op->upcall.type = type; + new_op->attempts = 0; + gossip_debug(GOSSIP_CACHE_DEBUG, + "Alloced OP (%p: %llu %s)\n", + new_op, 
+ llu(new_op->tag), + get_opname_string(new_op)); + + new_op->upcall.uid = from_kuid(current_user_ns(), + current_fsuid()); + + new_op->upcall.gid = from_kgid(current_user_ns(), + current_fsgid()); + } else { + gossip_err("op_alloc: kmem_cache_zalloc failed!\n"); + } + return new_op; +} + +void op_release(struct orangefs_kernel_op_s *orangefs_op) +{ + if (orangefs_op) { + gossip_debug(GOSSIP_CACHE_DEBUG, + "Releasing OP (%p: %llu)\n", + orangefs_op, + llu(orangefs_op->tag)); + kmem_cache_free(op_cache, orangefs_op); + } else { + gossip_err("NULL pointer in op_release\n"); + } +} diff --git a/fs/orangefs/orangefs-debug.h b/fs/orangefs/orangefs-debug.h new file mode 100644 index 000000000000..387db17cde2b --- /dev/null +++ b/fs/orangefs/orangefs-debug.h @@ -0,0 +1,92 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* This file just defines debugging masks to be used with the gossip + * logging utility. All debugging masks for ORANGEFS are kept here to make + * sure we don't have collisions. + */ + +#ifndef __ORANGEFS_DEBUG_H +#define __ORANGEFS_DEBUG_H + +#ifdef __KERNEL__ +#include <linux/types.h> +#else +#include <stdint.h> +#endif + +#define GOSSIP_NO_DEBUG (__u64)0 + +#define GOSSIP_SUPER_DEBUG ((__u64)1 << 0) +#define GOSSIP_INODE_DEBUG ((__u64)1 << 1) +#define GOSSIP_FILE_DEBUG ((__u64)1 << 2) +#define GOSSIP_DIR_DEBUG ((__u64)1 << 3) +#define GOSSIP_UTILS_DEBUG ((__u64)1 << 4) +#define GOSSIP_WAIT_DEBUG ((__u64)1 << 5) +#define GOSSIP_ACL_DEBUG ((__u64)1 << 6) +#define GOSSIP_DCACHE_DEBUG ((__u64)1 << 7) +#define GOSSIP_DEV_DEBUG ((__u64)1 << 8) +#define GOSSIP_NAME_DEBUG ((__u64)1 << 9) +#define GOSSIP_BUFMAP_DEBUG ((__u64)1 << 10) +#define GOSSIP_CACHE_DEBUG ((__u64)1 << 11) +#define GOSSIP_DEBUGFS_DEBUG ((__u64)1 << 12) +#define GOSSIP_XATTR_DEBUG ((__u64)1 << 13) +#define GOSSIP_INIT_DEBUG ((__u64)1 << 14) +#define GOSSIP_SYSFS_DEBUG ((__u64)1 << 15) + +#define GOSSIP_MAX_NR 16 +#define GOSSIP_MAX_DEBUG (((__u64)1 << GOSSIP_MAX_NR) - 1) + +/*function prototypes*/ +__u64 ORANGEFS_kmod_eventlog_to_mask(const char *event_logging); +__u64 ORANGEFS_debug_eventlog_to_mask(const char *event_logging); +char *ORANGEFS_debug_mask_to_eventlog(__u64 mask); +char *ORANGEFS_kmod_mask_to_eventlog(__u64 mask); + +/* a private internal type */ +struct __keyword_mask_s { + const char *keyword; + __u64 mask_val; +}; + +/* + * Map all kmod keywords to kmod debug masks here. Keep this + * structure "packed": + * + * "all" is always last... + * + * keyword mask_val index + * foo 1 0 + * bar 2 1 + * baz 4 2 + * qux 8 3 + * . . . 
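+ *
+ * A comma-separated keyword string ORs the matching masks together,
+ * so "inode,file" selects GOSSIP_INODE_DEBUG | GOSSIP_FILE_DEBUG,
+ * and "all" maps to GOSSIP_MAX_DEBUG, i.e. all GOSSIP_MAX_NR mask
+ * bits set.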
+ */ +static struct __keyword_mask_s s_kmod_keyword_mask_map[] = { + {"super", GOSSIP_SUPER_DEBUG}, + {"inode", GOSSIP_INODE_DEBUG}, + {"file", GOSSIP_FILE_DEBUG}, + {"dir", GOSSIP_DIR_DEBUG}, + {"utils", GOSSIP_UTILS_DEBUG}, + {"wait", GOSSIP_WAIT_DEBUG}, + {"acl", GOSSIP_ACL_DEBUG}, + {"dcache", GOSSIP_DCACHE_DEBUG}, + {"dev", GOSSIP_DEV_DEBUG}, + {"name", GOSSIP_NAME_DEBUG}, + {"bufmap", GOSSIP_BUFMAP_DEBUG}, + {"cache", GOSSIP_CACHE_DEBUG}, + {"debugfs", GOSSIP_DEBUGFS_DEBUG}, + {"xattr", GOSSIP_XATTR_DEBUG}, + {"init", GOSSIP_INIT_DEBUG}, + {"sysfs", GOSSIP_SYSFS_DEBUG}, + {"none", GOSSIP_NO_DEBUG}, + {"all", GOSSIP_MAX_DEBUG} +}; + +static const int num_kmod_keyword_mask_map = (int) + (sizeof(s_kmod_keyword_mask_map) / sizeof(struct __keyword_mask_s)); + +#endif /* __ORANGEFS_DEBUG_H */ diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c new file mode 100644 index 000000000000..19670b8b4053 --- /dev/null +++ b/fs/orangefs/orangefs-debugfs.c @@ -0,0 +1,455 @@ +/* + * What: /sys/kernel/debug/orangefs/debug-help + * Date: June 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * List of client and kernel debug keywords. + * + * + * What: /sys/kernel/debug/orangefs/client-debug + * Date: June 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * Debug setting for "the client", the userspace + * helper for the kernel module. + * + * + * What: /sys/kernel/debug/orangefs/kernel-debug + * Date: June 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * Debug setting for the orangefs kernel module. + * + * Any of the keywords, or comma-separated lists + * of keywords, from debug-help can be catted to + * client-debug or kernel-debug. + * + * "none", "all" and "verbose" are special keywords + * for client-debug. Setting client-debug to "all" + * is kind of like trying to drink water from a + * fire hose, "verbose" triggers most of the same + * output except for the constant flow of output + * from the main wait loop. + * + * "none" and "all" are similar settings for kernel-debug; + * no need for a "verbose". + */ +#include <linux/debugfs.h> +#include <linux/slab.h> + +#include <linux/uaccess.h> + +#include "orangefs-debugfs.h" +#include "protocol.h" +#include "orangefs-kernel.h" + +static int orangefs_debug_disabled = 1; + +static int orangefs_debug_help_open(struct inode *, struct file *); + +const struct file_operations debug_help_fops = { + .open = orangefs_debug_help_open, + .read = seq_read, + .release = seq_release, + .llseek = seq_lseek, +}; + +static void *help_start(struct seq_file *, loff_t *); +static void *help_next(struct seq_file *, void *, loff_t *); +static void help_stop(struct seq_file *, void *); +static int help_show(struct seq_file *, void *); + +static const struct seq_operations help_debug_ops = { + .start = help_start, + .next = help_next, + .stop = help_stop, + .show = help_show, +}; + +/* + * Used to protect data in ORANGEFS_KMOD_DEBUG_FILE and + * ORANGEFS_CLIENT_DEBUG_FILE. 
+ */ +static DEFINE_MUTEX(orangefs_debug_lock); + +int orangefs_debug_open(struct inode *, struct file *); + +static ssize_t orangefs_debug_read(struct file *, + char __user *, + size_t, + loff_t *); + +static ssize_t orangefs_debug_write(struct file *, + const char __user *, + size_t, + loff_t *); + +static const struct file_operations kernel_debug_fops = { + .open = orangefs_debug_open, + .read = orangefs_debug_read, + .write = orangefs_debug_write, + .llseek = generic_file_llseek, +}; + +/* + * initialize kmod debug operations, create orangefs debugfs dir and + * ORANGEFS_KMOD_DEBUG_HELP_FILE. + */ +int orangefs_debugfs_init(void) +{ + + int rc = -ENOMEM; + + debug_dir = debugfs_create_dir("orangefs", NULL); + if (!debug_dir) { + pr_info("%s: debugfs_create_dir failed.\n", __func__); + goto out; + } + + help_file_dentry = debugfs_create_file(ORANGEFS_KMOD_DEBUG_HELP_FILE, + 0444, + debug_dir, + debug_help_string, + &debug_help_fops); + if (!help_file_dentry) { + pr_info("%s: debugfs_create_file failed.\n", __func__); + goto out; + } + + orangefs_debug_disabled = 0; + rc = 0; + +out: + + return rc; +} + +void orangefs_debugfs_cleanup(void) +{ + if (debug_dir) + debugfs_remove_recursive(debug_dir); +} + +/* open ORANGEFS_KMOD_DEBUG_HELP_FILE */ +static int orangefs_debug_help_open(struct inode *inode, struct file *file) +{ + int rc = -ENODEV; + int ret; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_help_open: start\n"); + + if (orangefs_debug_disabled) + goto out; + + ret = seq_open(file, &help_debug_ops); + if (ret) + goto out; + + ((struct seq_file *)(file->private_data))->private = inode->i_private; + + rc = 0; + +out: + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_help_open: rc:%d:\n", + rc); + return rc; +} + +/* + * I think start always gets called again after stop. Start + * needs to return NULL when it is done. The whole "payload" + * in this case is a single (long) string, so by the second + * time we get to start (pos = 1), we're done. + */ +static void *help_start(struct seq_file *m, loff_t *pos) +{ + void *payload = NULL; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n"); + + if (*pos == 0) + payload = m->private; + + return payload; +} + +static void *help_next(struct seq_file *m, void *v, loff_t *pos) +{ + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n"); + + return NULL; +} + +static void help_stop(struct seq_file *m, void *p) +{ + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n"); +} + +static int help_show(struct seq_file *m, void *v) +{ + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_show: start\n"); + + seq_puts(m, v); + + return 0; +} + +/* + * initialize the kernel-debug file. 
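+ *
+ * Its initial contents come from kernel_debug_string, which
+ * orangefs_init() derives from the module_parm_debug_mask module
+ * parameter at load time.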
+ */ +int orangefs_kernel_debug_init(void) +{ + int rc = -ENOMEM; + struct dentry *ret; + char *k_buffer = NULL; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); + + k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); + if (!k_buffer) + goto out; + + if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { + strcpy(k_buffer, kernel_debug_string); + strcat(k_buffer, "\n"); + } else { + strcpy(k_buffer, "none\n"); + pr_info("%s: overflow 1!\n", __func__); + } + + ret = debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, + 0444, + debug_dir, + k_buffer, + &kernel_debug_fops); + if (!ret) { + pr_info("%s: failed to create %s.\n", + __func__, + ORANGEFS_KMOD_DEBUG_FILE); + goto out; + } + + rc = 0; + +out: + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc); + return rc; +} + +/* + * initialize the client-debug file. + */ +int orangefs_client_debug_init(void) +{ + + int rc = -ENOMEM; + char *c_buffer = NULL; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); + + c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); + if (!c_buffer) + goto out; + + if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { + strcpy(c_buffer, client_debug_string); + strcat(c_buffer, "\n"); + } else { + strcpy(c_buffer, "none\n"); + pr_info("%s: overflow 2!\n", __func__); + } + + client_debug_dentry = debugfs_create_file(ORANGEFS_CLIENT_DEBUG_FILE, + 0444, + debug_dir, + c_buffer, + &kernel_debug_fops); + if (!client_debug_dentry) { + pr_info("%s: failed to create updated %s.\n", + __func__, + ORANGEFS_CLIENT_DEBUG_FILE); + goto out; + } + + rc = 0; + +out: + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc); + return rc; +} + +/* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/ +int orangefs_debug_open(struct inode *inode, struct file *file) +{ + int rc = -ENODEV; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "%s: orangefs_debug_disabled: %d\n", + __func__, + orangefs_debug_disabled); + + if (orangefs_debug_disabled) + goto out; + + rc = 0; + mutex_lock(&orangefs_debug_lock); + file->private_data = inode->i_private; + mutex_unlock(&orangefs_debug_lock); + +out: + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_open: rc: %d\n", + rc); + return rc; +} + +static ssize_t orangefs_debug_read(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + char *buf; + int sprintf_ret; + ssize_t read_ret = -ENOMEM; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_read: start\n"); + + buf = kmalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); + if (!buf) + goto out; + + mutex_lock(&orangefs_debug_lock); + sprintf_ret = sprintf(buf, "%s", (char *)file->private_data); + mutex_unlock(&orangefs_debug_lock); + + read_ret = simple_read_from_buffer(ubuf, count, ppos, buf, sprintf_ret); + + kfree(buf); + +out: + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_read: ret: %zd\n", + read_ret); + + return read_ret; +} + +static ssize_t orangefs_debug_write(struct file *file, + const char __user *ubuf, + size_t count, + loff_t *ppos) +{ + char *buf; + int rc = -EFAULT; + size_t silly = 0; + char *debug_string; + struct orangefs_kernel_op_s *new_op = NULL; + struct client_debug_mask c_mask = { NULL, 0, 0 }; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_write: %s\n", + file->f_path.dentry->d_name.name); + + /* + * Thwart users who try to jam a ridiculous number + * of bytes into the debug file... 
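+ * The write is clamped to the buffer size, but the original count
+ * is remembered in "silly" so that an oversized write can still be
+ * reported back to the caller as fully consumed.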
+ */ + if (count > ORANGEFS_MAX_DEBUG_STRING_LEN + 1) { + silly = count; + count = ORANGEFS_MAX_DEBUG_STRING_LEN + 1; + } + + buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); + if (!buf) + goto out; + + if (copy_from_user(buf, ubuf, count - 1)) { + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "%s: copy_from_user failed!\n", + __func__); + goto out; + } + + /* + * Map the keyword string from userspace into a valid debug mask. + * The mapping process involves mapping the human-inputted string + * into a valid mask, and then rebuilding the string from the + * verified valid mask. + * + * A service operation is required to set a new client-side + * debug mask. + */ + if (!strcmp(file->f_path.dentry->d_name.name, + ORANGEFS_KMOD_DEBUG_FILE)) { + debug_string_to_mask(buf, &gossip_debug_mask, 0); + debug_mask_to_string(&gossip_debug_mask, 0); + debug_string = kernel_debug_string; + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "New kernel debug string is %s\n", + kernel_debug_string); + } else { + /* Can't reset client debug mask if client is not running. */ + if (is_daemon_in_service()) { + pr_info("%s: Client not running :%d:\n", + __func__, + is_daemon_in_service()); + goto out; + } + + debug_string_to_mask(buf, &c_mask, 1); + debug_mask_to_string(&c_mask, 1); + debug_string = client_debug_string; + + new_op = op_alloc(ORANGEFS_VFS_OP_PARAM); + if (!new_op) { + pr_info("%s: op_alloc failed!\n", __func__); + goto out; + } + + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_TWO_MASK_VALUES; + new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET; + memset(new_op->upcall.req.param.s_value, + 0, + ORANGEFS_MAX_DEBUG_STRING_LEN); + sprintf(new_op->upcall.req.param.s_value, + "%llx %llx\n", + c_mask.mask1, + c_mask.mask2); + + /* service_operation returns 0 on success... */ + rc = service_operation(new_op, + "orangefs_param", + ORANGEFS_OP_INTERRUPTIBLE); + + if (rc) + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "%s: service_operation failed! rc:%d:\n", + __func__, + rc); + + op_release(new_op); + } + + mutex_lock(&orangefs_debug_lock); + memset(file->f_inode->i_private, 0, ORANGEFS_MAX_DEBUG_STRING_LEN); + sprintf((char *)file->f_inode->i_private, "%s\n", debug_string); + mutex_unlock(&orangefs_debug_lock); + + *ppos += count; + if (silly) + rc = silly; + else + rc = count; + +out: + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_write: rc: %d\n", + rc); + kfree(buf); + return rc; +} diff --git a/fs/orangefs/orangefs-debugfs.h b/fs/orangefs/orangefs-debugfs.h new file mode 100644 index 000000000000..e4828c0e3ef9 --- /dev/null +++ b/fs/orangefs/orangefs-debugfs.h @@ -0,0 +1,3 @@ +int orangefs_debugfs_init(void); +int orangefs_kernel_debug_init(void); +void orangefs_debugfs_cleanup(void); diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h new file mode 100644 index 000000000000..9eac9d9a3f3a --- /dev/null +++ b/fs/orangefs/orangefs-dev-proto.h @@ -0,0 +1,62 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. 
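+ *
+ * The ORANGEFS_VFS_OP_* codes below tag every upcall so that the
+ * userspace client-core can dispatch it; they (and the limits that
+ * follow) must stay identical on both sides of the device protocol.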
+ */ + +#ifndef _ORANGEFS_DEV_PROTO_H +#define _ORANGEFS_DEV_PROTO_H + +/* + * types and constants shared between user space and kernel space for + * device interaction using a common protocol + */ + +/* + * valid orangefs kernel operation types + */ +#define ORANGEFS_VFS_OP_INVALID 0xFF000000 +#define ORANGEFS_VFS_OP_FILE_IO 0xFF000001 +#define ORANGEFS_VFS_OP_LOOKUP 0xFF000002 +#define ORANGEFS_VFS_OP_CREATE 0xFF000003 +#define ORANGEFS_VFS_OP_GETATTR 0xFF000004 +#define ORANGEFS_VFS_OP_REMOVE 0xFF000005 +#define ORANGEFS_VFS_OP_MKDIR 0xFF000006 +#define ORANGEFS_VFS_OP_READDIR 0xFF000007 +#define ORANGEFS_VFS_OP_SETATTR 0xFF000008 +#define ORANGEFS_VFS_OP_SYMLINK 0xFF000009 +#define ORANGEFS_VFS_OP_RENAME 0xFF00000A +#define ORANGEFS_VFS_OP_STATFS 0xFF00000B +#define ORANGEFS_VFS_OP_TRUNCATE 0xFF00000C +#define ORANGEFS_VFS_OP_MMAP_RA_FLUSH 0xFF00000D +#define ORANGEFS_VFS_OP_FS_MOUNT 0xFF00000E +#define ORANGEFS_VFS_OP_FS_UMOUNT 0xFF00000F +#define ORANGEFS_VFS_OP_GETXATTR 0xFF000010 +#define ORANGEFS_VFS_OP_SETXATTR 0xFF000011 +#define ORANGEFS_VFS_OP_LISTXATTR 0xFF000012 +#define ORANGEFS_VFS_OP_REMOVEXATTR 0xFF000013 +#define ORANGEFS_VFS_OP_PARAM 0xFF000014 +#define ORANGEFS_VFS_OP_PERF_COUNT 0xFF000015 +#define ORANGEFS_VFS_OP_CANCEL 0xFF00EE00 +#define ORANGEFS_VFS_OP_FSYNC 0xFF00EE01 +#define ORANGEFS_VFS_OP_FSKEY 0xFF00EE02 +#define ORANGEFS_VFS_OP_READDIRPLUS 0xFF00EE03 + +/* + * Misc constants. Please retain them as multiples of 8! + * Otherwise 32-64 bit interactions will be messed up :) + */ +#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000400 +#define ORANGEFS_MAX_DEBUG_ARRAY_LEN 0x00000800 + +/* + * The maximum number of directory entries in a single request is 96. + * XXX: Why can this not be higher? The client-side code can handle up to 512. + * XXX: What happens if we expect more than the client can return? + */ +#define ORANGEFS_MAX_DIRENT_COUNT_READDIR 96 + +#include "upcall.h" +#include "downcall.h" + +#endif diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h new file mode 100644 index 000000000000..a9925e296ceb --- /dev/null +++ b/fs/orangefs/orangefs-kernel.h @@ -0,0 +1,623 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* + * The ORANGEFS Linux kernel support allows ORANGEFS volumes to be mounted and + * accessed through the Linux VFS (i.e. using standard I/O system calls). + * This support is only needed on clients that wish to mount the file system. + * + */ + +/* + * Declarations and macros for the ORANGEFS Linux kernel support. 
+ */ + +#ifndef __ORANGEFSKERNEL_H +#define __ORANGEFSKERNEL_H + +#include <linux/kernel.h> +#include <linux/moduleparam.h> +#include <linux/statfs.h> +#include <linux/backing-dev.h> +#include <linux/device.h> +#include <linux/mpage.h> +#include <linux/namei.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/vmalloc.h> + +#include <linux/aio.h> +#include <linux/posix_acl.h> +#include <linux/posix_acl_xattr.h> +#include <linux/compat.h> +#include <linux/mount.h> +#include <linux/uaccess.h> +#include <linux/atomic.h> +#include <linux/uio.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/wait.h> +#include <linux/dcache.h> +#include <linux/pagemap.h> +#include <linux/poll.h> +#include <linux/rwsem.h> +#include <linux/xattr.h> +#include <linux/exportfs.h> + +#include <asm/unaligned.h> + +#include "orangefs-dev-proto.h" + +#ifdef ORANGEFS_KERNEL_DEBUG +#define ORANGEFS_DEFAULT_OP_TIMEOUT_SECS 10 +#else +#define ORANGEFS_DEFAULT_OP_TIMEOUT_SECS 20 +#endif + +#define ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS 30 + +#define ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS 900 /* 15 minutes */ + +#define ORANGEFS_REQDEVICE_NAME "pvfs2-req" + +#define ORANGEFS_DEVREQ_MAGIC 0x20030529 +#define ORANGEFS_LINK_MAX 0x000000FF +#define ORANGEFS_PURGE_RETRY_COUNT 0x00000005 +#define ORANGEFS_MAX_NUM_OPTIONS 0x00000004 +#define ORANGEFS_MAX_MOUNT_OPT_LEN 0x00000080 +#define ORANGEFS_MAX_FSKEY_LEN 64 + +#define MAX_DEV_REQ_UPSIZE (2 * sizeof(__s32) + \ +sizeof(__u64) + sizeof(struct orangefs_upcall_s)) +#define MAX_DEV_REQ_DOWNSIZE (2 * sizeof(__s32) + \ +sizeof(__u64) + sizeof(struct orangefs_downcall_s)) + +/* + * valid orangefs kernel operation states + * + * unknown - op was just initialized + * waiting - op is on request_list (upward bound) + * inprogr - op is in progress (waiting for downcall) + * serviced - op has matching downcall; ok + * purged - op has to start a timer since client-core + * exited uncleanly before servicing op + * given up - submitter has given up waiting for it + */ +enum orangefs_vfs_op_states { + OP_VFS_STATE_UNKNOWN = 0, + OP_VFS_STATE_WAITING = 1, + OP_VFS_STATE_INPROGR = 2, + OP_VFS_STATE_SERVICED = 4, + OP_VFS_STATE_PURGED = 8, + OP_VFS_STATE_GIVEN_UP = 16, +}; + +/* + * An array of client_debug_mask will be built to hold debug keyword/mask + * values fetched from userspace. + */ +struct client_debug_mask { + char *keyword; + __u64 mask1; + __u64 mask2; +}; + +/* + * orangefs kernel memory related flags + */ + +#if ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB)) +#define ORANGEFS_CACHE_CREATE_FLAGS SLAB_RED_ZONE +#else +#define ORANGEFS_CACHE_CREATE_FLAGS 0 +#endif /* ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB)) */ + +/* orangefs xattr and acl related defines */ +#define ORANGEFS_XATTR_INDEX_POSIX_ACL_ACCESS 1 +#define ORANGEFS_XATTR_INDEX_POSIX_ACL_DEFAULT 2 +#define ORANGEFS_XATTR_INDEX_TRUSTED 3 +#define ORANGEFS_XATTR_INDEX_DEFAULT 4 + +#define ORANGEFS_XATTR_NAME_ACL_ACCESS XATTR_NAME_POSIX_ACL_ACCESS +#define ORANGEFS_XATTR_NAME_ACL_DEFAULT XATTR_NAME_POSIX_ACL_DEFAULT +#define ORANGEFS_XATTR_NAME_TRUSTED_PREFIX "trusted." 
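+/*
+ * These index/prefix pairs back the xattr handlers collected in
+ * orangefs_xattr_handlers[] (see xattr.c); the default namespace
+ * deliberately uses an empty prefix.
+ */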
+#define ORANGEFS_XATTR_NAME_DEFAULT_PREFIX "" + +/* these functions are defined in orangefs-utils.c */ +int orangefs_prepare_cdm_array(char *debug_array_string); +int orangefs_prepare_debugfs_help_string(int); + +/* defined in orangefs-debugfs.c */ +int orangefs_client_debug_init(void); + +void debug_string_to_mask(char *, void *, int); +void do_c_mask(int, char *, struct client_debug_mask **); +void do_k_mask(int, char *, __u64 **); + +void debug_mask_to_string(void *, int); +void do_k_string(void *, int); +void do_c_string(void *, int); +int check_amalgam_keyword(void *, int); +int keyword_is_amalgam(char *); + +/*these variables are defined in orangefs-mod.c */ +extern char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; +extern char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; +extern char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; +extern unsigned int kernel_mask_set_mod_init; + +extern int orangefs_init_acl(struct inode *inode, struct inode *dir); +extern const struct xattr_handler *orangefs_xattr_handlers[]; + +extern struct posix_acl *orangefs_get_acl(struct inode *inode, int type); +extern int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type); + +/* + * Redefine xtvec structure so that we could move helper functions out of + * the define + */ +struct xtvec { + __kernel_off_t xtv_off; /* must be off_t */ + __kernel_size_t xtv_len; /* must be size_t */ +}; + +/* + * orangefs data structures + */ +struct orangefs_kernel_op_s { + enum orangefs_vfs_op_states op_state; + __u64 tag; + + /* + * Set uses_shared_memory to non zero if this operation uses + * shared memory. If true, then a retry on the op must also + * get a new shared memory buffer and re-populate it. + * Cancels don't care - it only matters for service_operation() + * retry logics and cancels don't go through it anymore. It + * safely stays non-zero when we use it as slot_to_free. 
+ */ + union { + int uses_shared_memory; + int slot_to_free; + }; + + struct orangefs_upcall_s upcall; + struct orangefs_downcall_s downcall; + + struct completion waitq; + spinlock_t lock; + + int attempts; + + struct list_head list; +}; + +#define set_op_state_waiting(op) ((op)->op_state = OP_VFS_STATE_WAITING) +#define set_op_state_inprogress(op) ((op)->op_state = OP_VFS_STATE_INPROGR) +#define set_op_state_given_up(op) ((op)->op_state = OP_VFS_STATE_GIVEN_UP) +static inline void set_op_state_serviced(struct orangefs_kernel_op_s *op) +{ + op->op_state = OP_VFS_STATE_SERVICED; + complete(&op->waitq); +} + +#define op_state_waiting(op) ((op)->op_state & OP_VFS_STATE_WAITING) +#define op_state_in_progress(op) ((op)->op_state & OP_VFS_STATE_INPROGR) +#define op_state_serviced(op) ((op)->op_state & OP_VFS_STATE_SERVICED) +#define op_state_purged(op) ((op)->op_state & OP_VFS_STATE_PURGED) +#define op_state_given_up(op) ((op)->op_state & OP_VFS_STATE_GIVEN_UP) +#define op_is_cancel(op) ((op)->upcall.type == ORANGEFS_VFS_OP_CANCEL) + +void op_release(struct orangefs_kernel_op_s *op); + +extern void orangefs_bufmap_put(int); +static inline void put_cancel(struct orangefs_kernel_op_s *op) +{ + orangefs_bufmap_put(op->slot_to_free); + op_release(op); +} + +static inline void set_op_state_purged(struct orangefs_kernel_op_s *op) +{ + spin_lock(&op->lock); + if (unlikely(op_is_cancel(op))) { + list_del_init(&op->list); + spin_unlock(&op->lock); + put_cancel(op); + } else { + op->op_state |= OP_VFS_STATE_PURGED; + complete(&op->waitq); + spin_unlock(&op->lock); + } +} + +/* per inode private orangefs info */ +struct orangefs_inode_s { + struct orangefs_object_kref refn; + char link_target[ORANGEFS_NAME_MAX]; + __s64 blksize; + /* + * Reading/Writing Extended attributes need to acquire the appropriate + * reader/writer semaphore on the orangefs_inode_s structure. 
+ */ + struct rw_semaphore xattr_sem; + + struct inode vfs_inode; + sector_t last_failed_block_index_read; + + /* + * State of in-memory attributes not yet flushed to disk associated + * with this object + */ + unsigned long pinode_flags; +}; + +#define P_ATIME_FLAG 0 +#define P_MTIME_FLAG 1 +#define P_CTIME_FLAG 2 +#define P_MODE_FLAG 3 + +#define ClearAtimeFlag(pinode) clear_bit(P_ATIME_FLAG, &(pinode)->pinode_flags) +#define SetAtimeFlag(pinode) set_bit(P_ATIME_FLAG, &(pinode)->pinode_flags) +#define AtimeFlag(pinode) test_bit(P_ATIME_FLAG, &(pinode)->pinode_flags) + +#define ClearMtimeFlag(pinode) clear_bit(P_MTIME_FLAG, &(pinode)->pinode_flags) +#define SetMtimeFlag(pinode) set_bit(P_MTIME_FLAG, &(pinode)->pinode_flags) +#define MtimeFlag(pinode) test_bit(P_MTIME_FLAG, &(pinode)->pinode_flags) + +#define ClearCtimeFlag(pinode) clear_bit(P_CTIME_FLAG, &(pinode)->pinode_flags) +#define SetCtimeFlag(pinode) set_bit(P_CTIME_FLAG, &(pinode)->pinode_flags) +#define CtimeFlag(pinode) test_bit(P_CTIME_FLAG, &(pinode)->pinode_flags) + +#define ClearModeFlag(pinode) clear_bit(P_MODE_FLAG, &(pinode)->pinode_flags) +#define SetModeFlag(pinode) set_bit(P_MODE_FLAG, &(pinode)->pinode_flags) +#define ModeFlag(pinode) test_bit(P_MODE_FLAG, &(pinode)->pinode_flags) + +/* per superblock private orangefs info */ +struct orangefs_sb_info_s { + struct orangefs_khandle root_khandle; + __s32 fs_id; + int id; + int flags; +#define ORANGEFS_OPT_INTR 0x01 +#define ORANGEFS_OPT_LOCAL_LOCK 0x02 + char devname[ORANGEFS_MAX_SERVER_ADDR_LEN]; + struct super_block *sb; + int mount_pending; + struct list_head list; +}; + +/* + * structure that holds the state of any async I/O operation issued + * through the VFS. Needed especially to handle cancellation requests + * or even completion notification so that the VFS client-side daemon + * can free up its vfs_request slots. + */ +struct orangefs_kiocb_s { + /* the pointer to the task that initiated the AIO */ + struct task_struct *tsk; + + /* pointer to the kiocb that kicked this operation */ + struct kiocb *kiocb; + + /* buffer index that was used for the I/O */ + struct orangefs_bufmap *bufmap; + int buffer_index; + + /* orangefs kernel operation type */ + struct orangefs_kernel_op_s *op; + + /* The user space buffers from/to which I/O is being staged */ + struct iovec *iov; + + /* number of elements in the iovector */ + unsigned long nr_segs; + + /* set to indicate the type of the operation */ + int rw; + + /* file offset */ + loff_t offset; + + /* and the count in bytes */ + size_t bytes_to_be_copied; + + ssize_t bytes_copied; + int needs_cleanup; +}; + +struct orangefs_stats { + unsigned long cache_hits; + unsigned long cache_misses; + unsigned long reads; + unsigned long writes; +}; + +extern struct orangefs_stats g_orangefs_stats; + +/* + * NOTE: See Documentation/filesystems/porting for information + * on implementing FOO_I and properly accessing fs private data + */ +static inline struct orangefs_inode_s *ORANGEFS_I(struct inode *inode) +{ + return container_of(inode, struct orangefs_inode_s, vfs_inode); +} + +static inline struct orangefs_sb_info_s *ORANGEFS_SB(struct super_block *sb) +{ + return (struct orangefs_sb_info_s *) sb->s_fs_info; +} + +/* ino_t descends from "unsigned long", 8 bytes, 64 bits. 
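+ *
+ * The 128-bit khandle is folded into 64 bits by XORing bytes 0-3
+ * with 4-7 and bytes 12-15 with 8-11; the result is stable for a
+ * given handle but not guaranteed unique, and on 32-bit kernels it
+ * is further truncated to the narrower ino_t.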
*/ +static inline ino_t orangefs_khandle_to_ino(struct orangefs_khandle *khandle) +{ + union { + unsigned char u[8]; + __u64 ino; + } ihandle; + + ihandle.u[0] = khandle->u[0] ^ khandle->u[4]; + ihandle.u[1] = khandle->u[1] ^ khandle->u[5]; + ihandle.u[2] = khandle->u[2] ^ khandle->u[6]; + ihandle.u[3] = khandle->u[3] ^ khandle->u[7]; + ihandle.u[4] = khandle->u[12] ^ khandle->u[8]; + ihandle.u[5] = khandle->u[13] ^ khandle->u[9]; + ihandle.u[6] = khandle->u[14] ^ khandle->u[10]; + ihandle.u[7] = khandle->u[15] ^ khandle->u[11]; + + return ihandle.ino; +} + +static inline struct orangefs_khandle *get_khandle_from_ino(struct inode *inode) +{ + return &(ORANGEFS_I(inode)->refn.khandle); +} + +static inline __s32 get_fsid_from_ino(struct inode *inode) +{ + return ORANGEFS_I(inode)->refn.fs_id; +} + +static inline ino_t get_ino_from_khandle(struct inode *inode) +{ + struct orangefs_khandle *khandle; + ino_t ino; + + khandle = get_khandle_from_ino(inode); + ino = orangefs_khandle_to_ino(khandle); + return ino; +} + +static inline ino_t get_parent_ino_from_dentry(struct dentry *dentry) +{ + return get_ino_from_khandle(dentry->d_parent->d_inode); +} + +static inline int is_root_handle(struct inode *inode) +{ + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s: root handle: %pU, this handle: %pU:\n", + __func__, + &ORANGEFS_SB(inode->i_sb)->root_khandle, + get_khandle_from_ino(inode)); + + if (ORANGEFS_khandle_cmp(&(ORANGEFS_SB(inode->i_sb)->root_khandle), + get_khandle_from_ino(inode))) + return 0; + else + return 1; +} + +static inline int match_handle(struct orangefs_khandle resp_handle, + struct inode *inode) +{ + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s: one handle: %pU, another handle:%pU:\n", + __func__, + &resp_handle, + get_khandle_from_ino(inode)); + + if (ORANGEFS_khandle_cmp(&resp_handle, get_khandle_from_ino(inode))) + return 0; + else + return 1; +} + +/* + * defined in orangefs-cache.c + */ +int op_cache_initialize(void); +int op_cache_finalize(void); +struct orangefs_kernel_op_s *op_alloc(__s32 type); +void orangefs_new_tag(struct orangefs_kernel_op_s *op); +char *get_opname_string(struct orangefs_kernel_op_s *new_op); + +int orangefs_inode_cache_initialize(void); +int orangefs_inode_cache_finalize(void); + +/* + * defined in orangefs-mod.c + */ +void purge_inprogress_ops(void); + +/* + * defined in waitqueue.c + */ +void purge_waiting_ops(void); + +/* + * defined in super.c + */ +struct dentry *orangefs_mount(struct file_system_type *fst, + int flags, + const char *devname, + void *data); + +void orangefs_kill_sb(struct super_block *sb); +int orangefs_remount(struct orangefs_sb_info_s *); + +int fsid_key_table_initialize(void); +void fsid_key_table_finalize(void); + +/* + * defined in inode.c + */ +__u32 convert_to_orangefs_mask(unsigned long lite_mask); +struct inode *orangefs_new_inode(struct super_block *sb, + struct inode *dir, + int mode, + dev_t dev, + struct orangefs_object_kref *ref); + +int orangefs_setattr(struct dentry *dentry, struct iattr *iattr); + +int orangefs_getattr(struct vfsmount *mnt, + struct dentry *dentry, + struct kstat *kstat); + +int orangefs_permission(struct inode *inode, int mask); + +/* + * defined in xattr.c + */ +int orangefs_setxattr(struct dentry *dentry, + const char *name, + const void *value, + size_t size, + int flags); + +ssize_t orangefs_getxattr(struct dentry *dentry, + const char *name, + void *buffer, + size_t size); + +ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size); + +/* + * defined in namei.c + */ +struct inode 
*orangefs_iget(struct super_block *sb, + struct orangefs_object_kref *ref); + +ssize_t orangefs_inode_read(struct inode *inode, + struct iov_iter *iter, + loff_t *offset, + loff_t readahead_size); + +/* + * defined in devorangefs-req.c + */ +int orangefs_dev_init(void); +void orangefs_dev_cleanup(void); +int is_daemon_in_service(void); +bool __is_daemon_in_service(void); + +/* + * defined in orangefs-utils.c + */ +__s32 fsid_of_op(struct orangefs_kernel_op_s *op); + +int orangefs_flush_inode(struct inode *inode); + +ssize_t orangefs_inode_getxattr(struct inode *inode, + const char *prefix, + const char *name, + void *buffer, + size_t size); + +int orangefs_inode_setxattr(struct inode *inode, + const char *prefix, + const char *name, + const void *value, + size_t size, + int flags); + +int orangefs_inode_getattr(struct inode *inode, int new, int size); + +int orangefs_inode_check_changed(struct inode *inode); + +int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr); + +void orangefs_make_bad_inode(struct inode *inode); + +int orangefs_unmount_sb(struct super_block *sb); + +bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op); + +int orangefs_normalize_to_errno(__s32 error_code); + +extern struct mutex devreq_mutex; +extern struct mutex request_mutex; +extern int debug; +extern int op_timeout_secs; +extern int slot_timeout_secs; +extern struct list_head orangefs_superblocks; +extern spinlock_t orangefs_superblocks_lock; +extern struct list_head orangefs_request_list; +extern spinlock_t orangefs_request_list_lock; +extern wait_queue_head_t orangefs_request_list_waitq; +extern struct list_head *htable_ops_in_progress; +extern spinlock_t htable_ops_in_progress_lock; +extern int hash_table_size; + +extern const struct address_space_operations orangefs_address_operations; +extern struct backing_dev_info orangefs_backing_dev_info; +extern struct inode_operations orangefs_file_inode_operations; +extern const struct file_operations orangefs_file_operations; +extern struct inode_operations orangefs_symlink_inode_operations; +extern struct inode_operations orangefs_dir_inode_operations; +extern const struct file_operations orangefs_dir_operations; +extern const struct dentry_operations orangefs_dentry_operations; +extern const struct file_operations orangefs_devreq_file_operations; + +extern wait_queue_head_t orangefs_bufmap_init_waitq; + +/* + * misc convenience macros + */ + +#define ORANGEFS_OP_INTERRUPTIBLE 1 /* service_operation() is interruptible */ +#define ORANGEFS_OP_PRIORITY 2 /* service_operation() is high priority */ +#define ORANGEFS_OP_CANCELLATION 4 /* this is a cancellation */ +#define ORANGEFS_OP_NO_MUTEX 8 /* don't acquire request_mutex */ +#define ORANGEFS_OP_ASYNC 16 /* Queue it, but don't wait */ + +int service_operation(struct orangefs_kernel_op_s *op, + const char *op_name, + int flags); + +#define get_interruptible_flag(inode) \ + ((ORANGEFS_SB(inode->i_sb)->flags & ORANGEFS_OPT_INTR) ? 
\ + ORANGEFS_OP_INTERRUPTIBLE : 0) + +#define fill_default_sys_attrs(sys_attr, type, mode) \ +do { \ + sys_attr.owner = from_kuid(current_user_ns(), current_fsuid()); \ + sys_attr.group = from_kgid(current_user_ns(), current_fsgid()); \ + sys_attr.perms = ORANGEFS_util_translate_mode(mode); \ + sys_attr.mtime = 0; \ + sys_attr.atime = 0; \ + sys_attr.ctime = 0; \ + sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \ +} while (0) + +static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size) +{ +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) + mutex_lock(&inode->i_mutex); +#endif + i_size_write(inode, i_size); +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) + mutex_unlock(&inode->i_mutex); +#endif +} + +#endif /* __ORANGEFSKERNEL_H */ diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c new file mode 100644 index 000000000000..6f072a8c0de1 --- /dev/null +++ b/fs/orangefs/orangefs-mod.c @@ -0,0 +1,293 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * Changes by Acxiom Corporation to add proc file handler for pvfs2 client + * parameters, Copyright Acxiom Corporation, 2005. + * + * See COPYING in top-level directory. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-debugfs.h" +#include "orangefs-sysfs.h" + +/* ORANGEFS_VERSION is a ./configure define */ +#ifndef ORANGEFS_VERSION +#define ORANGEFS_VERSION "upstream" +#endif + +/* + * global variables declared here + */ + +/* array of client debug keyword/mask values */ +struct client_debug_mask *cdm_array; +int cdm_element_count; + +char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN] = "none"; +char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; +char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; + +char *debug_help_string; +int help_string_initialized; +struct dentry *help_file_dentry; +struct dentry *client_debug_dentry; +struct dentry *debug_dir; +int client_verbose_index; +int client_all_index; +struct orangefs_stats g_orangefs_stats; + +/* the size of the hash tables for ops in progress */ +int hash_table_size = 509; + +static ulong module_parm_debug_mask; +__u64 gossip_debug_mask; +struct client_debug_mask client_debug_mask = { NULL, 0, 0 }; +unsigned int kernel_mask_set_mod_init; /* implicitly false */ +int op_timeout_secs = ORANGEFS_DEFAULT_OP_TIMEOUT_SECS; +int slot_timeout_secs = ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS; + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("ORANGEFS Development Team"); +MODULE_DESCRIPTION("The Linux Kernel VFS interface to ORANGEFS"); +MODULE_PARM_DESC(module_parm_debug_mask, "debugging level (see orangefs-debug.h for values)"); +MODULE_PARM_DESC(op_timeout_secs, "Operation timeout in seconds"); +MODULE_PARM_DESC(slot_timeout_secs, "Slot timeout in seconds"); +MODULE_PARM_DESC(hash_table_size, + "size of hash table for operations in progress"); + +static struct file_system_type orangefs_fs_type = { + .name = "pvfs2", + .mount = orangefs_mount, + .kill_sb = orangefs_kill_sb, + .owner = THIS_MODULE, +}; + +module_param(hash_table_size, int, 0); +module_param(module_parm_debug_mask, ulong, 0644); +module_param(op_timeout_secs, int, 0); +module_param(slot_timeout_secs, int, 0); + +/* synchronizes the request device file */ +DEFINE_MUTEX(devreq_mutex); + +/* + * Blocks non-priority requests from being queued for servicing. 
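+ * Requests flagged ORANGEFS_OP_NO_MUTEX skip this mutex entirely
+ * (see the ORANGEFS_OP_* flags in orangefs-kernel.h).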
This
+ * could be used for protecting the request list data structure, but
+ * for now it's only being used to stall the op addition to the request
+ * list
+ */
+DEFINE_MUTEX(request_mutex);
+
+/* hash table for storing operations waiting for matching downcall */
+struct list_head *htable_ops_in_progress;
+DEFINE_SPINLOCK(htable_ops_in_progress_lock);
+
+/* list for queueing upcall operations */
+LIST_HEAD(orangefs_request_list);
+
+/* used to protect the above orangefs_request_list */
+DEFINE_SPINLOCK(orangefs_request_list_lock);
+
+/* used for incoming request notification */
+DECLARE_WAIT_QUEUE_HEAD(orangefs_request_list_waitq);
+
+static int __init orangefs_init(void)
+{
+	int ret = -1;
+	__u32 i = 0;
+
+	/* convert input debug mask to a 64-bit unsigned integer */
+	gossip_debug_mask = (unsigned long long) module_parm_debug_mask;
+
+	/*
+	 * set the kernel's gossip debug string; invalid mask values will
+	 * be ignored.
+	 */
+	debug_mask_to_string(&gossip_debug_mask, 0);
+
+	/* remove any invalid values from the mask */
+	debug_string_to_mask(kernel_debug_string, &gossip_debug_mask, 0);
+
+	/*
+	 * if the mask has a non-zero value, then indicate that the mask
+	 * was set when the kernel module was loaded. The orangefs dev ioctl
+	 * command will look at this boolean to determine if the kernel's
+	 * debug mask should be overwritten when the client-core is started.
+	 */
+	if (gossip_debug_mask != 0)
+		kernel_mask_set_mod_init = true;
+
+	pr_info("%s: called with debug mask: :%s: :%llx:\n",
+		__func__,
+		kernel_debug_string,
+		(unsigned long long)gossip_debug_mask);
+
+	ret = bdi_init(&orangefs_backing_dev_info);
+
+	if (ret)
+		return ret;
+
+	if (op_timeout_secs < 0)
+		op_timeout_secs = 0;
+
+	if (slot_timeout_secs < 0)
+		slot_timeout_secs = 0;
+
+	/* initialize global bookkeeping data structures */
+	ret = op_cache_initialize();
+	if (ret < 0)
+		goto err;
+
+	ret = orangefs_inode_cache_initialize();
+	if (ret < 0)
+		goto cleanup_op;
+
+	htable_ops_in_progress =
+	    kcalloc(hash_table_size, sizeof(struct list_head), GFP_KERNEL);
+	if (!htable_ops_in_progress) {
+		gossip_err("Failed to initialize op hashtable");
+		ret = -ENOMEM;
+		goto cleanup_inode;
+	}
+
+	/* initialize a doubly linked list at each hash table index */
+	for (i = 0; i < hash_table_size; i++)
+		INIT_LIST_HEAD(&htable_ops_in_progress[i]);
+
+	ret = fsid_key_table_initialize();
+	if (ret < 0)
+		goto cleanup_progress_table;
+
+	/*
+	 * Build the contents of /sys/kernel/debug/orangefs/debug-help
+	 * from the keywords in the kernel keyword/mask array.
+	 *
+	 * The keywords in the client keyword/mask array are
+	 * unknown at boot time.
+	 *
+	 * orangefs_prepare_debugfs_help_string will be used again
+	 * later to rebuild the debug-help file after the client starts
+	 * and passes along the needed info. The argument signifies
+	 * which time orangefs_prepare_debugfs_help_string is being
+	 * called.
+	 */
+	ret = orangefs_prepare_debugfs_help_string(1);
+	if (ret)
+		goto cleanup_key_table;
+
+	ret = orangefs_debugfs_init();
+	if (ret)
+		goto debugfs_init_failed;
+
+	ret = orangefs_kernel_debug_init();
+	if (ret)
+		goto kernel_debug_init_failed;
+
+	ret = orangefs_sysfs_init();
+	if (ret)
+		goto sysfs_init_failed;
+
+	/* Initialize the orangefsdev subsystem.
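+	 * (Presumably the request channel: the character device,
+	 * /dev/pvfs2-req in a stock build, that the userspace
+	 * client-core daemon opens to read upcalls and write
+	 * downcalls; see devorangefs-req.c.)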
*/ + ret = orangefs_dev_init(); + if (ret < 0) { + gossip_err("%s: could not initialize device subsystem %d!\n", + __func__, + ret); + goto cleanup_device; + } + + ret = register_filesystem(&orangefs_fs_type); + if (ret == 0) { + pr_info("orangefs: module version %s loaded\n", ORANGEFS_VERSION); + ret = 0; + goto out; + } + + orangefs_sysfs_exit(); + +cleanup_device: + orangefs_dev_cleanup(); + +sysfs_init_failed: + +kernel_debug_init_failed: + +debugfs_init_failed: + orangefs_debugfs_cleanup(); + +cleanup_key_table: + fsid_key_table_finalize(); + +cleanup_progress_table: + kfree(htable_ops_in_progress); + +cleanup_inode: + orangefs_inode_cache_finalize(); + +cleanup_op: + op_cache_finalize(); + +err: + bdi_destroy(&orangefs_backing_dev_info); + +out: + return ret; +} + +static void __exit orangefs_exit(void) +{ + int i = 0; + gossip_debug(GOSSIP_INIT_DEBUG, "orangefs: orangefs_exit called\n"); + + unregister_filesystem(&orangefs_fs_type); + orangefs_debugfs_cleanup(); + orangefs_sysfs_exit(); + fsid_key_table_finalize(); + orangefs_dev_cleanup(); + BUG_ON(!list_empty(&orangefs_request_list)); + for (i = 0; i < hash_table_size; i++) + BUG_ON(!list_empty(&htable_ops_in_progress[i])); + + orangefs_inode_cache_finalize(); + op_cache_finalize(); + + kfree(htable_ops_in_progress); + + bdi_destroy(&orangefs_backing_dev_info); + + pr_info("orangefs: module version %s unloaded\n", ORANGEFS_VERSION); +} + +/* + * What we do in this function is to walk the list of operations + * that are in progress in the hash table and mark them as purged as well. + */ +void purge_inprogress_ops(void) +{ + int i; + + for (i = 0; i < hash_table_size; i++) { + struct orangefs_kernel_op_s *op; + struct orangefs_kernel_op_s *next; + + spin_lock(&htable_ops_in_progress_lock); + list_for_each_entry_safe(op, + next, + &htable_ops_in_progress[i], + list) { + set_op_state_purged(op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(op), + op->op_state, + current->comm); + } + spin_unlock(&htable_ops_in_progress_lock); + } +} + +module_init(orangefs_init); +module_exit(orangefs_exit); diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c new file mode 100644 index 000000000000..5c03113e3ad2 --- /dev/null +++ b/fs/orangefs/orangefs-sysfs.c @@ -0,0 +1,1772 @@ +/* + * Documentation/ABI/stable/orangefs-sysfs: + * + * What: /sys/fs/orangefs/perf_counter_reset + * Date: June 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * echo a 0 or a 1 into perf_counter_reset to + * reset all the counters in + * /sys/fs/orangefs/perf_counters + * except ones with PINT_PERF_PRESERVE set. + * + * + * What: /sys/fs/orangefs/perf_counters/... + * Date: Jun 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * Counters and settings for various caches. + * Read only. + * + * + * What: /sys/fs/orangefs/perf_time_interval_secs + * Date: Jun 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * Length of perf counter intervals in + * seconds. + * + * + * What: /sys/fs/orangefs/perf_history_size + * Date: Jun 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * The perf_counters cache statistics have N, or + * perf_history_size, samples. The default is + * one. + * + * Every perf_time_interval_secs the (first) + * samples are reset. + * + * If N is greater than one, the "current" set + * of samples is reset, and the samples from the + * other N-1 intervals remain available. 
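+ *
+ *				As a hypothetical example, to keep
+ *				five intervals of history instead of
+ *				the default one:
+ *
+ *				echo 5 > /sys/fs/orangefs/perf_history_size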
+ * + * + * What: /sys/fs/orangefs/op_timeout_secs + * Date: Jun 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * Service operation timeout in seconds. + * + * + * What: /sys/fs/orangefs/slot_timeout_secs + * Date: Jun 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * "Slot" timeout in seconds. A "slot" + * is an indexed buffer in the shared + * memory segment used for communication + * between the kernel module and userspace. + * Slots are requested and waited for, + * the wait times out after slot_timeout_secs. + * + * + * What: /sys/fs/orangefs/acache/... + * Date: Jun 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * Attribute cache configurable settings. + * + * + * What: /sys/fs/orangefs/ncache/... + * Date: Jun 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * Name cache configurable settings. + * + * + * What: /sys/fs/orangefs/capcache/... + * Date: Jun 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * Capability cache configurable settings. + * + * + * What: /sys/fs/orangefs/ccache/... + * Date: Jun 2015 + * Contact: Mike Marshall <hubcap@omnibond.com> + * Description: + * Credential cache configurable settings. + * + */ + +#include <linux/fs.h> +#include <linux/kobject.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/module.h> +#include <linux/init.h> + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-sysfs.h" + +#define ORANGEFS_KOBJ_ID "orangefs" +#define ACACHE_KOBJ_ID "acache" +#define CAPCACHE_KOBJ_ID "capcache" +#define CCACHE_KOBJ_ID "ccache" +#define NCACHE_KOBJ_ID "ncache" +#define PC_KOBJ_ID "pc" +#define STATS_KOBJ_ID "stats" + +struct orangefs_obj { + struct kobject kobj; + int op_timeout_secs; + int perf_counter_reset; + int perf_history_size; + int perf_time_interval_secs; + int slot_timeout_secs; +}; + +struct acache_orangefs_obj { + struct kobject kobj; + int hard_limit; + int reclaim_percentage; + int soft_limit; + int timeout_msecs; +}; + +struct capcache_orangefs_obj { + struct kobject kobj; + int hard_limit; + int reclaim_percentage; + int soft_limit; + int timeout_secs; +}; + +struct ccache_orangefs_obj { + struct kobject kobj; + int hard_limit; + int reclaim_percentage; + int soft_limit; + int timeout_secs; +}; + +struct ncache_orangefs_obj { + struct kobject kobj; + int hard_limit; + int reclaim_percentage; + int soft_limit; + int timeout_msecs; +}; + +struct pc_orangefs_obj { + struct kobject kobj; + char *acache; + char *capcache; + char *ncache; +}; + +struct stats_orangefs_obj { + struct kobject kobj; + int reads; + int writes; +}; + +struct orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct acache_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct acache_orangefs_obj *acache_orangefs_obj, + struct acache_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct acache_orangefs_obj *acache_orangefs_obj, + struct acache_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct capcache_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct capcache_orangefs_obj *capcache_orangefs_obj, + struct capcache_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct capcache_orangefs_obj 
*capcache_orangefs_obj, + struct capcache_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct ccache_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct ccache_orangefs_obj *ccache_orangefs_obj, + struct ccache_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct ccache_orangefs_obj *ccache_orangefs_obj, + struct ccache_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct ncache_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct ncache_orangefs_obj *ncache_orangefs_obj, + struct ncache_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct ncache_orangefs_obj *ncache_orangefs_obj, + struct ncache_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct pc_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct pc_orangefs_obj *pc_orangefs_obj, + struct pc_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct pc_orangefs_obj *pc_orangefs_obj, + struct pc_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct stats_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct stats_orangefs_obj *stats_orangefs_obj, + struct stats_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct stats_orangefs_obj *stats_orangefs_obj, + struct stats_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +static ssize_t orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct orangefs_attribute *attribute; + struct orangefs_obj *orangefs_obj; + int rc; + + attribute = container_of(attr, struct orangefs_attribute, attr); + orangefs_obj = container_of(kobj, struct orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(orangefs_obj, attribute, buf); + +out: + return rc; +} + +static ssize_t orangefs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t len) +{ + struct orangefs_attribute *attribute; + struct orangefs_obj *orangefs_obj; + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "orangefs_attr_store: start\n"); + + attribute = container_of(attr, struct orangefs_attribute, attr); + orangefs_obj = container_of(kobj, struct orangefs_obj, kobj); + + if (!attribute->store) { + rc = -EIO; + goto out; + } + + rc = attribute->store(orangefs_obj, attribute, buf, len); + +out: + return rc; +} + +static const struct sysfs_ops orangefs_sysfs_ops = { + .show = orangefs_attr_show, + .store = orangefs_attr_store, +}; + +static ssize_t acache_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct acache_orangefs_attribute *attribute; + struct acache_orangefs_obj *acache_orangefs_obj; + int rc; + + attribute = container_of(attr, struct acache_orangefs_attribute, attr); + acache_orangefs_obj = + container_of(kobj, struct acache_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(acache_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static ssize_t acache_orangefs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t len) +{ + struct acache_orangefs_attribute *attribute; + struct acache_orangefs_obj *acache_orangefs_obj; + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "acache_orangefs_attr_store: start\n"); + + attribute = container_of(attr, struct acache_orangefs_attribute, attr); + acache_orangefs_obj = + container_of(kobj, struct acache_orangefs_obj, kobj); + + if 
(!attribute->store) { + rc = -EIO; + goto out; + } + + rc = attribute->store(acache_orangefs_obj, attribute, buf, len); + +out: + return rc; +} + +static const struct sysfs_ops acache_orangefs_sysfs_ops = { + .show = acache_orangefs_attr_show, + .store = acache_orangefs_attr_store, +}; + +static ssize_t capcache_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct capcache_orangefs_attribute *attribute; + struct capcache_orangefs_obj *capcache_orangefs_obj; + int rc; + + attribute = + container_of(attr, struct capcache_orangefs_attribute, attr); + capcache_orangefs_obj = + container_of(kobj, struct capcache_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(capcache_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static ssize_t capcache_orangefs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t len) +{ + struct capcache_orangefs_attribute *attribute; + struct capcache_orangefs_obj *capcache_orangefs_obj; + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "capcache_orangefs_attr_store: start\n"); + + attribute = + container_of(attr, struct capcache_orangefs_attribute, attr); + capcache_orangefs_obj = + container_of(kobj, struct capcache_orangefs_obj, kobj); + + if (!attribute->store) { + rc = -EIO; + goto out; + } + + rc = attribute->store(capcache_orangefs_obj, attribute, buf, len); + +out: + return rc; +} + +static const struct sysfs_ops capcache_orangefs_sysfs_ops = { + .show = capcache_orangefs_attr_show, + .store = capcache_orangefs_attr_store, +}; + +static ssize_t ccache_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct ccache_orangefs_attribute *attribute; + struct ccache_orangefs_obj *ccache_orangefs_obj; + int rc; + + attribute = + container_of(attr, struct ccache_orangefs_attribute, attr); + ccache_orangefs_obj = + container_of(kobj, struct ccache_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(ccache_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static ssize_t ccache_orangefs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t len) +{ + struct ccache_orangefs_attribute *attribute; + struct ccache_orangefs_obj *ccache_orangefs_obj; + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "ccache_orangefs_attr_store: start\n"); + + attribute = + container_of(attr, struct ccache_orangefs_attribute, attr); + ccache_orangefs_obj = + container_of(kobj, struct ccache_orangefs_obj, kobj); + + if (!attribute->store) { + rc = -EIO; + goto out; + } + + rc = attribute->store(ccache_orangefs_obj, attribute, buf, len); + +out: + return rc; +} + +static const struct sysfs_ops ccache_orangefs_sysfs_ops = { + .show = ccache_orangefs_attr_show, + .store = ccache_orangefs_attr_store, +}; + +static ssize_t ncache_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct ncache_orangefs_attribute *attribute; + struct ncache_orangefs_obj *ncache_orangefs_obj; + int rc; + + attribute = container_of(attr, struct ncache_orangefs_attribute, attr); + ncache_orangefs_obj = + container_of(kobj, struct ncache_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(ncache_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static ssize_t ncache_orangefs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t len) +{ + struct 
ncache_orangefs_attribute *attribute; + struct ncache_orangefs_obj *ncache_orangefs_obj; + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "ncache_orangefs_attr_store: start\n"); + + attribute = container_of(attr, struct ncache_orangefs_attribute, attr); + ncache_orangefs_obj = + container_of(kobj, struct ncache_orangefs_obj, kobj); + + if (!attribute->store) { + rc = -EIO; + goto out; + } + + rc = attribute->store(ncache_orangefs_obj, attribute, buf, len); + +out: + return rc; +} + +static const struct sysfs_ops ncache_orangefs_sysfs_ops = { + .show = ncache_orangefs_attr_show, + .store = ncache_orangefs_attr_store, +}; + +static ssize_t pc_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct pc_orangefs_attribute *attribute; + struct pc_orangefs_obj *pc_orangefs_obj; + int rc; + + attribute = container_of(attr, struct pc_orangefs_attribute, attr); + pc_orangefs_obj = + container_of(kobj, struct pc_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(pc_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static const struct sysfs_ops pc_orangefs_sysfs_ops = { + .show = pc_orangefs_attr_show, +}; + +static ssize_t stats_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct stats_orangefs_attribute *attribute; + struct stats_orangefs_obj *stats_orangefs_obj; + int rc; + + attribute = container_of(attr, struct stats_orangefs_attribute, attr); + stats_orangefs_obj = + container_of(kobj, struct stats_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(stats_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static const struct sysfs_ops stats_orangefs_sysfs_ops = { + .show = stats_orangefs_attr_show, +}; + +static void orangefs_release(struct kobject *kobj) +{ + struct orangefs_obj *orangefs_obj; + + orangefs_obj = container_of(kobj, struct orangefs_obj, kobj); + kfree(orangefs_obj); +} + +static void acache_orangefs_release(struct kobject *kobj) +{ + struct acache_orangefs_obj *acache_orangefs_obj; + + acache_orangefs_obj = + container_of(kobj, struct acache_orangefs_obj, kobj); + kfree(acache_orangefs_obj); +} + +static void capcache_orangefs_release(struct kobject *kobj) +{ + struct capcache_orangefs_obj *capcache_orangefs_obj; + + capcache_orangefs_obj = + container_of(kobj, struct capcache_orangefs_obj, kobj); + kfree(capcache_orangefs_obj); +} + +static void ccache_orangefs_release(struct kobject *kobj) +{ + struct ccache_orangefs_obj *ccache_orangefs_obj; + + ccache_orangefs_obj = + container_of(kobj, struct ccache_orangefs_obj, kobj); + kfree(ccache_orangefs_obj); +} + +static void ncache_orangefs_release(struct kobject *kobj) +{ + struct ncache_orangefs_obj *ncache_orangefs_obj; + + ncache_orangefs_obj = + container_of(kobj, struct ncache_orangefs_obj, kobj); + kfree(ncache_orangefs_obj); +} + +static void pc_orangefs_release(struct kobject *kobj) +{ + struct pc_orangefs_obj *pc_orangefs_obj; + + pc_orangefs_obj = + container_of(kobj, struct pc_orangefs_obj, kobj); + kfree(pc_orangefs_obj); +} + +static void stats_orangefs_release(struct kobject *kobj) +{ + struct stats_orangefs_obj *stats_orangefs_obj; + + stats_orangefs_obj = + container_of(kobj, struct stats_orangefs_obj, kobj); + kfree(stats_orangefs_obj); +} + +static ssize_t sysfs_int_show(char *kobj_id, char *buf, void *attr) +{ + int rc = -EIO; + struct orangefs_attribute *orangefs_attr; + struct stats_orangefs_attribute 
*stats_orangefs_attr; + + gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_int_show: id:%s:\n", kobj_id); + + if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) { + orangefs_attr = (struct orangefs_attribute *)attr; + + if (!strcmp(orangefs_attr->attr.name, "op_timeout_secs")) { + rc = scnprintf(buf, + PAGE_SIZE, + "%d\n", + op_timeout_secs); + goto out; + } else if (!strcmp(orangefs_attr->attr.name, + "slot_timeout_secs")) { + rc = scnprintf(buf, + PAGE_SIZE, + "%d\n", + slot_timeout_secs); + goto out; + } else { + goto out; + } + + } else if (!strcmp(kobj_id, STATS_KOBJ_ID)) { + stats_orangefs_attr = (struct stats_orangefs_attribute *)attr; + + if (!strcmp(stats_orangefs_attr->attr.name, "reads")) { + rc = scnprintf(buf, + PAGE_SIZE, + "%lu\n", + g_orangefs_stats.reads); + goto out; + } else if (!strcmp(stats_orangefs_attr->attr.name, "writes")) { + rc = scnprintf(buf, + PAGE_SIZE, + "%lu\n", + g_orangefs_stats.writes); + goto out; + } else { + goto out; + } + } + +out: + + return rc; +} + +static ssize_t int_orangefs_show(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + char *buf) +{ + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "int_orangefs_show:start attr->attr.name:%s:\n", + attr->attr.name); + + rc = sysfs_int_show(ORANGEFS_KOBJ_ID, buf, (void *) attr); + + return rc; +} + +static ssize_t int_stats_show(struct stats_orangefs_obj *stats_orangefs_obj, + struct stats_orangefs_attribute *attr, + char *buf) +{ + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "int_stats_show:start attr->attr.name:%s:\n", + attr->attr.name); + + rc = sysfs_int_show(STATS_KOBJ_ID, buf, (void *) attr); + + return rc; +} + +static ssize_t int_store(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "int_store: start attr->attr.name:%s: buf:%s:\n", + attr->attr.name, buf); + + if (!strcmp(attr->attr.name, "op_timeout_secs")) { + rc = kstrtoint(buf, 0, &op_timeout_secs); + goto out; + } else if (!strcmp(attr->attr.name, "slot_timeout_secs")) { + rc = kstrtoint(buf, 0, &slot_timeout_secs); + goto out; + } else { + goto out; + } + +out: + if (rc) + rc = -EINVAL; + else + rc = count; + + return rc; +} + +/* + * obtain attribute values from userspace with a service operation. + */ +static int sysfs_service_op_show(char *kobj_id, char *buf, void *attr) +{ + struct orangefs_kernel_op_s *new_op = NULL; + int rc = 0; + char *ser_op_type = NULL; + struct orangefs_attribute *orangefs_attr; + struct acache_orangefs_attribute *acache_attr; + struct capcache_orangefs_attribute *capcache_attr; + struct ccache_orangefs_attribute *ccache_attr; + struct ncache_orangefs_attribute *ncache_attr; + struct pc_orangefs_attribute *pc_attr; + __u32 op_alloc_type; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "sysfs_service_op_show: id:%s:\n", + kobj_id); + + if (strcmp(kobj_id, PC_KOBJ_ID)) + op_alloc_type = ORANGEFS_VFS_OP_PARAM; + else + op_alloc_type = ORANGEFS_VFS_OP_PERF_COUNT; + + new_op = op_alloc(op_alloc_type); + if (!new_op) + return -ENOMEM; + + /* Can't do a service_operation if the client is not running... 
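+	 * is_daemon_in_service() returns zero when the client-core is
+	 * up, so any nonzero rc here short-circuits the show and is
+	 * handed back to the caller unchanged.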
*/ + rc = is_daemon_in_service(); + if (rc) { + pr_info("%s: Client not running :%d:\n", + __func__, + is_daemon_in_service()); + goto out; + } + + if (strcmp(kobj_id, PC_KOBJ_ID)) + new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_GET; + + if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) { + orangefs_attr = (struct orangefs_attribute *)attr; + + if (!strcmp(orangefs_attr->attr.name, "perf_history_size")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE; + else if (!strcmp(orangefs_attr->attr.name, + "perf_time_interval_secs")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS; + else if (!strcmp(orangefs_attr->attr.name, + "perf_counter_reset")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_RESET; + + } else if (!strcmp(kobj_id, ACACHE_KOBJ_ID)) { + acache_attr = (struct acache_orangefs_attribute *)attr; + + if (!strcmp(acache_attr->attr.name, "timeout_msecs")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS; + + if (!strcmp(acache_attr->attr.name, "hard_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT; + + if (!strcmp(acache_attr->attr.name, "soft_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT; + + if (!strcmp(acache_attr->attr.name, "reclaim_percentage")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE; + + } else if (!strcmp(kobj_id, CAPCACHE_KOBJ_ID)) { + capcache_attr = (struct capcache_orangefs_attribute *)attr; + + if (!strcmp(capcache_attr->attr.name, "timeout_secs")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS; + + if (!strcmp(capcache_attr->attr.name, "hard_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT; + + if (!strcmp(capcache_attr->attr.name, "soft_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT; + + if (!strcmp(capcache_attr->attr.name, "reclaim_percentage")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE; + + } else if (!strcmp(kobj_id, CCACHE_KOBJ_ID)) { + ccache_attr = (struct ccache_orangefs_attribute *)attr; + + if (!strcmp(ccache_attr->attr.name, "timeout_secs")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS; + + if (!strcmp(ccache_attr->attr.name, "hard_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT; + + if (!strcmp(ccache_attr->attr.name, "soft_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT; + + if (!strcmp(ccache_attr->attr.name, "reclaim_percentage")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE; + + } else if (!strcmp(kobj_id, NCACHE_KOBJ_ID)) { + ncache_attr = (struct ncache_orangefs_attribute *)attr; + + if (!strcmp(ncache_attr->attr.name, "timeout_msecs")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS; + + if (!strcmp(ncache_attr->attr.name, "hard_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT; + + if (!strcmp(ncache_attr->attr.name, "soft_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT; + + if (!strcmp(ncache_attr->attr.name, "reclaim_percentage")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE; + + } else if (!strcmp(kobj_id, PC_KOBJ_ID)) { + pc_attr = (struct 
pc_orangefs_attribute *)attr; + + if (!strcmp(pc_attr->attr.name, ACACHE_KOBJ_ID)) + new_op->upcall.req.perf_count.type = + ORANGEFS_PERF_COUNT_REQUEST_ACACHE; + + if (!strcmp(pc_attr->attr.name, CAPCACHE_KOBJ_ID)) + new_op->upcall.req.perf_count.type = + ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE; + + if (!strcmp(pc_attr->attr.name, NCACHE_KOBJ_ID)) + new_op->upcall.req.perf_count.type = + ORANGEFS_PERF_COUNT_REQUEST_NCACHE; + + } else { + gossip_err("sysfs_service_op_show: unknown kobj_id:%s:\n", + kobj_id); + rc = -EINVAL; + goto out; + } + + + if (strcmp(kobj_id, PC_KOBJ_ID)) + ser_op_type = "orangefs_param"; + else + ser_op_type = "orangefs_perf_count"; + + /* + * The service_operation will return an errno return code on + * error, and zero on success. + */ + rc = service_operation(new_op, ser_op_type, ORANGEFS_OP_INTERRUPTIBLE); + +out: + if (!rc) { + if (strcmp(kobj_id, PC_KOBJ_ID)) { + rc = scnprintf(buf, + PAGE_SIZE, + "%d\n", + (int)new_op->downcall.resp.param.value); + } else { + rc = scnprintf( + buf, + PAGE_SIZE, + "%s", + new_op->downcall.resp.perf_count.buffer); + } + } + + op_release(new_op); + + return rc; + +} + +static ssize_t service_orangefs_show(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(ORANGEFS_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +static ssize_t + service_acache_show(struct acache_orangefs_obj *acache_orangefs_obj, + struct acache_orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(ACACHE_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +static ssize_t service_capcache_show(struct capcache_orangefs_obj + *capcache_orangefs_obj, + struct capcache_orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(CAPCACHE_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +static ssize_t service_ccache_show(struct ccache_orangefs_obj + *ccache_orangefs_obj, + struct ccache_orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(CCACHE_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +static ssize_t + service_ncache_show(struct ncache_orangefs_obj *ncache_orangefs_obj, + struct ncache_orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(NCACHE_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +static ssize_t + service_pc_show(struct pc_orangefs_obj *pc_orangefs_obj, + struct pc_orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(PC_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +/* + * pass attribute values back to userspace with a service operation. + * + * We have to do a memory allocation, an sscanf and a service operation. + * And we have to evaluate what the user entered, to make sure the + * value is within the range supported by the attribute. So, there's + * a lot of return code checking and mapping going on here. + * + * We want to return 1 if we think everything went OK, and + * EINVAL if not. 
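+ *
+ * For example, a successful write of "5" to perf_history_size makes
+ * this helper return 1, and the service_orangefs_store() wrapper
+ * below turns that 1 into the byte count sysfs expects.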
+ */ +static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr) +{ + struct orangefs_kernel_op_s *new_op = NULL; + int val = 0; + int rc = 0; + struct orangefs_attribute *orangefs_attr; + struct acache_orangefs_attribute *acache_attr; + struct capcache_orangefs_attribute *capcache_attr; + struct ccache_orangefs_attribute *ccache_attr; + struct ncache_orangefs_attribute *ncache_attr; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "sysfs_service_op_store: id:%s:\n", + kobj_id); + + new_op = op_alloc(ORANGEFS_VFS_OP_PARAM); + if (!new_op) + return -EINVAL; /* sic */ + + /* Can't do a service_operation if the client is not running... */ + rc = is_daemon_in_service(); + if (rc) { + pr_info("%s: Client not running :%d:\n", + __func__, + is_daemon_in_service()); + goto out; + } + + /* + * The value we want to send back to userspace is in buf. + */ + rc = kstrtoint(buf, 0, &val); + if (rc) + goto out; + + if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) { + orangefs_attr = (struct orangefs_attribute *)attr; + + if (!strcmp(orangefs_attr->attr.name, "perf_history_size")) { + if (val > 0) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(orangefs_attr->attr.name, + "perf_time_interval_secs")) { + if (val > 0) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(orangefs_attr->attr.name, + "perf_counter_reset")) { + if ((val == 0) || (val == 1)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_RESET; + } else { + rc = 0; + goto out; + } + } + + } else if (!strcmp(kobj_id, ACACHE_KOBJ_ID)) { + acache_attr = (struct acache_orangefs_attribute *)attr; + + if (!strcmp(acache_attr->attr.name, "hard_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(acache_attr->attr.name, "soft_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(acache_attr->attr.name, + "reclaim_percentage")) { + if ((val > -1) && (val < 101)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(acache_attr->attr.name, "timeout_msecs")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS; + } else { + rc = 0; + goto out; + } + } + + } else if (!strcmp(kobj_id, CAPCACHE_KOBJ_ID)) { + capcache_attr = (struct capcache_orangefs_attribute *)attr; + + if (!strcmp(capcache_attr->attr.name, "hard_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(capcache_attr->attr.name, "soft_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(capcache_attr->attr.name, + "reclaim_percentage")) { + if ((val > -1) && (val < 101)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(capcache_attr->attr.name, "timeout_secs")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS; + } else { + rc = 0; + goto out; + } + } + + } else 
if (!strcmp(kobj_id, CCACHE_KOBJ_ID)) { + ccache_attr = (struct ccache_orangefs_attribute *)attr; + + if (!strcmp(ccache_attr->attr.name, "hard_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ccache_attr->attr.name, "soft_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ccache_attr->attr.name, + "reclaim_percentage")) { + if ((val > -1) && (val < 101)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ccache_attr->attr.name, "timeout_secs")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS; + } else { + rc = 0; + goto out; + } + } + + } else if (!strcmp(kobj_id, NCACHE_KOBJ_ID)) { + ncache_attr = (struct ncache_orangefs_attribute *)attr; + + if (!strcmp(ncache_attr->attr.name, "hard_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ncache_attr->attr.name, "soft_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ncache_attr->attr.name, + "reclaim_percentage")) { + if ((val > -1) && (val < 101)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ncache_attr->attr.name, "timeout_msecs")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS; + } else { + rc = 0; + goto out; + } + } + + } else { + gossip_err("sysfs_service_op_store: unknown kobj_id:%s:\n", + kobj_id); + rc = -EINVAL; + goto out; + } + + new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET; + + new_op->upcall.req.param.value = val; + + /* + * The service_operation will return a errno return code on + * error, and zero on success. + */ + rc = service_operation(new_op, "orangefs_param", ORANGEFS_OP_INTERRUPTIBLE); + + if (rc < 0) { + gossip_err("sysfs_service_op_store: service op returned:%d:\n", + rc); + rc = 0; + } else { + rc = 1; + } + +out: + op_release(new_op); + + if (rc == -ENOMEM || rc == 0) + rc = -EINVAL; + + return rc; +} + +static ssize_t + service_orangefs_store(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + rc = sysfs_service_op_store(ORANGEFS_KOBJ_ID, buf, (void *) attr); + + /* rc should have an errno value if the service_op went bad. */ + if (rc == 1) + rc = count; + + return rc; +} + +static ssize_t + service_acache_store(struct acache_orangefs_obj *acache_orangefs_obj, + struct acache_orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + rc = sysfs_service_op_store(ACACHE_KOBJ_ID, buf, (void *) attr); + + /* rc should have an errno value if the service_op went bad. */ + if (rc == 1) + rc = count; + + return rc; +} + +static ssize_t + service_capcache_store(struct capcache_orangefs_obj + *capcache_orangefs_obj, + struct capcache_orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + rc = sysfs_service_op_store(CAPCACHE_KOBJ_ID, buf, (void *) attr); + + /* rc should have an errno value if the service_op went bad. 
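+	 * (1 is the helper's success sentinel; it is swapped for count
+	 * here so sysfs sees the whole write as consumed.)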
*/ + if (rc == 1) + rc = count; + + return rc; +} + +static ssize_t service_ccache_store(struct ccache_orangefs_obj + *ccache_orangefs_obj, + struct ccache_orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + rc = sysfs_service_op_store(CCACHE_KOBJ_ID, buf, (void *) attr); + + /* rc should have an errno value if the service_op went bad. */ + if (rc == 1) + rc = count; + + return rc; +} + +static ssize_t + service_ncache_store(struct ncache_orangefs_obj *ncache_orangefs_obj, + struct ncache_orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + rc = sysfs_service_op_store(NCACHE_KOBJ_ID, buf, (void *) attr); + + /* rc should have an errno value if the service_op went bad. */ + if (rc == 1) + rc = count; + + return rc; +} + +static struct orangefs_attribute op_timeout_secs_attribute = + __ATTR(op_timeout_secs, 0664, int_orangefs_show, int_store); + +static struct orangefs_attribute slot_timeout_secs_attribute = + __ATTR(slot_timeout_secs, 0664, int_orangefs_show, int_store); + +static struct orangefs_attribute perf_counter_reset_attribute = + __ATTR(perf_counter_reset, + 0664, + service_orangefs_show, + service_orangefs_store); + +static struct orangefs_attribute perf_history_size_attribute = + __ATTR(perf_history_size, + 0664, + service_orangefs_show, + service_orangefs_store); + +static struct orangefs_attribute perf_time_interval_secs_attribute = + __ATTR(perf_time_interval_secs, + 0664, + service_orangefs_show, + service_orangefs_store); + +static struct attribute *orangefs_default_attrs[] = { + &op_timeout_secs_attribute.attr, + &slot_timeout_secs_attribute.attr, + &perf_counter_reset_attribute.attr, + &perf_history_size_attribute.attr, + &perf_time_interval_secs_attribute.attr, + NULL, +}; + +static struct kobj_type orangefs_ktype = { + .sysfs_ops = &orangefs_sysfs_ops, + .release = orangefs_release, + .default_attrs = orangefs_default_attrs, +}; + +static struct acache_orangefs_attribute acache_hard_limit_attribute = + __ATTR(hard_limit, + 0664, + service_acache_show, + service_acache_store); + +static struct acache_orangefs_attribute acache_reclaim_percent_attribute = + __ATTR(reclaim_percentage, + 0664, + service_acache_show, + service_acache_store); + +static struct acache_orangefs_attribute acache_soft_limit_attribute = + __ATTR(soft_limit, + 0664, + service_acache_show, + service_acache_store); + +static struct acache_orangefs_attribute acache_timeout_msecs_attribute = + __ATTR(timeout_msecs, + 0664, + service_acache_show, + service_acache_store); + +static struct attribute *acache_orangefs_default_attrs[] = { + &acache_hard_limit_attribute.attr, + &acache_reclaim_percent_attribute.attr, + &acache_soft_limit_attribute.attr, + &acache_timeout_msecs_attribute.attr, + NULL, +}; + +static struct kobj_type acache_orangefs_ktype = { + .sysfs_ops = &acache_orangefs_sysfs_ops, + .release = acache_orangefs_release, + .default_attrs = acache_orangefs_default_attrs, +}; + +static struct capcache_orangefs_attribute capcache_hard_limit_attribute = + __ATTR(hard_limit, + 0664, + service_capcache_show, + service_capcache_store); + +static struct capcache_orangefs_attribute capcache_reclaim_percent_attribute = + __ATTR(reclaim_percentage, + 0664, + service_capcache_show, + service_capcache_store); + +static struct capcache_orangefs_attribute capcache_soft_limit_attribute = + __ATTR(soft_limit, + 0664, + service_capcache_show, + service_capcache_store); + +static struct capcache_orangefs_attribute capcache_timeout_secs_attribute = + 
__ATTR(timeout_secs, + 0664, + service_capcache_show, + service_capcache_store); + +static struct attribute *capcache_orangefs_default_attrs[] = { + &capcache_hard_limit_attribute.attr, + &capcache_reclaim_percent_attribute.attr, + &capcache_soft_limit_attribute.attr, + &capcache_timeout_secs_attribute.attr, + NULL, +}; + +static struct kobj_type capcache_orangefs_ktype = { + .sysfs_ops = &capcache_orangefs_sysfs_ops, + .release = capcache_orangefs_release, + .default_attrs = capcache_orangefs_default_attrs, +}; + +static struct ccache_orangefs_attribute ccache_hard_limit_attribute = + __ATTR(hard_limit, + 0664, + service_ccache_show, + service_ccache_store); + +static struct ccache_orangefs_attribute ccache_reclaim_percent_attribute = + __ATTR(reclaim_percentage, + 0664, + service_ccache_show, + service_ccache_store); + +static struct ccache_orangefs_attribute ccache_soft_limit_attribute = + __ATTR(soft_limit, + 0664, + service_ccache_show, + service_ccache_store); + +static struct ccache_orangefs_attribute ccache_timeout_secs_attribute = + __ATTR(timeout_secs, + 0664, + service_ccache_show, + service_ccache_store); + +static struct attribute *ccache_orangefs_default_attrs[] = { + &ccache_hard_limit_attribute.attr, + &ccache_reclaim_percent_attribute.attr, + &ccache_soft_limit_attribute.attr, + &ccache_timeout_secs_attribute.attr, + NULL, +}; + +static struct kobj_type ccache_orangefs_ktype = { + .sysfs_ops = &ccache_orangefs_sysfs_ops, + .release = ccache_orangefs_release, + .default_attrs = ccache_orangefs_default_attrs, +}; + +static struct ncache_orangefs_attribute ncache_hard_limit_attribute = + __ATTR(hard_limit, + 0664, + service_ncache_show, + service_ncache_store); + +static struct ncache_orangefs_attribute ncache_reclaim_percent_attribute = + __ATTR(reclaim_percentage, + 0664, + service_ncache_show, + service_ncache_store); + +static struct ncache_orangefs_attribute ncache_soft_limit_attribute = + __ATTR(soft_limit, + 0664, + service_ncache_show, + service_ncache_store); + +static struct ncache_orangefs_attribute ncache_timeout_msecs_attribute = + __ATTR(timeout_msecs, + 0664, + service_ncache_show, + service_ncache_store); + +static struct attribute *ncache_orangefs_default_attrs[] = { + &ncache_hard_limit_attribute.attr, + &ncache_reclaim_percent_attribute.attr, + &ncache_soft_limit_attribute.attr, + &ncache_timeout_msecs_attribute.attr, + NULL, +}; + +static struct kobj_type ncache_orangefs_ktype = { + .sysfs_ops = &ncache_orangefs_sysfs_ops, + .release = ncache_orangefs_release, + .default_attrs = ncache_orangefs_default_attrs, +}; + +static struct pc_orangefs_attribute pc_acache_attribute = + __ATTR(acache, + 0664, + service_pc_show, + NULL); + +static struct pc_orangefs_attribute pc_capcache_attribute = + __ATTR(capcache, + 0664, + service_pc_show, + NULL); + +static struct pc_orangefs_attribute pc_ncache_attribute = + __ATTR(ncache, + 0664, + service_pc_show, + NULL); + +static struct attribute *pc_orangefs_default_attrs[] = { + &pc_acache_attribute.attr, + &pc_capcache_attribute.attr, + &pc_ncache_attribute.attr, + NULL, +}; + +static struct kobj_type pc_orangefs_ktype = { + .sysfs_ops = &pc_orangefs_sysfs_ops, + .release = pc_orangefs_release, + .default_attrs = pc_orangefs_default_attrs, +}; + +static struct stats_orangefs_attribute stats_reads_attribute = + __ATTR(reads, + 0664, + int_stats_show, + NULL); + +static struct stats_orangefs_attribute stats_writes_attribute = + __ATTR(writes, + 0664, + int_stats_show, + NULL); + +static struct attribute 
*stats_orangefs_default_attrs[] = { + &stats_reads_attribute.attr, + &stats_writes_attribute.attr, + NULL, +}; + +static struct kobj_type stats_orangefs_ktype = { + .sysfs_ops = &stats_orangefs_sysfs_ops, + .release = stats_orangefs_release, + .default_attrs = stats_orangefs_default_attrs, +}; + +static struct orangefs_obj *orangefs_obj; +static struct acache_orangefs_obj *acache_orangefs_obj; +static struct capcache_orangefs_obj *capcache_orangefs_obj; +static struct ccache_orangefs_obj *ccache_orangefs_obj; +static struct ncache_orangefs_obj *ncache_orangefs_obj; +static struct pc_orangefs_obj *pc_orangefs_obj; +static struct stats_orangefs_obj *stats_orangefs_obj; + +int orangefs_sysfs_init(void) +{ + int rc = -EINVAL; + + gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_init: start\n"); + + /* create /sys/fs/orangefs. */ + orangefs_obj = kzalloc(sizeof(*orangefs_obj), GFP_KERNEL); + if (!orangefs_obj) + goto out; + + rc = kobject_init_and_add(&orangefs_obj->kobj, + &orangefs_ktype, + fs_kobj, + ORANGEFS_KOBJ_ID); + + if (rc) + goto ofs_obj_bail; + + kobject_uevent(&orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/acache. */ + acache_orangefs_obj = kzalloc(sizeof(*acache_orangefs_obj), GFP_KERNEL); + if (!acache_orangefs_obj) { + rc = -EINVAL; + goto ofs_obj_bail; + } + + rc = kobject_init_and_add(&acache_orangefs_obj->kobj, + &acache_orangefs_ktype, + &orangefs_obj->kobj, + ACACHE_KOBJ_ID); + + if (rc) + goto acache_obj_bail; + + kobject_uevent(&acache_orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/capcache. */ + capcache_orangefs_obj = + kzalloc(sizeof(*capcache_orangefs_obj), GFP_KERNEL); + if (!capcache_orangefs_obj) { + rc = -EINVAL; + goto acache_obj_bail; + } + + rc = kobject_init_and_add(&capcache_orangefs_obj->kobj, + &capcache_orangefs_ktype, + &orangefs_obj->kobj, + CAPCACHE_KOBJ_ID); + if (rc) + goto capcache_obj_bail; + + kobject_uevent(&capcache_orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/ccache. */ + ccache_orangefs_obj = + kzalloc(sizeof(*ccache_orangefs_obj), GFP_KERNEL); + if (!ccache_orangefs_obj) { + rc = -EINVAL; + goto capcache_obj_bail; + } + + rc = kobject_init_and_add(&ccache_orangefs_obj->kobj, + &ccache_orangefs_ktype, + &orangefs_obj->kobj, + CCACHE_KOBJ_ID); + if (rc) + goto ccache_obj_bail; + + kobject_uevent(&ccache_orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/ncache. */ + ncache_orangefs_obj = kzalloc(sizeof(*ncache_orangefs_obj), GFP_KERNEL); + if (!ncache_orangefs_obj) { + rc = -EINVAL; + goto ccache_obj_bail; + } + + rc = kobject_init_and_add(&ncache_orangefs_obj->kobj, + &ncache_orangefs_ktype, + &orangefs_obj->kobj, + NCACHE_KOBJ_ID); + + if (rc) + goto ncache_obj_bail; + + kobject_uevent(&ncache_orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/perf_counters. */ + pc_orangefs_obj = kzalloc(sizeof(*pc_orangefs_obj), GFP_KERNEL); + if (!pc_orangefs_obj) { + rc = -EINVAL; + goto ncache_obj_bail; + } + + rc = kobject_init_and_add(&pc_orangefs_obj->kobj, + &pc_orangefs_ktype, + &orangefs_obj->kobj, + "perf_counters"); + + if (rc) + goto pc_obj_bail; + + kobject_uevent(&pc_orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/stats. 
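+	 * Assuming every kobject above registered cleanly, the
+	 * finished tree is:
+	 *
+	 *	/sys/fs/orangefs/{op_timeout_secs,slot_timeout_secs,
+	 *		perf_counter_reset,perf_history_size,
+	 *		perf_time_interval_secs}
+	 *	/sys/fs/orangefs/{acache,capcache,ccache,ncache}/...
+	 *	/sys/fs/orangefs/perf_counters/{acache,capcache,ncache}
+	 *	/sys/fs/orangefs/stats/{reads,writes}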
*/ + stats_orangefs_obj = kzalloc(sizeof(*stats_orangefs_obj), GFP_KERNEL); + if (!stats_orangefs_obj) { + rc = -EINVAL; + goto pc_obj_bail; + } + + rc = kobject_init_and_add(&stats_orangefs_obj->kobj, + &stats_orangefs_ktype, + &orangefs_obj->kobj, + STATS_KOBJ_ID); + + if (rc) + goto stats_obj_bail; + + kobject_uevent(&stats_orangefs_obj->kobj, KOBJ_ADD); + goto out; + +stats_obj_bail: + kobject_put(&stats_orangefs_obj->kobj); + +pc_obj_bail: + kobject_put(&pc_orangefs_obj->kobj); + +ncache_obj_bail: + kobject_put(&ncache_orangefs_obj->kobj); + +ccache_obj_bail: + kobject_put(&ccache_orangefs_obj->kobj); + +capcache_obj_bail: + kobject_put(&capcache_orangefs_obj->kobj); + +acache_obj_bail: + kobject_put(&acache_orangefs_obj->kobj); + +ofs_obj_bail: + kobject_put(&orangefs_obj->kobj); +out: + return rc; +} + +void orangefs_sysfs_exit(void) +{ + gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_exit: start\n"); + + kobject_put(&acache_orangefs_obj->kobj); + kobject_put(&capcache_orangefs_obj->kobj); + kobject_put(&ccache_orangefs_obj->kobj); + kobject_put(&ncache_orangefs_obj->kobj); + kobject_put(&pc_orangefs_obj->kobj); + kobject_put(&stats_orangefs_obj->kobj); + + kobject_put(&orangefs_obj->kobj); +} diff --git a/fs/orangefs/orangefs-sysfs.h b/fs/orangefs/orangefs-sysfs.h new file mode 100644 index 000000000000..f0b76382db02 --- /dev/null +++ b/fs/orangefs/orangefs-sysfs.h @@ -0,0 +1,2 @@ +extern int orangefs_sysfs_init(void); +extern void orangefs_sysfs_exit(void); diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c new file mode 100644 index 000000000000..8277aba65e87 --- /dev/null +++ b/fs/orangefs/orangefs-utils.c @@ -0,0 +1,1048 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. 
+ */ +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-dev-proto.h" +#include "orangefs-bufmap.h" + +__s32 fsid_of_op(struct orangefs_kernel_op_s *op) +{ + __s32 fsid = ORANGEFS_FS_ID_NULL; + + if (op) { + switch (op->upcall.type) { + case ORANGEFS_VFS_OP_FILE_IO: + fsid = op->upcall.req.io.refn.fs_id; + break; + case ORANGEFS_VFS_OP_LOOKUP: + fsid = op->upcall.req.lookup.parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_CREATE: + fsid = op->upcall.req.create.parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_GETATTR: + fsid = op->upcall.req.getattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_REMOVE: + fsid = op->upcall.req.remove.parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_MKDIR: + fsid = op->upcall.req.mkdir.parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_READDIR: + fsid = op->upcall.req.readdir.refn.fs_id; + break; + case ORANGEFS_VFS_OP_SETATTR: + fsid = op->upcall.req.setattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_SYMLINK: + fsid = op->upcall.req.sym.parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_RENAME: + fsid = op->upcall.req.rename.old_parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_STATFS: + fsid = op->upcall.req.statfs.fs_id; + break; + case ORANGEFS_VFS_OP_TRUNCATE: + fsid = op->upcall.req.truncate.refn.fs_id; + break; + case ORANGEFS_VFS_OP_MMAP_RA_FLUSH: + fsid = op->upcall.req.ra_cache_flush.refn.fs_id; + break; + case ORANGEFS_VFS_OP_FS_UMOUNT: + fsid = op->upcall.req.fs_umount.fs_id; + break; + case ORANGEFS_VFS_OP_GETXATTR: + fsid = op->upcall.req.getxattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_SETXATTR: + fsid = op->upcall.req.setxattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_LISTXATTR: + fsid = op->upcall.req.listxattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_REMOVEXATTR: + fsid = op->upcall.req.removexattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_FSYNC: + fsid = op->upcall.req.fsync.refn.fs_id; + break; + default: + break; + } + } + return fsid; +} + +static int orangefs_inode_flags(struct ORANGEFS_sys_attr_s *attrs) +{ + int flags = 0; + if (attrs->flags & ORANGEFS_IMMUTABLE_FL) + flags |= S_IMMUTABLE; + else + flags &= ~S_IMMUTABLE; + if (attrs->flags & ORANGEFS_APPEND_FL) + flags |= S_APPEND; + else + flags &= ~S_APPEND; + if (attrs->flags & ORANGEFS_NOATIME_FL) + flags |= S_NOATIME; + else + flags &= ~S_NOATIME; + return flags; +} + +static int orangefs_inode_perms(struct ORANGEFS_sys_attr_s *attrs) +{ + int perm_mode = 0; + + if (attrs->perms & ORANGEFS_O_EXECUTE) + perm_mode |= S_IXOTH; + if (attrs->perms & ORANGEFS_O_WRITE) + perm_mode |= S_IWOTH; + if (attrs->perms & ORANGEFS_O_READ) + perm_mode |= S_IROTH; + + if (attrs->perms & ORANGEFS_G_EXECUTE) + perm_mode |= S_IXGRP; + if (attrs->perms & ORANGEFS_G_WRITE) + perm_mode |= S_IWGRP; + if (attrs->perms & ORANGEFS_G_READ) + perm_mode |= S_IRGRP; + + if (attrs->perms & ORANGEFS_U_EXECUTE) + perm_mode |= S_IXUSR; + if (attrs->perms & ORANGEFS_U_WRITE) + perm_mode |= S_IWUSR; + if (attrs->perms & ORANGEFS_U_READ) + perm_mode |= S_IRUSR; + + if (attrs->perms & ORANGEFS_G_SGID) + perm_mode |= S_ISGID; + if (attrs->perms & ORANGEFS_U_SUID) + perm_mode |= S_ISUID; + + return perm_mode; +} + +/* + * NOTE: in kernel land, we never use the sys_attr->link_target for + * anything, so don't bother copying it into the sys_attr object here. 
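+ *
+ * A sketch of the mask mapping below: an iattr arriving with
+ * ATTR_UID | ATTR_MTIME | ATTR_MTIME_SET valid leaves here as
+ * attrs->mask == ORANGEFS_ATTR_SYS_UID | ORANGEFS_ATTR_SYS_MTIME |
+ * ORANGEFS_ATTR_SYS_MTIME_SET.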
+ */ +static inline int copy_attributes_from_inode(struct inode *inode, + struct ORANGEFS_sys_attr_s *attrs, + struct iattr *iattr) +{ + umode_t tmp_mode; + + if (!iattr || !inode || !attrs) { + gossip_err("NULL iattr (%p), inode (%p), attrs (%p) " + "in copy_attributes_from_inode!\n", + iattr, + inode, + attrs); + return -EINVAL; + } + /* + * We need to be careful to only copy the attributes out of the + * iattr object that we know are valid. + */ + attrs->mask = 0; + if (iattr->ia_valid & ATTR_UID) { + attrs->owner = from_kuid(current_user_ns(), iattr->ia_uid); + attrs->mask |= ORANGEFS_ATTR_SYS_UID; + gossip_debug(GOSSIP_UTILS_DEBUG, "(UID) %d\n", attrs->owner); + } + if (iattr->ia_valid & ATTR_GID) { + attrs->group = from_kgid(current_user_ns(), iattr->ia_gid); + attrs->mask |= ORANGEFS_ATTR_SYS_GID; + gossip_debug(GOSSIP_UTILS_DEBUG, "(GID) %d\n", attrs->group); + } + + if (iattr->ia_valid & ATTR_ATIME) { + attrs->mask |= ORANGEFS_ATTR_SYS_ATIME; + if (iattr->ia_valid & ATTR_ATIME_SET) { + attrs->atime = (time64_t)iattr->ia_atime.tv_sec; + attrs->mask |= ORANGEFS_ATTR_SYS_ATIME_SET; + } + } + if (iattr->ia_valid & ATTR_MTIME) { + attrs->mask |= ORANGEFS_ATTR_SYS_MTIME; + if (iattr->ia_valid & ATTR_MTIME_SET) { + attrs->mtime = (time64_t)iattr->ia_mtime.tv_sec; + attrs->mask |= ORANGEFS_ATTR_SYS_MTIME_SET; + } + } + if (iattr->ia_valid & ATTR_CTIME) + attrs->mask |= ORANGEFS_ATTR_SYS_CTIME; + + /* + * ORANGEFS cannot set size with a setattr operation. Probably not likely + * to be requested through the VFS, but just in case, don't worry about + * ATTR_SIZE + */ + + if (iattr->ia_valid & ATTR_MODE) { + tmp_mode = iattr->ia_mode; + if (tmp_mode & (S_ISVTX)) { + if (is_root_handle(inode)) { + /* + * allow sticky bit to be set on root (since + * it shows up that way by default anyhow), + * but don't show it to the server + */ + tmp_mode -= S_ISVTX; + } else { + gossip_debug(GOSSIP_UTILS_DEBUG, + "User attempted to set sticky bit on non-root directory; returning EINVAL.\n"); + return -EINVAL; + } + } + + if (tmp_mode & (S_ISUID)) { + gossip_debug(GOSSIP_UTILS_DEBUG, + "Attempting to set setuid bit (not supported); returning EINVAL.\n"); + return -EINVAL; + } + + attrs->perms = ORANGEFS_util_translate_mode(tmp_mode); + attrs->mask |= ORANGEFS_ATTR_SYS_PERM; + } + + return 0; +} + +static int orangefs_inode_type(enum orangefs_ds_type objtype) +{ + if (objtype == ORANGEFS_TYPE_METAFILE) + return S_IFREG; + else if (objtype == ORANGEFS_TYPE_DIRECTORY) + return S_IFDIR; + else if (objtype == ORANGEFS_TYPE_SYMLINK) + return S_IFLNK; + else + return -1; +} + +static int orangefs_inode_is_stale(struct inode *inode, int new, + struct ORANGEFS_sys_attr_s *attrs, char *link_target) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + int type = orangefs_inode_type(attrs->objtype); + if (!new) { + /* + * If the inode type or symlink target have changed then this + * inode is stale. 
+ */ + if (type == -1 || !(inode->i_mode & type)) { + orangefs_make_bad_inode(inode); + return 1; + } + if (type == S_IFLNK && strncmp(orangefs_inode->link_target, + link_target, ORANGEFS_NAME_MAX)) { + orangefs_make_bad_inode(inode); + return 1; + } + } + return 0; +} + +int orangefs_inode_getattr(struct inode *inode, int new, int size) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op; + loff_t inode_size, rounded_up_size; + int ret, type; + + gossip_debug(GOSSIP_UTILS_DEBUG, "%s: called on inode %pU\n", __func__, + get_khandle_from_ino(inode)); + + new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR); + if (!new_op) + return -ENOMEM; + new_op->upcall.req.getattr.refn = orangefs_inode->refn; + new_op->upcall.req.getattr.mask = size ? + ORANGEFS_ATTR_SYS_ALL_NOHINT : ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE; + + ret = service_operation(new_op, __func__, + get_interruptible_flag(inode)); + if (ret != 0) + goto out; + + type = orangefs_inode_type(new_op-> + downcall.resp.getattr.attributes.objtype); + ret = orangefs_inode_is_stale(inode, new, + &new_op->downcall.resp.getattr.attributes, + new_op->downcall.resp.getattr.link_target); + if (ret) { + ret = -ESTALE; + goto out; + } + + switch (type) { + case S_IFREG: + inode->i_flags = orangefs_inode_flags(&new_op-> + downcall.resp.getattr.attributes); + if (size) { + inode_size = (loff_t)new_op-> + downcall.resp.getattr.attributes.size; + rounded_up_size = + (inode_size + (4096 - (inode_size % 4096))); + inode->i_size = inode_size; + orangefs_inode->blksize = + new_op->downcall.resp.getattr.attributes.blksize; + spin_lock(&inode->i_lock); + inode->i_bytes = inode_size; + inode->i_blocks = + (unsigned long)(rounded_up_size / 512); + spin_unlock(&inode->i_lock); + } + break; + case S_IFDIR: + inode->i_size = PAGE_SIZE; + orangefs_inode->blksize = (1 << inode->i_blkbits); + spin_lock(&inode->i_lock); + inode_set_bytes(inode, inode->i_size); + spin_unlock(&inode->i_lock); + set_nlink(inode, 1); + break; + case S_IFLNK: + if (new) { + inode->i_size = (loff_t)strlen(new_op-> + downcall.resp.getattr.link_target); + orangefs_inode->blksize = (1 << inode->i_blkbits); + strlcpy(orangefs_inode->link_target, + new_op->downcall.resp.getattr.link_target, + ORANGEFS_NAME_MAX); + inode->i_link = orangefs_inode->link_target; + } + break; + } + + inode->i_uid = make_kuid(&init_user_ns, new_op-> + downcall.resp.getattr.attributes.owner); + inode->i_gid = make_kgid(&init_user_ns, new_op-> + downcall.resp.getattr.attributes.group); + inode->i_atime.tv_sec = (time64_t)new_op-> + downcall.resp.getattr.attributes.atime; + inode->i_mtime.tv_sec = (time64_t)new_op-> + downcall.resp.getattr.attributes.mtime; + inode->i_ctime.tv_sec = (time64_t)new_op-> + downcall.resp.getattr.attributes.ctime; + inode->i_atime.tv_nsec = 0; + inode->i_mtime.tv_nsec = 0; + inode->i_ctime.tv_nsec = 0; + + /* special case: mark the root inode as sticky */ + inode->i_mode = type | (is_root_handle(inode) ? 
S_ISVTX : 0) |
+		orangefs_inode_perms(&new_op->downcall.resp.getattr.attributes);
+
+	ret = 0;
+out:
+	op_release(new_op);
+	return ret;
+}
+
+int orangefs_inode_check_changed(struct inode *inode)
+{
+	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+	struct orangefs_kernel_op_s *new_op;
+	int ret;
+
+	gossip_debug(GOSSIP_UTILS_DEBUG, "%s: called on inode %pU\n", __func__,
+		     get_khandle_from_ino(inode));
+
+	new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
+	if (!new_op)
+		return -ENOMEM;
+	new_op->upcall.req.getattr.refn = orangefs_inode->refn;
+	new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_TYPE |
+	    ORANGEFS_ATTR_SYS_LNK_TARGET;
+
+	ret = service_operation(new_op, __func__,
+				get_interruptible_flag(inode));
+	if (ret != 0)
+		goto out;
+
+	ret = orangefs_inode_is_stale(inode, 0,
+				      &new_op->downcall.resp.getattr.attributes,
+				      new_op->downcall.resp.getattr.link_target);
+out:
+	op_release(new_op);
+	return ret;
+}
+
+/*
+ * Issues an orangefs setattr request to make sure the new attribute values
+ * take effect if successful. Returns 0 on success; -errno otherwise.
+ */
+int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr)
+{
+	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+	struct orangefs_kernel_op_s *new_op;
+	int ret;
+
+	new_op = op_alloc(ORANGEFS_VFS_OP_SETATTR);
+	if (!new_op)
+		return -ENOMEM;
+
+	new_op->upcall.req.setattr.refn = orangefs_inode->refn;
+	ret = copy_attributes_from_inode(inode,
+					 &new_op->upcall.req.setattr.attributes,
+					 iattr);
+	if (ret >= 0) {
+		ret = service_operation(new_op, __func__,
+					get_interruptible_flag(inode));
+
+		gossip_debug(GOSSIP_UTILS_DEBUG,
+			     "orangefs_inode_setattr: returning %d\n",
+			     ret);
+	}
+
+	op_release(new_op);
+
+	/*
+	 * A successful setattr should clear the atime, mtime and
+	 * ctime flags.
+	 */
+	if (ret == 0) {
+		ClearAtimeFlag(orangefs_inode);
+		ClearMtimeFlag(orangefs_inode);
+		ClearCtimeFlag(orangefs_inode);
+		ClearModeFlag(orangefs_inode);
+	}
+
+	return ret;
+}
+
+int orangefs_flush_inode(struct inode *inode)
+{
+	/*
+	 * This function gets called when the inode is dirty. Gather all
+	 * the information that needs to be setattr'ed; right now that is
+	 * only mode, atime, mtime and/or ctime.
+	 */
+	struct iattr wbattr;
+	int ret;
+	int mtime_flag;
+	int ctime_flag;
+	int atime_flag;
+	int mode_flag;
+	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+
+	memset(&wbattr, 0, sizeof(wbattr));
+
+	/*
+	 * Check inode flags up front and clear them if they are set. This
+	 * will prevent multiple processes from all trying to flush the same
+	 * inode if they call close() simultaneously.
+	 */
+	mtime_flag = MtimeFlag(orangefs_inode);
+	ClearMtimeFlag(orangefs_inode);
+	ctime_flag = CtimeFlag(orangefs_inode);
+	ClearCtimeFlag(orangefs_inode);
+	atime_flag = AtimeFlag(orangefs_inode);
+	ClearAtimeFlag(orangefs_inode);
+	mode_flag = ModeFlag(orangefs_inode);
+	ClearModeFlag(orangefs_inode);
+
+	/* -- Lazy atime, mtime and ctime update --
+	 * Note: all times are dictated by the server in the new scheme,
+	 * not by the clients.
+	 *
+	 * Mode updates are also handled now.
+	 */
+
+	if (mtime_flag)
+		wbattr.ia_valid |= ATTR_MTIME;
+	if (ctime_flag)
+		wbattr.ia_valid |= ATTR_CTIME;
+	if (atime_flag)
+		wbattr.ia_valid |= ATTR_ATIME;
+
+	if (mode_flag) {
+		wbattr.ia_mode = inode->i_mode;
+		wbattr.ia_valid |= ATTR_MODE;
+	}
+
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "*********** orangefs_flush_inode: %pU "
+		     "(ia_valid %d)\n",
+		     get_khandle_from_ino(inode),
+		     wbattr.ia_valid);
+	if (wbattr.ia_valid == 0) {
+		gossip_debug(GOSSIP_UTILS_DEBUG,
+			     "orangefs_flush_inode skipping setattr()\n");
+		return 0;
+	}
+
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "orangefs_flush_inode (%pU) writing mode %o\n",
+		     get_khandle_from_ino(inode),
+		     inode->i_mode);
+
+	ret = orangefs_inode_setattr(inode, &wbattr);
+
+	return ret;
+}
+
+int orangefs_unmount_sb(struct super_block *sb)
+{
+	int ret = -EINVAL;
+	struct orangefs_kernel_op_s *new_op = NULL;
+
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "orangefs_unmount_sb called on sb %p\n",
+		     sb);
+
+	new_op = op_alloc(ORANGEFS_VFS_OP_FS_UMOUNT);
+	if (!new_op)
+		return -ENOMEM;
+	new_op->upcall.req.fs_umount.id = ORANGEFS_SB(sb)->id;
+	new_op->upcall.req.fs_umount.fs_id = ORANGEFS_SB(sb)->fs_id;
+	strncpy(new_op->upcall.req.fs_umount.orangefs_config_server,
+		ORANGEFS_SB(sb)->devname,
+		ORANGEFS_MAX_SERVER_ADDR_LEN);
+
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "Attempting ORANGEFS Unmount via host %s\n",
+		     new_op->upcall.req.fs_umount.orangefs_config_server);
+
+	ret = service_operation(new_op, "orangefs_fs_umount", 0);
+
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "orangefs_unmount: got return value of %d\n", ret);
+	if (!ret)
+		ORANGEFS_SB(sb)->mount_pending = 1;
+
+	op_release(new_op);
+	return ret;
+}
+
+void orangefs_make_bad_inode(struct inode *inode)
+{
+	if (is_root_handle(inode)) {
+		/*
+		 * if this occurs, the pvfs2-client-core was killed but we
+		 * can't afford to lose the inode operations and such
+		 * associated with the root handle in any case.
+		 */
+		gossip_debug(GOSSIP_UTILS_DEBUG,
+			     "*** NOT making bad root inode %pU\n",
+			     get_khandle_from_ino(inode));
+	} else {
+		gossip_debug(GOSSIP_UTILS_DEBUG,
+			     "*** making bad inode %pU\n",
+			     get_khandle_from_ino(inode));
+		make_bad_inode(inode);
+	}
+}
+
+/*
+ * The following is a very dirty hack that is now a permanent part of the
+ * ORANGEFS protocol. See protocol.h for more error definitions.
+ */
+
+/* The order matches include/orangefs-types.h in the OrangeFS source. */
+static int PINT_errno_mapping[] = {
+	0, EPERM, ENOENT, EINTR, EIO, ENXIO, EBADF, EAGAIN, ENOMEM,
+	EFAULT, EBUSY, EEXIST, ENODEV, ENOTDIR, EISDIR, EINVAL, EMFILE,
+	EFBIG, ENOSPC, EROFS, EMLINK, EPIPE, EDEADLK, ENAMETOOLONG,
+	ENOLCK, ENOSYS, ENOTEMPTY, ELOOP, EWOULDBLOCK, ENOMSG, EUNATCH,
+	EBADR, EDEADLOCK, ENODATA, ETIME, ENONET, EREMOTE, ECOMM,
+	EPROTO, EBADMSG, EOVERFLOW, ERESTART, EMSGSIZE, EPROTOTYPE,
+	ENOPROTOOPT, EPROTONOSUPPORT, EOPNOTSUPP, EADDRINUSE,
+	EADDRNOTAVAIL, ENETDOWN, ENETUNREACH, ENETRESET, ENOBUFS,
+	ETIMEDOUT, ECONNREFUSED, EHOSTDOWN, EHOSTUNREACH, EALREADY,
+	EACCES, ECONNRESET, ERANGE
+};
+
+int orangefs_normalize_to_errno(__s32 error_code)
+{
+	__u32 i;
+
+	/* Success */
+	if (error_code == 0) {
+		return 0;
+	/*
+	 * This shouldn't ever happen. If it does it should be fixed on the
+	 * server.
+	 */
+	} else if (error_code > 0) {
+		gossip_err("orangefs: error status received.\n");
+		gossip_err("orangefs: assuming error code is inverted.\n");
+		error_code = -error_code;
+	}
+
+	/*
+	 * XXX: This is very bad since error codes from ORANGEFS may not be
+	 * suitable for return into userspace.
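+	 *
+	 * A worked decode, purely as illustration: if the wire value was
+	 * -(ORANGEFS_ERROR_BIT | class bits | 2), negating it and masking
+	 * off ORANGEFS_ERROR_BIT and ORANGEFS_ERROR_CLASS_BITS below leaves
+	 * i == 2, and PINT_errno_mapping[2] is ENOENT, so the caller sees
+	 * -ENOENT.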
+ */ + + /* + * Convert ORANGEFS error values into errno values suitable for return + * from the kernel. + */ + if ((-error_code) & ORANGEFS_NON_ERRNO_ERROR_BIT) { + if (((-error_code) & + (ORANGEFS_ERROR_NUMBER_BITS|ORANGEFS_NON_ERRNO_ERROR_BIT| + ORANGEFS_ERROR_BIT)) == ORANGEFS_ECANCEL) { + /* + * cancellation error codes generally correspond to + * a timeout from the client's perspective + */ + error_code = -ETIMEDOUT; + } else { + /* assume a default error code */ + gossip_err("orangefs: warning: got error code without errno equivalent: %d.\n", error_code); + error_code = -EINVAL; + } + + /* Convert ORANGEFS encoded errno values into regular errno values. */ + } else if ((-error_code) & ORANGEFS_ERROR_BIT) { + i = (-error_code) & ~(ORANGEFS_ERROR_BIT|ORANGEFS_ERROR_CLASS_BITS); + if (i < sizeof(PINT_errno_mapping)/sizeof(*PINT_errno_mapping)) + error_code = -PINT_errno_mapping[i]; + else + error_code = -EINVAL; + + /* + * Only ORANGEFS protocol error codes should ever come here. Otherwise + * there is a bug somewhere. + */ + } else { + gossip_err("orangefs: orangefs_normalize_to_errno: got error code which is not from ORANGEFS.\n"); + } + return error_code; +} + +#define NUM_MODES 11 +__s32 ORANGEFS_util_translate_mode(int mode) +{ + int ret = 0; + int i = 0; + static int modes[NUM_MODES] = { + S_IXOTH, S_IWOTH, S_IROTH, + S_IXGRP, S_IWGRP, S_IRGRP, + S_IXUSR, S_IWUSR, S_IRUSR, + S_ISGID, S_ISUID + }; + static int orangefs_modes[NUM_MODES] = { + ORANGEFS_O_EXECUTE, ORANGEFS_O_WRITE, ORANGEFS_O_READ, + ORANGEFS_G_EXECUTE, ORANGEFS_G_WRITE, ORANGEFS_G_READ, + ORANGEFS_U_EXECUTE, ORANGEFS_U_WRITE, ORANGEFS_U_READ, + ORANGEFS_G_SGID, ORANGEFS_U_SUID + }; + + for (i = 0; i < NUM_MODES; i++) + if (mode & modes[i]) + ret |= orangefs_modes[i]; + + return ret; +} +#undef NUM_MODES + +/* + * After obtaining a string representation of the client's debug + * keywords and their associated masks, this function is called to build an + * array of these values. + */ +int orangefs_prepare_cdm_array(char *debug_array_string) +{ + int i; + int rc = -EINVAL; + char *cds_head = NULL; + char *cds_delimiter = NULL; + int keyword_len = 0; + + gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); + + /* + * figure out how many elements the cdm_array needs. 
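+	 *
+	 * Each element is one newline-terminated "<keyword> <mask1> <mask2>"
+	 * line, parsed by the sscanf() below. For instance (keyword names
+	 * and masks here are made up):
+	 *
+	 *	verbose ffffffffffffffff ffffffffffffffff
+	 *	super 40 0
+	 *
+	 * would yield a cdm_element_count of two.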
+	 */
+	for (i = 0; i < strlen(debug_array_string); i++)
+		if (debug_array_string[i] == '\n')
+			cdm_element_count++;
+
+	if (!cdm_element_count) {
+		pr_info("No elements in client debug array string!\n");
+		goto out;
+	}
+
+	cdm_array =
+		kzalloc(cdm_element_count * sizeof(struct client_debug_mask),
+			GFP_KERNEL);
+	if (!cdm_array) {
+		pr_info("malloc failed for cdm_array!\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	cds_head = debug_array_string;
+
+	for (i = 0; i < cdm_element_count; i++) {
+		cds_delimiter = strchr(cds_head, '\n');
+		*cds_delimiter = '\0';
+
+		keyword_len = strcspn(cds_head, " ");
+
+		cdm_array[i].keyword = kzalloc(keyword_len + 1, GFP_KERNEL);
+		if (!cdm_array[i].keyword) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		sscanf(cds_head,
+		       "%s %llx %llx",
+		       cdm_array[i].keyword,
+		       (unsigned long long *)&(cdm_array[i].mask1),
+		       (unsigned long long *)&(cdm_array[i].mask2));
+
+		if (!strcmp(cdm_array[i].keyword, ORANGEFS_VERBOSE))
+			client_verbose_index = i;
+
+		if (!strcmp(cdm_array[i].keyword, ORANGEFS_ALL))
+			client_all_index = i;
+
+		cds_head = cds_delimiter + 1;
+	}
+
+	rc = cdm_element_count;
+
+	gossip_debug(GOSSIP_UTILS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+
+out:
+	return rc;
+}
+
+/*
+ * /sys/kernel/debug/orangefs/debug-help can be catted to
+ * see all the available kernel and client debug keywords.
+ *
+ * When the kernel boots, we have no idea what keywords the
+ * client supports, nor their associated masks.
+ *
+ * We pass through this function once at boot and stamp a
+ * boilerplate "we don't know" message for the client in the
+ * debug-help file. We pass through here again when the client
+ * starts and then we can fill out the debug-help file fully.
+ *
+ * The client might be restarted any number of times between
+ * reboots; we only build the debug-help file the first time.
+ */
+int orangefs_prepare_debugfs_help_string(int at_boot)
+{
+	int rc = -EINVAL;
+	int i;
+	int byte_count = 0;
+	char *client_title = "Client Debug Keywords:\n";
+	char *kernel_title = "Kernel Debug Keywords:\n";
+
+	gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+	if (at_boot) {
+		byte_count += strlen(HELP_STRING_UNINITIALIZED);
+		client_title = HELP_STRING_UNINITIALIZED;
+	} else {
+		/*
+		 * fill the client keyword/mask array and remember
+		 * how many elements there were.
+		 */
+		cdm_element_count =
+			orangefs_prepare_cdm_array(client_debug_array_string);
+		if (cdm_element_count <= 0)
+			goto out;
+
+		/* Count the bytes destined for debug_help_string. */
+		byte_count += strlen(client_title);
+
+		for (i = 0; i < cdm_element_count; i++) {
+			/* keyword plus the tab and newline added below */
+			byte_count += strlen(cdm_array[i].keyword) + 2;
+			if (byte_count >= DEBUG_HELP_STRING_SIZE) {
+				pr_info("%s: overflow 1!\n", __func__);
+				goto out;
+			}
+		}
+
+		gossip_debug(GOSSIP_UTILS_DEBUG,
+			     "%s: cdm_element_count:%d:\n",
+			     __func__,
+			     cdm_element_count);
+	}
+
+	byte_count += strlen(kernel_title);
+	for (i = 0; i < num_kmod_keyword_mask_map; i++) {
+		byte_count +=
+			strlen(s_kmod_keyword_mask_map[i].keyword) + 2;
+		if (byte_count >= DEBUG_HELP_STRING_SIZE) {
+			pr_info("%s: overflow 2!\n", __func__);
+			goto out;
+		}
+	}
+
+	/* Build debug_help_string.
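+	 *
+	 * The finished buffer, laid out by the strcat() calls below, looks
+	 * roughly like this (keywords vary):
+	 *
+	 *	Client Debug Keywords:
+	 *		<one keyword per line>
+	 *
+	 *	Kernel Debug Keywords:
+	 *		<one keyword per line>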
+	 */
+	debug_help_string = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL);
+	if (!debug_help_string) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	strcat(debug_help_string, client_title);
+
+	if (!at_boot) {
+		for (i = 0; i < cdm_element_count; i++) {
+			strcat(debug_help_string, "\t");
+			strcat(debug_help_string, cdm_array[i].keyword);
+			strcat(debug_help_string, "\n");
+		}
+	}
+
+	strcat(debug_help_string, "\n");
+	strcat(debug_help_string, kernel_title);
+
+	for (i = 0; i < num_kmod_keyword_mask_map; i++) {
+		strcat(debug_help_string, "\t");
+		strcat(debug_help_string, s_kmod_keyword_mask_map[i].keyword);
+		strcat(debug_help_string, "\n");
+	}
+
+	rc = 0;
+
+out:
+	return rc;
+}
+
+/*
+ * kernel = type 0
+ * client = type 1
+ */
+void debug_mask_to_string(void *mask, int type)
+{
+	int i;
+	int len = 0;
+	char *debug_string;
+	int element_count = 0;
+
+	gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+	if (type) {
+		debug_string = client_debug_string;
+		element_count = cdm_element_count;
+	} else {
+		debug_string = kernel_debug_string;
+		element_count = num_kmod_keyword_mask_map;
+	}
+
+	memset(debug_string, 0, ORANGEFS_MAX_DEBUG_STRING_LEN);
+
+	/*
+	 * Some keywords, like "all" or "verbose", are amalgams of
+	 * numerous other keywords. Make a special check for those
+	 * before grinding through the whole mask only to find out
+	 * later...
+	 */
+	if (check_amalgam_keyword(mask, type))
+		goto out;
+
+	/* Build the debug string. */
+	for (i = 0; i < element_count; i++)
+		if (type)
+			do_c_string(mask, i);
+		else
+			do_k_string(mask, i);
+
+	len = strlen(debug_string);
+
+	if ((len) && (type))
+		client_debug_string[len - 1] = '\0';
+	else if (len)
+		kernel_debug_string[len - 1] = '\0';
+	else if (type)
+		strcpy(client_debug_string, "none");
+	else
+		strcpy(kernel_debug_string, "none");
+
+out:
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "%s: string:%s:\n", __func__, debug_string);
+}
+
+void do_k_string(void *k_mask, int index)
+{
+	__u64 *mask = (__u64 *) k_mask;
+
+	if (keyword_is_amalgam((char *) s_kmod_keyword_mask_map[index].keyword))
+		goto out;
+
+	if (*mask & s_kmod_keyword_mask_map[index].mask_val) {
+		if ((strlen(kernel_debug_string) +
+		     strlen(s_kmod_keyword_mask_map[index].keyword))
+		    < ORANGEFS_MAX_DEBUG_STRING_LEN - 1) {
+			strcat(kernel_debug_string,
+			       s_kmod_keyword_mask_map[index].keyword);
+			strcat(kernel_debug_string, ",");
+		} else {
+			gossip_err("%s: overflow!\n", __func__);
+			strcpy(kernel_debug_string, ORANGEFS_ALL);
+			goto out;
+		}
+	}
+
+out:
+	return;
+}
+
+void do_c_string(void *c_mask, int index)
+{
+	struct client_debug_mask *mask = (struct client_debug_mask *) c_mask;
+
+	if (keyword_is_amalgam(cdm_array[index].keyword))
+		goto out;
+
+	if ((mask->mask1 & cdm_array[index].mask1) ||
+	    (mask->mask2 & cdm_array[index].mask2)) {
+		if ((strlen(client_debug_string) +
+		     strlen(cdm_array[index].keyword) + 1)
+		    < ORANGEFS_MAX_DEBUG_STRING_LEN - 2) {
+			strcat(client_debug_string,
+			       cdm_array[index].keyword);
+			strcat(client_debug_string, ",");
+		} else {
+			gossip_err("%s: overflow!\n", __func__);
+			strcpy(client_debug_string, ORANGEFS_ALL);
+			goto out;
+		}
+	}
+out:
+	return;
+}
+
+int keyword_is_amalgam(char *keyword)
+{
+	int rc = 0;
+
+	if ((!strcmp(keyword, ORANGEFS_ALL)) ||
+	    (!strcmp(keyword, ORANGEFS_VERBOSE)))
+		rc = 1;
+
+	return rc;
+}
+
+/*
+ * kernel = type 0
+ * client = type 1
+ *
+ * return 1 if we found an amalgam.
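+ *
+ * For example, when a client mask compares equal to both mask words of
+ * cdm_array[client_all_index], the whole string collapses to "all"
+ * rather than a comma-separated list of every keyword that "all" implies.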
+ */ +int check_amalgam_keyword(void *mask, int type) +{ + __u64 *k_mask; + struct client_debug_mask *c_mask; + int k_all_index = num_kmod_keyword_mask_map - 1; + int rc = 0; + + if (type) { + c_mask = (struct client_debug_mask *) mask; + + if ((c_mask->mask1 == cdm_array[client_all_index].mask1) && + (c_mask->mask2 == cdm_array[client_all_index].mask2)) { + strcpy(client_debug_string, ORANGEFS_ALL); + rc = 1; + goto out; + } + + if ((c_mask->mask1 == cdm_array[client_verbose_index].mask1) && + (c_mask->mask2 == cdm_array[client_verbose_index].mask2)) { + strcpy(client_debug_string, ORANGEFS_VERBOSE); + rc = 1; + goto out; + } + + } else { + k_mask = (__u64 *) mask; + + if (*k_mask >= s_kmod_keyword_mask_map[k_all_index].mask_val) { + strcpy(kernel_debug_string, ORANGEFS_ALL); + rc = 1; + goto out; + } + } + +out: + + return rc; +} + +/* + * kernel = type 0 + * client = type 1 + */ +void debug_string_to_mask(char *debug_string, void *mask, int type) +{ + char *unchecked_keyword; + int i; + char *strsep_fodder = kstrdup(debug_string, GFP_KERNEL); + char *original_pointer; + int element_count = 0; + struct client_debug_mask *c_mask; + __u64 *k_mask; + + gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); + + if (type) { + c_mask = (struct client_debug_mask *)mask; + element_count = cdm_element_count; + } else { + k_mask = (__u64 *)mask; + *k_mask = 0; + element_count = num_kmod_keyword_mask_map; + } + + original_pointer = strsep_fodder; + while ((unchecked_keyword = strsep(&strsep_fodder, ","))) + if (strlen(unchecked_keyword)) { + for (i = 0; i < element_count; i++) + if (type) + do_c_mask(i, + unchecked_keyword, + &c_mask); + else + do_k_mask(i, + unchecked_keyword, + &k_mask); + } + + kfree(original_pointer); +} + +void do_c_mask(int i, + char *unchecked_keyword, + struct client_debug_mask **sane_mask) +{ + + if (!strcmp(cdm_array[i].keyword, unchecked_keyword)) { + (**sane_mask).mask1 = (**sane_mask).mask1 | cdm_array[i].mask1; + (**sane_mask).mask2 = (**sane_mask).mask2 | cdm_array[i].mask2; + } +} + +void do_k_mask(int i, char *unchecked_keyword, __u64 **sane_mask) +{ + + if (!strcmp(s_kmod_keyword_mask_map[i].keyword, unchecked_keyword)) + **sane_mask = (**sane_mask) | + s_kmod_keyword_mask_map[i].mask_val; +} diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h new file mode 100644 index 000000000000..50578a28bd9e --- /dev/null +++ b/fs/orangefs/protocol.h @@ -0,0 +1,452 @@ +#include <linux/types.h> +#include <linux/spinlock_types.h> +#include <linux/slab.h> +#include <linux/ioctl.h> + +extern struct client_debug_mask *cdm_array; +extern char *debug_help_string; +extern int help_string_initialized; +extern struct dentry *debug_dir; +extern struct dentry *help_file_dentry; +extern struct dentry *client_debug_dentry; +extern const struct file_operations debug_help_fops; +extern int client_all_index; +extern int client_verbose_index; +extern int cdm_element_count; +#define DEBUG_HELP_STRING_SIZE 4096 +#define HELP_STRING_UNINITIALIZED \ + "Client Debug Keywords are unknown until the first time\n" \ + "the client is started after boot.\n" +#define ORANGEFS_KMOD_DEBUG_HELP_FILE "debug-help" +#define ORANGEFS_KMOD_DEBUG_FILE "kernel-debug" +#define ORANGEFS_CLIENT_DEBUG_FILE "client-debug" +#define ORANGEFS_VERBOSE "verbose" +#define ORANGEFS_ALL "all" + +/* pvfs2-config.h ***********************************************************/ +#define ORANGEFS_VERSION_MAJOR 2 +#define ORANGEFS_VERSION_MINOR 9 +#define ORANGEFS_VERSION_SUB 0 + +/* khandle stuff 
***********************************************************/
+
+/*
+ * The 2.9 core will put 64 bit handles in here like this:
+ *	1234 0000 0000 5678
+ * The 3.0 and beyond cores will put 128 bit handles in here like this:
+ *	1234 5678 90AB CDEF
+ * The kernel module will always use the first four bytes and
+ * the last four bytes as an inum.
+ */
+struct orangefs_khandle {
+	unsigned char u[16];
+} __aligned(8);
+
+/*
+ * kernel version of an object ref.
+ */
+struct orangefs_object_kref {
+	struct orangefs_khandle khandle;
+	__s32 fs_id;
+	__s32 __pad1;
+};
+
+/*
+ * Compare two khandles; assumes little endian, thus walking from the large
+ * address to the small address.
+ */
+static inline int ORANGEFS_khandle_cmp(const struct orangefs_khandle *kh1,
+				       const struct orangefs_khandle *kh2)
+{
+	int i;
+
+	for (i = 15; i >= 0; i--) {
+		if (kh1->u[i] > kh2->u[i])
+			return 1;
+		if (kh1->u[i] < kh2->u[i])
+			return -1;
+	}
+
+	return 0;
+}
+
+static inline void ORANGEFS_khandle_to(const struct orangefs_khandle *kh,
+				       void *p, int size)
+{
+	memset(p, 0, size);
+	memcpy(p, kh->u, 16);
+}
+
+static inline void ORANGEFS_khandle_from(struct orangefs_khandle *kh,
+					 void *p, int size)
+{
+	memset(kh, 0, 16);
+	memcpy(kh->u, p, 16);
+}
+
+/* pvfs2-types.h ************************************************************/
+typedef __u32 ORANGEFS_uid;
+typedef __u32 ORANGEFS_gid;
+typedef __s32 ORANGEFS_fs_id;
+typedef __u32 ORANGEFS_permissions;
+typedef __u64 ORANGEFS_time;
+typedef __s64 ORANGEFS_size;
+typedef __u64 ORANGEFS_flags;
+typedef __u64 ORANGEFS_ds_position;
+typedef __s32 ORANGEFS_error;
+typedef __s64 ORANGEFS_offset;
+
+#define ORANGEFS_SUPER_MAGIC 0x20030528
+
+/*
+ * ORANGEFS error codes are signed 32-bit integers. Error codes are negative,
+ * but the sign is stripped before decoding.
+ */
+
+/* Bit 31 is not used since it is the sign. */
+
+/*
+ * Bit 30 specifies that this is an ORANGEFS error. An ORANGEFS error is
+ * either an encoded errno value or an ORANGEFS protocol error.
+ */
+#define ORANGEFS_ERROR_BIT (1 << 30)
+
+/*
+ * Bit 29 specifies that this is an ORANGEFS protocol error and not an
+ * encoded errno value.
+ */
+#define ORANGEFS_NON_ERRNO_ERROR_BIT (1 << 29)
+
+/*
+ * Bits 9, 8, and 7 specify the error class, which encodes the section of
+ * server code the error originated in for logging purposes. It is not used
+ * in the kernel except to be masked out.
+ */
+#define ORANGEFS_ERROR_CLASS_BITS 0x380
+
+/* Bits 6 - 0 are reserved for the actual error code. */
+#define ORANGEFS_ERROR_NUMBER_BITS 0x7f
+
+/* Encoded errno values are decoded by PINT_errno_mapping in orangefs-utils.c. */
+
+/* Our own ORANGEFS protocol error codes.
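+ *
+ * A worked example of the encoding: ORANGEFS_ECANCEL below is
+ *
+ *	1 | ORANGEFS_NON_ERRNO_ERROR_BIT | ORANGEFS_ERROR_BIT
+ *	  = 0x00000001 | 0x20000000 | 0x40000000
+ *	  = 0x60000001
+ *
+ * and travels on the wire as the negative value -0x60000001.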
*/ +#define ORANGEFS_ECANCEL (1|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_EDEVINIT (2|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_EDETAIL (3|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_EHOSTNTFD (4|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_EADDRNTFD (5|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_ENORECVR (6|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_ETRYAGAIN (7|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_ENOTPVFS (8|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_ESECURITY (9|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) + +/* permission bits */ +#define ORANGEFS_O_EXECUTE (1 << 0) +#define ORANGEFS_O_WRITE (1 << 1) +#define ORANGEFS_O_READ (1 << 2) +#define ORANGEFS_G_EXECUTE (1 << 3) +#define ORANGEFS_G_WRITE (1 << 4) +#define ORANGEFS_G_READ (1 << 5) +#define ORANGEFS_U_EXECUTE (1 << 6) +#define ORANGEFS_U_WRITE (1 << 7) +#define ORANGEFS_U_READ (1 << 8) +/* no ORANGEFS_U_VTX (sticky bit) */ +#define ORANGEFS_G_SGID (1 << 10) +#define ORANGEFS_U_SUID (1 << 11) + +/* definition taken from stdint.h */ +#define INT32_MAX (2147483647) +#define ORANGEFS_ITERATE_START (INT32_MAX - 1) +#define ORANGEFS_ITERATE_END (INT32_MAX - 2) +#define ORANGEFS_ITERATE_NEXT (INT32_MAX - 3) +#define ORANGEFS_READDIR_START ORANGEFS_ITERATE_START +#define ORANGEFS_READDIR_END ORANGEFS_ITERATE_END +#define ORANGEFS_IMMUTABLE_FL FS_IMMUTABLE_FL +#define ORANGEFS_APPEND_FL FS_APPEND_FL +#define ORANGEFS_NOATIME_FL FS_NOATIME_FL +#define ORANGEFS_MIRROR_FL 0x01000000ULL +#define ORANGEFS_O_EXECUTE (1 << 0) +#define ORANGEFS_FS_ID_NULL ((__s32)0) +#define ORANGEFS_ATTR_SYS_UID (1 << 0) +#define ORANGEFS_ATTR_SYS_GID (1 << 1) +#define ORANGEFS_ATTR_SYS_PERM (1 << 2) +#define ORANGEFS_ATTR_SYS_ATIME (1 << 3) +#define ORANGEFS_ATTR_SYS_CTIME (1 << 4) +#define ORANGEFS_ATTR_SYS_MTIME (1 << 5) +#define ORANGEFS_ATTR_SYS_TYPE (1 << 6) +#define ORANGEFS_ATTR_SYS_ATIME_SET (1 << 7) +#define ORANGEFS_ATTR_SYS_MTIME_SET (1 << 8) +#define ORANGEFS_ATTR_SYS_SIZE (1 << 20) +#define ORANGEFS_ATTR_SYS_LNK_TARGET (1 << 24) +#define ORANGEFS_ATTR_SYS_DFILE_COUNT (1 << 25) +#define ORANGEFS_ATTR_SYS_DIRENT_COUNT (1 << 26) +#define ORANGEFS_ATTR_SYS_BLKSIZE (1 << 28) +#define ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT (1 << 29) +#define ORANGEFS_ATTR_SYS_COMMON_ALL \ + (ORANGEFS_ATTR_SYS_UID | \ + ORANGEFS_ATTR_SYS_GID | \ + ORANGEFS_ATTR_SYS_PERM | \ + ORANGEFS_ATTR_SYS_ATIME | \ + ORANGEFS_ATTR_SYS_CTIME | \ + ORANGEFS_ATTR_SYS_MTIME | \ + ORANGEFS_ATTR_SYS_TYPE) + +#define ORANGEFS_ATTR_SYS_ALL_SETABLE \ +(ORANGEFS_ATTR_SYS_COMMON_ALL-ORANGEFS_ATTR_SYS_TYPE) + +#define ORANGEFS_ATTR_SYS_ALL_NOHINT \ + (ORANGEFS_ATTR_SYS_COMMON_ALL | \ + ORANGEFS_ATTR_SYS_SIZE | \ + ORANGEFS_ATTR_SYS_LNK_TARGET | \ + ORANGEFS_ATTR_SYS_DFILE_COUNT | \ + ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT | \ + ORANGEFS_ATTR_SYS_DIRENT_COUNT | \ + ORANGEFS_ATTR_SYS_BLKSIZE) + +#define ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE \ + (ORANGEFS_ATTR_SYS_COMMON_ALL | \ + ORANGEFS_ATTR_SYS_LNK_TARGET | \ + ORANGEFS_ATTR_SYS_DFILE_COUNT | \ + ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT | \ + ORANGEFS_ATTR_SYS_DIRENT_COUNT | \ + ORANGEFS_ATTR_SYS_BLKSIZE) + +#define ORANGEFS_XATTR_REPLACE 0x2 +#define ORANGEFS_XATTR_CREATE 0x1 +#define ORANGEFS_MAX_SERVER_ADDR_LEN 256 +#define ORANGEFS_NAME_MAX 256 +/* + * max extended attribute name len as imposed by the VFS and exploited for the + * upcall request 
types. + * NOTE: Please retain them as multiples of 8 even if you wish to change them + * This is *NECESSARY* for supporting 32 bit user-space binaries on a 64-bit + * kernel. Due to implementation within DBPF, this really needs to be + * ORANGEFS_NAME_MAX, which it was the same value as, but no reason to let it + * break if that changes in the future. + */ +#define ORANGEFS_MAX_XATTR_NAMELEN ORANGEFS_NAME_MAX /* Not the same as + * XATTR_NAME_MAX defined + * by <linux/xattr.h> + */ +#define ORANGEFS_MAX_XATTR_VALUELEN 8192 /* Not the same as XATTR_SIZE_MAX + * defined by <linux/xattr.h> + */ +#define ORANGEFS_MAX_XATTR_LISTLEN 16 /* Not the same as XATTR_LIST_MAX + * defined by <linux/xattr.h> + */ +/* + * ORANGEFS I/O operation types, used in both system and server interfaces. + */ +enum ORANGEFS_io_type { + ORANGEFS_IO_READ = 1, + ORANGEFS_IO_WRITE = 2 +}; + +/* + * If this enum is modified the server parameters related to the precreate pool + * batch and low threshold sizes may need to be modified to reflect this + * change. + */ +enum orangefs_ds_type { + ORANGEFS_TYPE_NONE = 0, + ORANGEFS_TYPE_METAFILE = (1 << 0), + ORANGEFS_TYPE_DATAFILE = (1 << 1), + ORANGEFS_TYPE_DIRECTORY = (1 << 2), + ORANGEFS_TYPE_SYMLINK = (1 << 3), + ORANGEFS_TYPE_DIRDATA = (1 << 4), + ORANGEFS_TYPE_INTERNAL = (1 << 5) /* for the server's private use */ +}; + +/* + * ORANGEFS_certificate simply stores a buffer with the buffer size. + * The buffer can be converted to an OpenSSL X509 struct for use. + */ +struct ORANGEFS_certificate { + __u32 buf_size; + unsigned char *buf; +}; + +/* + * A credential identifies a user and is signed by the client/user + * private key. + */ +struct ORANGEFS_credential { + __u32 userid; /* user id */ + __u32 num_groups; /* length of group_array */ + __u32 *group_array; /* groups for which the user is a member */ + char *issuer; /* alias of the issuing server */ + __u64 timeout; /* seconds after epoch to time out */ + __u32 sig_size; /* length of the signature in bytes */ + unsigned char *signature; /* digital signature */ + struct ORANGEFS_certificate certificate; /* user certificate buffer */ +}; +#define extra_size_ORANGEFS_credential (ORANGEFS_REQ_LIMIT_GROUPS * \ + sizeof(__u32) + \ + ORANGEFS_REQ_LIMIT_ISSUER + \ + ORANGEFS_REQ_LIMIT_SIGNATURE + \ + extra_size_ORANGEFS_certificate) + +/* This structure is used by the VFS-client interaction alone */ +struct ORANGEFS_keyval_pair { + char key[ORANGEFS_MAX_XATTR_NAMELEN]; + __s32 key_sz; /* __s32 for portable, fixed-size structures */ + __s32 val_sz; + char val[ORANGEFS_MAX_XATTR_VALUELEN]; +}; + +/* pvfs2-sysint.h ***********************************************************/ +/* Describes attributes for a file, directory, or symlink. 
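+ *
+ * The mask field below gates which members are meaningful. For example, a
+ * setattr upcall built by copy_attributes_from_inode() with
+ * mask == (ORANGEFS_ATTR_SYS_UID | ORANGEFS_ATTR_SYS_PERM) tells the
+ * server to look only at owner and perms and ignore the rest.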
*/ +struct ORANGEFS_sys_attr_s { + __u32 owner; + __u32 group; + __u32 perms; + __u64 atime; + __u64 mtime; + __u64 ctime; + __s64 size; + + /* NOTE: caller must free if valid */ + char *link_target; + + /* Changed to __s32 so that size of structure does not change */ + __s32 dfile_count; + + /* Changed to __s32 so that size of structure does not change */ + __s32 distr_dir_servers_initial; + + /* Changed to __s32 so that size of structure does not change */ + __s32 distr_dir_servers_max; + + /* Changed to __s32 so that size of structure does not change */ + __s32 distr_dir_split_size; + + __u32 mirror_copies_count; + + /* NOTE: caller must free if valid */ + char *dist_name; + + /* NOTE: caller must free if valid */ + char *dist_params; + + __s64 dirent_count; + enum orangefs_ds_type objtype; + __u64 flags; + __u32 mask; + __s64 blksize; +}; + +#define ORANGEFS_LOOKUP_LINK_NO_FOLLOW 0 + +/* pint-dev.h ***************************************************************/ + +/* parameter structure used in ORANGEFS_DEV_DEBUG ioctl command */ +struct dev_mask_info_s { + enum { + KERNEL_MASK, + CLIENT_MASK, + } mask_type; + __u64 mask_value; +}; + +struct dev_mask2_info_s { + __u64 mask1_value; + __u64 mask2_value; +}; + +/* pvfs2-util.h *************************************************************/ +__s32 ORANGEFS_util_translate_mode(int mode); + +/* pvfs2-debug.h ************************************************************/ +#include "orangefs-debug.h" + +/* pvfs2-internal.h *********************************************************/ +#define llu(x) (unsigned long long)(x) +#define lld(x) (long long)(x) + +/* pint-dev-shared.h ********************************************************/ +#define ORANGEFS_DEV_MAGIC 'k' + +#define ORANGEFS_READDIR_DEFAULT_DESC_COUNT 5 + +#define DEV_GET_MAGIC 0x1 +#define DEV_GET_MAX_UPSIZE 0x2 +#define DEV_GET_MAX_DOWNSIZE 0x3 +#define DEV_MAP 0x4 +#define DEV_REMOUNT_ALL 0x5 +#define DEV_DEBUG 0x6 +#define DEV_UPSTREAM 0x7 +#define DEV_CLIENT_MASK 0x8 +#define DEV_CLIENT_STRING 0x9 +#define DEV_MAX_NR 0xa + +/* supported ioctls, codes are with respect to user-space */ +enum { + ORANGEFS_DEV_GET_MAGIC = _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAGIC, __s32), + ORANGEFS_DEV_GET_MAX_UPSIZE = + _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAX_UPSIZE, __s32), + ORANGEFS_DEV_GET_MAX_DOWNSIZE = + _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAX_DOWNSIZE, __s32), + ORANGEFS_DEV_MAP = _IO(ORANGEFS_DEV_MAGIC, DEV_MAP), + ORANGEFS_DEV_REMOUNT_ALL = _IO(ORANGEFS_DEV_MAGIC, DEV_REMOUNT_ALL), + ORANGEFS_DEV_DEBUG = _IOR(ORANGEFS_DEV_MAGIC, DEV_DEBUG, __s32), + ORANGEFS_DEV_UPSTREAM = _IOW(ORANGEFS_DEV_MAGIC, DEV_UPSTREAM, int), + ORANGEFS_DEV_CLIENT_MASK = _IOW(ORANGEFS_DEV_MAGIC, + DEV_CLIENT_MASK, + struct dev_mask2_info_s), + ORANGEFS_DEV_CLIENT_STRING = _IOW(ORANGEFS_DEV_MAGIC, + DEV_CLIENT_STRING, + char *), + ORANGEFS_DEV_MAXNR = DEV_MAX_NR, +}; + +/* + * version number for use in communicating between kernel space and user + * space. Zero signifies the upstream version of the kernel module. + */ +#define ORANGEFS_KERNEL_PROTO_VERSION 0 +#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20903 + +/* + * describes memory regions to map in the ORANGEFS_DEV_MAP ioctl. + * NOTE: See devorangefs-req.c for 32 bit compat structure. + * Since this structure has a variable-sized layout that is different + * on 32 and 64 bit platforms, we need to normalize to a 64 bit layout + * on such systems before servicing ioctl calls from user-space binaries + * that may be 32 bit! 
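+ *
+ * A sketch of the 32-bit shape (the authoritative compat structure lives
+ * in devorangefs-req.c, as noted above):
+ *
+ *	struct ORANGEFS_dev_map_desc32 {
+ *		compat_uptr_t ptr;
+ *		__s32 total_size;
+ *		__s32 size;
+ *		__s32 count;
+ *	};
+ *
+ * Only the user pointer narrows; the three sizes stay __s32 either way.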
+ */ +struct ORANGEFS_dev_map_desc { + void *ptr; + __s32 total_size; + __s32 size; + __s32 count; +}; + +/* gossip.h *****************************************************************/ + +#ifdef GOSSIP_DISABLE_DEBUG +#define gossip_debug(mask, format, f...) do {} while (0) +#else +extern __u64 gossip_debug_mask; +extern struct client_debug_mask client_debug_mask; + +/* try to avoid function call overhead by checking masks in macro */ +#define gossip_debug(mask, format, f...) \ +do { \ + if (gossip_debug_mask & mask) \ + printk(format, ##f); \ +} while (0) +#endif /* GOSSIP_DISABLE_DEBUG */ + +/* do file and line number printouts w/ the GNU preprocessor */ +#define gossip_ldebug(mask, format, f...) \ + gossip_debug(mask, "%s: " format, __func__, ##f) + +#define gossip_err printk +#define gossip_lerr(format, f...) \ + gossip_err("%s line %d: " format, \ + __FILE__, \ + __LINE__, \ + ##f) diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c new file mode 100644 index 000000000000..b9da9a0281c9 --- /dev/null +++ b/fs/orangefs/super.c @@ -0,0 +1,559 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" + +#include <linux/parser.h> + +/* a cache for orangefs-inode objects (i.e. orangefs inode private data) */ +static struct kmem_cache *orangefs_inode_cache; + +/* list for storing orangefs specific superblocks in use */ +LIST_HEAD(orangefs_superblocks); + +DEFINE_SPINLOCK(orangefs_superblocks_lock); + +enum { + Opt_intr, + Opt_acl, + Opt_local_lock, + + Opt_err +}; + +static const match_table_t tokens = { + { Opt_acl, "acl" }, + { Opt_intr, "intr" }, + { Opt_local_lock, "local_lock" }, + { Opt_err, NULL } +}; + + +static int parse_mount_options(struct super_block *sb, char *options, + int silent) +{ + struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(sb); + substring_t args[MAX_OPT_ARGS]; + char *p; + + /* + * Force any potential flags that might be set from the mount + * to zero, ie, initialize to unset. + */ + sb->s_flags &= ~MS_POSIXACL; + orangefs_sb->flags &= ~ORANGEFS_OPT_INTR; + orangefs_sb->flags &= ~ORANGEFS_OPT_LOCAL_LOCK; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + + if (!*p) + continue; + + token = match_token(p, tokens, args); + switch (token) { + case Opt_acl: + sb->s_flags |= MS_POSIXACL; + break; + case Opt_intr: + orangefs_sb->flags |= ORANGEFS_OPT_INTR; + break; + case Opt_local_lock: + orangefs_sb->flags |= ORANGEFS_OPT_LOCAL_LOCK; + break; + default: + goto fail; + } + } + + return 0; +fail: + if (!silent) + gossip_err("Error: mount option [%s] is not supported.\n", p); + return -EINVAL; +} + +static void orangefs_inode_cache_ctor(void *req) +{ + struct orangefs_inode_s *orangefs_inode = req; + + inode_init_once(&orangefs_inode->vfs_inode); + init_rwsem(&orangefs_inode->xattr_sem); + + orangefs_inode->vfs_inode.i_version = 1; +} + +static struct inode *orangefs_alloc_inode(struct super_block *sb) +{ + struct orangefs_inode_s *orangefs_inode; + + orangefs_inode = kmem_cache_alloc(orangefs_inode_cache, GFP_KERNEL); + if (orangefs_inode == NULL) { + gossip_err("Failed to allocate orangefs_inode\n"); + return NULL; + } + + /* + * We want to clear everything except for rw_semaphore and the + * vfs_inode. 
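+	 *
+	 * The slab constructor (orangefs_inode_cache_ctor above) runs only
+	 * when a slab object is first created, not on every allocation, so
+	 * per-allocation state must be re-zeroed by hand here each time the
+	 * object is recycled.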
+ */ + memset(&orangefs_inode->refn.khandle, 0, 16); + orangefs_inode->refn.fs_id = ORANGEFS_FS_ID_NULL; + orangefs_inode->last_failed_block_index_read = 0; + memset(orangefs_inode->link_target, 0, sizeof(orangefs_inode->link_target)); + orangefs_inode->pinode_flags = 0; + + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_alloc_inode: allocated %p\n", + &orangefs_inode->vfs_inode); + return &orangefs_inode->vfs_inode; +} + +static void orangefs_destroy_inode(struct inode *inode) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "%s: deallocated %p destroying inode %pU\n", + __func__, orangefs_inode, get_khandle_from_ino(inode)); + + kmem_cache_free(orangefs_inode_cache, orangefs_inode); +} + +/* + * NOTE: information filled in here is typically reflected in the + * output of the system command 'df' +*/ +static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + int ret = -ENOMEM; + struct orangefs_kernel_op_s *new_op = NULL; + int flags = 0; + struct super_block *sb = NULL; + + sb = dentry->d_sb; + + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_statfs: called on sb %p (fs_id is %d)\n", + sb, + (int)(ORANGEFS_SB(sb)->fs_id)); + + new_op = op_alloc(ORANGEFS_VFS_OP_STATFS); + if (!new_op) + return ret; + new_op->upcall.req.statfs.fs_id = ORANGEFS_SB(sb)->fs_id; + + if (ORANGEFS_SB(sb)->flags & ORANGEFS_OPT_INTR) + flags = ORANGEFS_OP_INTERRUPTIBLE; + + ret = service_operation(new_op, "orangefs_statfs", flags); + + if (new_op->downcall.status < 0) + goto out_op_release; + + gossip_debug(GOSSIP_SUPER_DEBUG, + "%s: got %ld blocks available | " + "%ld blocks total | %ld block size | " + "%ld files total | %ld files avail\n", + __func__, + (long)new_op->downcall.resp.statfs.blocks_avail, + (long)new_op->downcall.resp.statfs.blocks_total, + (long)new_op->downcall.resp.statfs.block_size, + (long)new_op->downcall.resp.statfs.files_total, + (long)new_op->downcall.resp.statfs.files_avail); + + buf->f_type = sb->s_magic; + memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid)); + buf->f_bsize = new_op->downcall.resp.statfs.block_size; + buf->f_namelen = ORANGEFS_NAME_MAX; + + buf->f_blocks = (sector_t) new_op->downcall.resp.statfs.blocks_total; + buf->f_bfree = (sector_t) new_op->downcall.resp.statfs.blocks_avail; + buf->f_bavail = (sector_t) new_op->downcall.resp.statfs.blocks_avail; + buf->f_files = (sector_t) new_op->downcall.resp.statfs.files_total; + buf->f_ffree = (sector_t) new_op->downcall.resp.statfs.files_avail; + buf->f_frsize = sb->s_blocksize; + +out_op_release: + op_release(new_op); + gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_statfs: returning %d\n", ret); + return ret; +} + +/* + * Remount as initiated by VFS layer. We just need to reparse the mount + * options, no need to signal pvfs2-client-core about it. + */ +static int orangefs_remount_fs(struct super_block *sb, int *flags, char *data) +{ + gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount_fs: called\n"); + return parse_mount_options(sb, data, 1); +} + +/* + * Remount as initiated by pvfs2-client-core on restart. This is used to + * repopulate mount information left from previous pvfs2-client-core. + * + * the idea here is that given a valid superblock, we're + * re-initializing the user space client with the initial mount + * information specified when the super block was first initialized. + * this is very different than the first initialization/creation of a + * superblock. 
we use the special service_priority_operation to make + * sure that the mount gets ahead of any other pending operation that + * is waiting for servicing. this means that the pvfs2-client won't + * fail to start several times for all other pending operations before + * the client regains all of the mount information from us. + * NOTE: this function assumes that the request_mutex is already acquired! + */ +int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb) +{ + struct orangefs_kernel_op_s *new_op; + int ret = -EINVAL; + + gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount: called\n"); + + new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT); + if (!new_op) + return -ENOMEM; + strncpy(new_op->upcall.req.fs_mount.orangefs_config_server, + orangefs_sb->devname, + ORANGEFS_MAX_SERVER_ADDR_LEN); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "Attempting ORANGEFS Remount via host %s\n", + new_op->upcall.req.fs_mount.orangefs_config_server); + + /* + * we assume that the calling function has already acquired the + * request_mutex to prevent other operations from bypassing + * this one + */ + ret = service_operation(new_op, "orangefs_remount", + ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX); + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_remount: mount got return value of %d\n", + ret); + if (ret == 0) { + /* + * store the id assigned to this sb -- it's just a + * short-lived mapping that the system interface uses + * to map this superblock to a particular mount entry + */ + orangefs_sb->id = new_op->downcall.resp.fs_mount.id; + orangefs_sb->mount_pending = 0; + } + + op_release(new_op); + return ret; +} + +int fsid_key_table_initialize(void) +{ + return 0; +} + +void fsid_key_table_finalize(void) +{ +} + +/* Called whenever the VFS dirties the inode in response to atime updates */ +static void orangefs_dirty_inode(struct inode *inode, int flags) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_dirty_inode: %pU\n", + get_khandle_from_ino(inode)); + SetAtimeFlag(orangefs_inode); +} + +static const struct super_operations orangefs_s_ops = { + .alloc_inode = orangefs_alloc_inode, + .destroy_inode = orangefs_destroy_inode, + .dirty_inode = orangefs_dirty_inode, + .drop_inode = generic_delete_inode, + .statfs = orangefs_statfs, + .remount_fs = orangefs_remount_fs, + .show_options = generic_show_options, +}; + +static struct dentry *orangefs_fh_to_dentry(struct super_block *sb, + struct fid *fid, + int fh_len, + int fh_type) +{ + struct orangefs_object_kref refn; + + if (fh_len < 5 || fh_type > 2) + return NULL; + + ORANGEFS_khandle_from(&(refn.khandle), fid->raw, 16); + refn.fs_id = (u32) fid->raw[4]; + gossip_debug(GOSSIP_SUPER_DEBUG, + "fh_to_dentry: handle %pU, fs_id %d\n", + &refn.khandle, + refn.fs_id); + + return d_obtain_alias(orangefs_iget(sb, &refn)); +} + +static int orangefs_encode_fh(struct inode *inode, + __u32 *fh, + int *max_len, + struct inode *parent) +{ + int len = parent ? 
10 : 5; + int type = 1; + struct orangefs_object_kref refn; + + if (*max_len < len) { + gossip_lerr("fh buffer is too small for encoding\n"); + *max_len = len; + type = 255; + goto out; + } + + refn = ORANGEFS_I(inode)->refn; + ORANGEFS_khandle_to(&refn.khandle, fh, 16); + fh[4] = refn.fs_id; + + gossip_debug(GOSSIP_SUPER_DEBUG, + "Encoding fh: handle %pU, fsid %u\n", + &refn.khandle, + refn.fs_id); + + + if (parent) { + refn = ORANGEFS_I(parent)->refn; + ORANGEFS_khandle_to(&refn.khandle, (char *) fh + 20, 16); + fh[9] = refn.fs_id; + + type = 2; + gossip_debug(GOSSIP_SUPER_DEBUG, + "Encoding parent: handle %pU, fsid %u\n", + &refn.khandle, + refn.fs_id); + } + *max_len = len; + +out: + return type; +} + +static const struct export_operations orangefs_export_ops = { + .encode_fh = orangefs_encode_fh, + .fh_to_dentry = orangefs_fh_to_dentry, +}; + +static int orangefs_fill_sb(struct super_block *sb, + struct orangefs_fs_mount_response *fs_mount, + void *data, int silent) +{ + int ret = -EINVAL; + struct inode *root = NULL; + struct dentry *root_dentry = NULL; + struct orangefs_object_kref root_object; + + /* alloc and init our private orangefs sb info */ + sb->s_fs_info = kzalloc(sizeof(struct orangefs_sb_info_s), GFP_KERNEL); + if (!ORANGEFS_SB(sb)) + return -ENOMEM; + ORANGEFS_SB(sb)->sb = sb; + + ORANGEFS_SB(sb)->root_khandle = fs_mount->root_khandle; + ORANGEFS_SB(sb)->fs_id = fs_mount->fs_id; + ORANGEFS_SB(sb)->id = fs_mount->id; + + if (data) { + ret = parse_mount_options(sb, data, silent); + if (ret) + return ret; + } + + /* Hang the xattr handlers off the superblock */ + sb->s_xattr = orangefs_xattr_handlers; + sb->s_magic = ORANGEFS_SUPER_MAGIC; + sb->s_op = &orangefs_s_ops; + sb->s_d_op = &orangefs_dentry_operations; + + sb->s_blocksize = orangefs_bufmap_size_query(); + sb->s_blocksize_bits = orangefs_bufmap_shift_query(); + sb->s_maxbytes = MAX_LFS_FILESIZE; + + root_object.khandle = ORANGEFS_SB(sb)->root_khandle; + root_object.fs_id = ORANGEFS_SB(sb)->fs_id; + gossip_debug(GOSSIP_SUPER_DEBUG, + "get inode %pU, fsid %d\n", + &root_object.khandle, + root_object.fs_id); + + root = orangefs_iget(sb, &root_object); + if (IS_ERR(root)) + return PTR_ERR(root); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "Allocated root inode [%p] with mode %x\n", + root, + root->i_mode); + + /* allocates and places root dentry in dcache */ + root_dentry = d_make_root(root); + if (!root_dentry) + return -ENOMEM; + + sb->s_export_op = &orangefs_export_ops; + sb->s_root = root_dentry; + return 0; +} + +struct dentry *orangefs_mount(struct file_system_type *fst, + int flags, + const char *devname, + void *data) +{ + int ret = -EINVAL; + struct super_block *sb = ERR_PTR(-EINVAL); + struct orangefs_kernel_op_s *new_op; + struct dentry *d = ERR_PTR(-EINVAL); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_mount: called with devname %s\n", + devname); + + if (!devname) { + gossip_err("ERROR: device name not specified.\n"); + return ERR_PTR(-EINVAL); + } + + new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT); + if (!new_op) + return ERR_PTR(-ENOMEM); + + strncpy(new_op->upcall.req.fs_mount.orangefs_config_server, + devname, + ORANGEFS_MAX_SERVER_ADDR_LEN); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "Attempting ORANGEFS Mount via host %s\n", + new_op->upcall.req.fs_mount.orangefs_config_server); + + ret = service_operation(new_op, "orangefs_mount", 0); + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_mount: mount got return value of %d\n", ret); + if (ret) + goto free_op; + + if (new_op->downcall.resp.fs_mount.fs_id == 
ORANGEFS_FS_ID_NULL) { + gossip_err("ERROR: Retrieved null fs_id\n"); + ret = -EINVAL; + goto free_op; + } + + sb = sget(fst, NULL, set_anon_super, flags, NULL); + + if (IS_ERR(sb)) { + d = ERR_CAST(sb); + goto free_op; + } + + ret = orangefs_fill_sb(sb, + &new_op->downcall.resp.fs_mount, data, + flags & MS_SILENT ? 1 : 0); + + if (ret) { + d = ERR_PTR(ret); + goto free_op; + } + + /* + * on successful mount, store the devname and data + * used + */ + strncpy(ORANGEFS_SB(sb)->devname, + devname, + ORANGEFS_MAX_SERVER_ADDR_LEN); + + /* mount_pending must be cleared */ + ORANGEFS_SB(sb)->mount_pending = 0; + + /* + * finally, add this sb to our list of known orangefs + * sb's + */ + gossip_debug(GOSSIP_SUPER_DEBUG, + "Adding SB %p to orangefs superblocks\n", + ORANGEFS_SB(sb)); + spin_lock(&orangefs_superblocks_lock); + list_add_tail(&ORANGEFS_SB(sb)->list, &orangefs_superblocks); + spin_unlock(&orangefs_superblocks_lock); + op_release(new_op); + return dget(sb->s_root); + +free_op: + gossip_err("orangefs_mount: mount request failed with %d\n", ret); + if (ret == -EINVAL) { + gossip_err("Ensure that all orangefs-servers have the same FS configuration files\n"); + gossip_err("Look at pvfs2-client-core log file (typically /tmp/pvfs2-client.log) for more details\n"); + } + + op_release(new_op); + + return d; +} + +void orangefs_kill_sb(struct super_block *sb) +{ + gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_kill_sb: called\n"); + + /* provided sb cleanup */ + kill_anon_super(sb); + + /* + * issue the unmount to userspace to tell it to remove the + * dynamic mount info it has for this superblock + */ + orangefs_unmount_sb(sb); + + /* remove the sb from our list of orangefs specific sb's */ + + spin_lock(&orangefs_superblocks_lock); + __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */ + ORANGEFS_SB(sb)->list.prev = NULL; + spin_unlock(&orangefs_superblocks_lock); + + /* + * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us + * gets completed before we free the dang thing. + */ + mutex_lock(&request_mutex); + mutex_unlock(&request_mutex); + + /* free the orangefs superblock private data */ + kfree(ORANGEFS_SB(sb)); +} + +int orangefs_inode_cache_initialize(void) +{ + orangefs_inode_cache = kmem_cache_create("orangefs_inode_cache", + sizeof(struct orangefs_inode_s), + 0, + ORANGEFS_CACHE_CREATE_FLAGS, + orangefs_inode_cache_ctor); + + if (!orangefs_inode_cache) { + gossip_err("Cannot create orangefs_inode_cache\n"); + return -ENOMEM; + } + return 0; +} + +int orangefs_inode_cache_finalize(void) +{ + kmem_cache_destroy(orangefs_inode_cache); + return 0; +} diff --git a/fs/orangefs/symlink.c b/fs/orangefs/symlink.c new file mode 100644 index 000000000000..6418dd638680 --- /dev/null +++ b/fs/orangefs/symlink.c @@ -0,0 +1,19 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. 
+ */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" + +struct inode_operations orangefs_symlink_inode_operations = { + .readlink = generic_readlink, + .get_link = simple_get_link, + .setattr = orangefs_setattr, + .getattr = orangefs_getattr, + .listxattr = orangefs_listxattr, + .setxattr = generic_setxattr, + .permission = orangefs_permission, +}; diff --git a/fs/orangefs/upcall.h b/fs/orangefs/upcall.h new file mode 100644 index 000000000000..001b20239407 --- /dev/null +++ b/fs/orangefs/upcall.h @@ -0,0 +1,246 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +#ifndef __UPCALL_H +#define __UPCALL_H + +/* + * Sanitized this header file to fix + * 32-64 bit interaction issues between + * client-core and device + */ +struct orangefs_io_request_s { + __s32 __pad1; + __s32 buf_index; + __s32 count; + __s32 __pad2; + __s64 offset; + struct orangefs_object_kref refn; + enum ORANGEFS_io_type io_type; + __s32 readahead_size; +}; + +struct orangefs_lookup_request_s { + __s32 sym_follow; + __s32 __pad1; + struct orangefs_object_kref parent_refn; + char d_name[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_create_request_s { + struct orangefs_object_kref parent_refn; + struct ORANGEFS_sys_attr_s attributes; + char d_name[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_symlink_request_s { + struct orangefs_object_kref parent_refn; + struct ORANGEFS_sys_attr_s attributes; + char entry_name[ORANGEFS_NAME_MAX]; + char target[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_getattr_request_s { + struct orangefs_object_kref refn; + __u32 mask; + __u32 __pad1; +}; + +struct orangefs_setattr_request_s { + struct orangefs_object_kref refn; + struct ORANGEFS_sys_attr_s attributes; +}; + +struct orangefs_remove_request_s { + struct orangefs_object_kref parent_refn; + char d_name[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_mkdir_request_s { + struct orangefs_object_kref parent_refn; + struct ORANGEFS_sys_attr_s attributes; + char d_name[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_readdir_request_s { + struct orangefs_object_kref refn; + __u64 token; + __s32 max_dirent_count; + __s32 buf_index; +}; + +struct orangefs_readdirplus_request_s { + struct orangefs_object_kref refn; + __u64 token; + __s32 max_dirent_count; + __u32 mask; + __s32 buf_index; + __s32 __pad1; +}; + +struct orangefs_rename_request_s { + struct orangefs_object_kref old_parent_refn; + struct orangefs_object_kref new_parent_refn; + char d_old_name[ORANGEFS_NAME_MAX]; + char d_new_name[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_statfs_request_s { + __s32 fs_id; + __s32 __pad1; +}; + +struct orangefs_truncate_request_s { + struct orangefs_object_kref refn; + __s64 size; +}; + +struct orangefs_mmap_ra_cache_flush_request_s { + struct orangefs_object_kref refn; +}; + +struct orangefs_fs_mount_request_s { + char orangefs_config_server[ORANGEFS_MAX_SERVER_ADDR_LEN]; +}; + +struct orangefs_fs_umount_request_s { + __s32 id; + __s32 fs_id; + char orangefs_config_server[ORANGEFS_MAX_SERVER_ADDR_LEN]; +}; + +struct orangefs_getxattr_request_s { + struct orangefs_object_kref refn; + __s32 key_sz; + __s32 __pad1; + char key[ORANGEFS_MAX_XATTR_NAMELEN]; +}; + +struct orangefs_setxattr_request_s { + struct orangefs_object_kref refn; + struct ORANGEFS_keyval_pair keyval; + __s32 flags; + __s32 __pad1; +}; + +struct orangefs_listxattr_request_s { + struct orangefs_object_kref refn; + __s32 requested_count; + __s32 __pad1; + __u64 token; +}; + +struct 
orangefs_removexattr_request_s { + struct orangefs_object_kref refn; + __s32 key_sz; + __s32 __pad1; + char key[ORANGEFS_MAX_XATTR_NAMELEN]; +}; + +struct orangefs_op_cancel_s { + __u64 op_tag; +}; + +struct orangefs_fsync_request_s { + struct orangefs_object_kref refn; +}; + +enum orangefs_param_request_type { + ORANGEFS_PARAM_REQUEST_SET = 1, + ORANGEFS_PARAM_REQUEST_GET = 2 +}; + +enum orangefs_param_request_op { + ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS = 1, + ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT = 2, + ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT = 3, + ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE = 4, + ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS = 5, + ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE = 6, + ORANGEFS_PARAM_REQUEST_OP_PERF_RESET = 7, + ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS = 8, + ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT = 9, + ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT = 10, + ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE = 11, + ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_TIMEOUT_MSECS = 12, + ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_HARD_LIMIT = 13, + ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_SOFT_LIMIT = 14, + ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_RECLAIM_PERCENTAGE = 15, + ORANGEFS_PARAM_REQUEST_OP_CLIENT_DEBUG = 16, + ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS = 17, + ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT = 18, + ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT = 19, + ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE = 20, + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS = 21, + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT = 22, + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT = 23, + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE = 24, + ORANGEFS_PARAM_REQUEST_OP_TWO_MASK_VALUES = 25, +}; + +struct orangefs_param_request_s { + enum orangefs_param_request_type type; + enum orangefs_param_request_op op; + __s64 value; + char s_value[ORANGEFS_MAX_DEBUG_STRING_LEN]; +}; + +enum orangefs_perf_count_request_type { + ORANGEFS_PERF_COUNT_REQUEST_ACACHE = 1, + ORANGEFS_PERF_COUNT_REQUEST_NCACHE = 2, + ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE = 3, +}; + +struct orangefs_perf_count_request_s { + enum orangefs_perf_count_request_type type; + __s32 __pad1; +}; + +struct orangefs_fs_key_request_s { + __s32 fsid; + __s32 __pad1; +}; + +struct orangefs_upcall_s { + __s32 type; + __u32 uid; + __u32 gid; + int pid; + int tgid; + /* Trailers unused but must be retained for protocol compatibility. 
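+	 *
+	 * Keeping them means every upcall stays one fixed-size record:
+	 * this header (type/uid/gid/pid/tgid plus the trailer fields)
+	 * followed by the request union, no matter which request type is
+	 * queued.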
*/ + __s64 trailer_size; + char *trailer_buf; + + union { + struct orangefs_io_request_s io; + struct orangefs_lookup_request_s lookup; + struct orangefs_create_request_s create; + struct orangefs_symlink_request_s sym; + struct orangefs_getattr_request_s getattr; + struct orangefs_setattr_request_s setattr; + struct orangefs_remove_request_s remove; + struct orangefs_mkdir_request_s mkdir; + struct orangefs_readdir_request_s readdir; + struct orangefs_readdirplus_request_s readdirplus; + struct orangefs_rename_request_s rename; + struct orangefs_statfs_request_s statfs; + struct orangefs_truncate_request_s truncate; + struct orangefs_mmap_ra_cache_flush_request_s ra_cache_flush; + struct orangefs_fs_mount_request_s fs_mount; + struct orangefs_fs_umount_request_s fs_umount; + struct orangefs_getxattr_request_s getxattr; + struct orangefs_setxattr_request_s setxattr; + struct orangefs_listxattr_request_s listxattr; + struct orangefs_removexattr_request_s removexattr; + struct orangefs_op_cancel_s cancel; + struct orangefs_fsync_request_s fsync; + struct orangefs_param_request_s param; + struct orangefs_perf_count_request_s perf_count; + struct orangefs_fs_key_request_s fs_key; + } req; +}; + +#endif /* __UPCALL_H */ diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c new file mode 100644 index 000000000000..31635bc303fe --- /dev/null +++ b/fs/orangefs/waitqueue.c @@ -0,0 +1,357 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * (C) 2011 Omnibond Systems + * + * Changes by Acxiom Corporation to implement generic service_operation() + * function, Copyright Acxiom Corporation, 2005. + * + * See COPYING in top-level directory. + */ + +/* + * In-kernel waitqueue operations. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" + +static int wait_for_matching_downcall(struct orangefs_kernel_op_s *, long, bool); +static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *); + +/* + * What we do in this function is to walk the list of operations that are + * present in the request queue and mark them as purged. + * NOTE: This is called from the device close after client-core has + * guaranteed that no new operations could appear on the list since the + * client-core is anyway going to exit. + */ +void purge_waiting_ops(void) +{ + struct orangefs_kernel_op_s *op; + + spin_lock(&orangefs_request_list_lock); + list_for_each_entry(op, &orangefs_request_list, list) { + gossip_debug(GOSSIP_WAIT_DEBUG, + "pvfs2-client-core: purging op tag %llu %s\n", + llu(op->tag), + get_opname_string(op)); + set_op_state_purged(op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(op), + op->op_state, + current->comm); + } + spin_unlock(&orangefs_request_list_lock); +} + +/* + * submits a ORANGEFS operation and waits for it to complete + * + * Note op->downcall.status will contain the status of the operation (in + * errno format), whether provided by pvfs2-client or a result of failure to + * service the operation. If the caller wishes to distinguish, then + * op->state can be checked to see if it was serviced or not. 
+ * + * Returns contents of op->downcall.status for convenience + */ +int service_operation(struct orangefs_kernel_op_s *op, + const char *op_name, + int flags) +{ + long timeout = MAX_SCHEDULE_TIMEOUT; + int ret = 0; + + DEFINE_WAIT(wait_entry); + + op->upcall.tgid = current->tgid; + op->upcall.pid = current->pid; + +retry_servicing: + op->downcall.status = 0; + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: %s op:%p: process:%s: pid:%d:\n", + __func__, + op_name, + op, + current->comm, + current->pid); + + /* + * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid + * acquiring the request_mutex because we're servicing a + * high priority remount operation and the request_mutex is + * already taken. + */ + if (!(flags & ORANGEFS_OP_NO_MUTEX)) { + if (flags & ORANGEFS_OP_INTERRUPTIBLE) + ret = mutex_lock_interruptible(&request_mutex); + else + ret = mutex_lock_killable(&request_mutex); + /* + * check to see if we were interrupted while waiting for + * mutex + */ + if (ret < 0) { + op->downcall.status = ret; + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: service_operation interrupted.\n", + __func__); + return ret; + } + } + + /* queue up the operation */ + spin_lock(&orangefs_request_list_lock); + spin_lock(&op->lock); + set_op_state_waiting(op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(op), + op->op_state, + current->comm); + /* add high priority remount op to the front of the line. */ + if (flags & ORANGEFS_OP_PRIORITY) + list_add(&op->list, &orangefs_request_list); + else + list_add_tail(&op->list, &orangefs_request_list); + spin_unlock(&op->lock); + wake_up_interruptible(&orangefs_request_list_waitq); + if (!__is_daemon_in_service()) { + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s:client core is NOT in service.\n", + __func__); + timeout = op_timeout_secs * HZ; + } + spin_unlock(&orangefs_request_list_lock); + + if (!(flags & ORANGEFS_OP_NO_MUTEX)) + mutex_unlock(&request_mutex); + + ret = wait_for_matching_downcall(op, timeout, + flags & ORANGEFS_OP_INTERRUPTIBLE); + + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: wait_for_matching_downcall returned %d for %p\n", + __func__, + ret, + op); + + /* got matching downcall; make sure status is in errno format */ + if (!ret) { + spin_unlock(&op->lock); + op->downcall.status = + orangefs_normalize_to_errno(op->downcall.status); + ret = op->downcall.status; + goto out; + } + + /* failed to get matching downcall */ + if (ret == -ETIMEDOUT) { + gossip_err("%s: %s -- wait timed out; aborting attempt.\n", + __func__, + op_name); + } + + /* + * remove a waiting op from the request list or + * remove an in-progress op from the in-progress list. + */ + orangefs_clean_up_interrupted_operation(op); + + op->downcall.status = ret; + /* retry if operation has not been serviced and if requested */ + if (ret == -EAGAIN) { + op->attempts++; + timeout = op_timeout_secs * HZ; + gossip_debug(GOSSIP_WAIT_DEBUG, + "orangefs: tag %llu (%s)" + " -- operation to be retried (%d attempt)\n", + llu(op->tag), + op_name, + op->attempts); + + /* + * io ops (ops that use the shared memory buffer) have + * to be returned to their caller for a retry. Other ops + * can just be recycled here. + */ + if (!op->uses_shared_memory) + goto retry_servicing; + } + +out: + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: %s returning: %d for %p.\n", + __func__, + op_name, + ret, + op); + return ret; +} + +/* This can get called on an I/O op if it had a bad service_operation. 
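+ * It rewrites the op in place as an ORANGEFS_VFS_OP_CANCEL upcall for
+ * the original tag and requeues it so the client-core can cancel the
+ * interrupted I/O; it returns false when the op never reached the
+ * daemon or the daemon is not running.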
*/ +bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op) +{ + u64 tag = op->tag; + if (!op_state_in_progress(op)) + return false; + + op->slot_to_free = op->upcall.req.io.buf_index; + memset(&op->upcall, 0, sizeof(op->upcall)); + memset(&op->downcall, 0, sizeof(op->downcall)); + op->upcall.type = ORANGEFS_VFS_OP_CANCEL; + op->upcall.req.cancel.op_tag = tag; + op->downcall.type = ORANGEFS_VFS_OP_INVALID; + op->downcall.status = -1; + orangefs_new_tag(op); + + spin_lock(&orangefs_request_list_lock); + /* orangefs_request_list_lock is enough of a barrier here */ + if (!__is_daemon_in_service()) { + spin_unlock(&orangefs_request_list_lock); + return false; + } + spin_lock(&op->lock); + set_op_state_waiting(op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(op), + op->op_state, + current->comm); + list_add(&op->list, &orangefs_request_list); + spin_unlock(&op->lock); + spin_unlock(&orangefs_request_list_lock); + + gossip_debug(GOSSIP_WAIT_DEBUG, + "Attempting ORANGEFS operation cancellation of tag %llu\n", + llu(tag)); + return true; +} + +/* + * Change an op to the "given up" state and remove it from its list. + */ +static void + orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op) +{ + /* + * handle interrupted cases depending on what state we were in when + * the interruption is detected. + * + * Called with op->lock held. + */ + + /* + * List manipulation code elsewhere will ignore ops that + * have been given up upon. + */ + op->op_state |= OP_VFS_STATE_GIVEN_UP; + + if (list_empty(&op->list)) { + /* caught copying to/from daemon */ + BUG_ON(op_state_serviced(op)); + spin_unlock(&op->lock); + wait_for_completion(&op->waitq); + } else if (op_state_waiting(op)) { + /* + * upcall hasn't been read; remove op from upcall request + * list. + */ + spin_unlock(&op->lock); + spin_lock(&orangefs_request_list_lock); + list_del_init(&op->list); + spin_unlock(&orangefs_request_list_lock); + gossip_debug(GOSSIP_WAIT_DEBUG, + "Interrupted: Removed op %p from request_list\n", + op); + } else if (op_state_in_progress(op)) { + /* op must be removed from the in progress htable */ + spin_unlock(&op->lock); + spin_lock(&htable_ops_in_progress_lock); + list_del_init(&op->list); + spin_unlock(&htable_ops_in_progress_lock); + gossip_debug(GOSSIP_WAIT_DEBUG, + "Interrupted: Removed op %p" + " from htable_ops_in_progress\n", + op); + } else { + spin_unlock(&op->lock); + gossip_err("interrupted operation is in a weird state 0x%x\n", + op->op_state); + } + reinit_completion(&op->waitq); +} + +/* + * Sleeps on waitqueue waiting for matching downcall. + * If client-core finishes servicing, then we are good to go. + * else if client-core exits, we get woken up here, and retry with a timeout + * + * When this call returns to the caller, the specified op will no + * longer be in either the in_progress hash table or on the request list. + * + * Returns 0 on success and -errno on failure + * Errors are: + * EAGAIN in case we want the caller to requeue and try again.. + * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this + * operation since client-core seems to be exiting too often + * or if we were interrupted. + * + * Returns with op->lock taken. 
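+ *
+ * For example, an op that was purged by a client-core restart keeps
+ * being requeued through the -EAGAIN path in service_operation() until
+ * op->attempts reaches ORANGEFS_PURGE_RETRY_COUNT, at which point -EIO
+ * is returned instead.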
+ */ +static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op, + long timeout, + bool interruptible) +{ + long n; + + /* + * There's a "schedule_timeout" inside of these wait + * primitives, during which the op is out of the hands of the + * user process that needs something done and is being + * manipulated by the client-core process. + */ + if (interruptible) + n = wait_for_completion_interruptible_timeout(&op->waitq, + timeout); + else + n = wait_for_completion_killable_timeout(&op->waitq, timeout); + + spin_lock(&op->lock); + + if (op_state_serviced(op)) + return 0; + + if (unlikely(n < 0)) { + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: operation interrupted, tag %llu, %p\n", + __func__, + llu(op->tag), + op); + return -EINTR; + } + if (op_state_purged(op)) { + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: operation purged, tag %llu, %p, %d\n", + __func__, + llu(op->tag), + op, + op->attempts); + return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ? + -EAGAIN : + -EIO; + } + /* must have timed out, then... */ + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: operation timed out, tag %llu, %p, %d\n", + __func__, + llu(op->tag), + op, + op->attempts); + return -ETIMEDOUT; +} diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c new file mode 100644 index 000000000000..ef5da7538cd5 --- /dev/null +++ b/fs/orangefs/xattr.c @@ -0,0 +1,545 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* + * Linux VFS extended attribute operations. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" +#include <linux/posix_acl_xattr.h> +#include <linux/xattr.h> + + +#define SYSTEM_ORANGEFS_KEY "system.pvfs2." +#define SYSTEM_ORANGEFS_KEY_LEN 13 + +/* + * This function returns + * 0 if the key corresponding to name is not meant to be printed as part + * of a listxattr. + * 1 if the key corresponding to name is meant to be returned as part of + * a listxattr. + * The ones that start with SYSTEM_ORANGEFS_KEY are the ones to avoid printing. + */ +static int is_reserved_key(const char *key, size_t size) +{ + if (size < SYSTEM_ORANGEFS_KEY_LEN) + return 1; + + return strncmp(key, SYSTEM_ORANGEFS_KEY, SYSTEM_ORANGEFS_KEY_LEN) ? 1 : 0; +} + +static inline int convert_to_internal_xattr_flags(int setxattr_flags) +{ + int internal_flag = 0; + + if (setxattr_flags & XATTR_REPLACE) { + /* Attribute must exist! */ + internal_flag = ORANGEFS_XATTR_REPLACE; + } else if (setxattr_flags & XATTR_CREATE) { + /* Attribute must not exist */ + internal_flag = ORANGEFS_XATTR_CREATE; + } + return internal_flag; +} + + +/* + * Tries to get a specified key's attribute for a given + * file into a user-specified buffer. Note that the getxattr + * interface allows users to probe the size of an + * extended attribute by passing in a value of 0 to size. + * Thus our return value is always the size of the attribute + * unless the key does not exist for the file and/or if + * there were errors in fetching the attribute value.
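+ *
+ * A caller can therefore size its buffer in two steps; a minimal
+ * sketch (illustrative only, not part of this patch) of that pattern:
+ *
+ *	len = orangefs_inode_getxattr(inode, prefix, name, NULL, 0);
+ *	if (len > 0) {
+ *		val = kmalloc(len, GFP_KERNEL);
+ *		if (val)
+ *			len = orangefs_inode_getxattr(inode, prefix,
+ *						      name, val, len);
+ *	}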
+ */ +ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix, + const char *name, void *buffer, size_t size) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op = NULL; + ssize_t ret = -ENOMEM; + ssize_t length = 0; + int fsuid; + int fsgid; + + gossip_debug(GOSSIP_XATTR_DEBUG, + "%s: prefix %s name %s, buffer_size %zd\n", + __func__, prefix, name, size); + + if (name == NULL || (size > 0 && buffer == NULL)) { + gossip_err("orangefs_inode_getxattr: bogus NULL pointers\n"); + return -EINVAL; + } + if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) { + gossip_err("Invalid key length (%d)\n", + (int)(strlen(name) + strlen(prefix))); + return -EINVAL; + } + + fsuid = from_kuid(current_user_ns(), current_fsuid()); + fsgid = from_kgid(current_user_ns(), current_fsgid()); + + gossip_debug(GOSSIP_XATTR_DEBUG, + "getxattr on inode %pU, name %s " + "(uid %o, gid %o)\n", + get_khandle_from_ino(inode), + name, + fsuid, + fsgid); + + down_read(&orangefs_inode->xattr_sem); + + new_op = op_alloc(ORANGEFS_VFS_OP_GETXATTR); + if (!new_op) + goto out_unlock; + + new_op->upcall.req.getxattr.refn = orangefs_inode->refn; + ret = snprintf((char *)new_op->upcall.req.getxattr.key, + ORANGEFS_MAX_XATTR_NAMELEN, "%s%s", prefix, name); + + /* + * NOTE: Although keys are meant to be NULL terminated textual + * strings, I am going to explicitly pass the length just in case + * we change this later on... + */ + new_op->upcall.req.getxattr.key_sz = ret + 1; + + ret = service_operation(new_op, "orangefs_inode_getxattr", + get_interruptible_flag(inode)); + if (ret != 0) { + if (ret == -ENOENT) { + ret = -ENODATA; + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_getxattr: inode %pU key %s" + " does not exist!\n", + get_khandle_from_ino(inode), + (char *)new_op->upcall.req.getxattr.key); + } + goto out_release_op; + } + + /* + * Length returned includes null terminator. + */ + length = new_op->downcall.resp.getxattr.val_sz; + + /* + * Just return the length of the queried attribute. + */ + if (size == 0) { + ret = length; + goto out_release_op; + } + + /* + * Check to see if key length is > provided buffer size. + */ + if (length > size) { + ret = -ERANGE; + goto out_release_op; + } + + memset(buffer, 0, size); + memcpy(buffer, new_op->downcall.resp.getxattr.val, length); + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_getxattr: inode %pU " + "key %s key_sz %d, val_len %d\n", + get_khandle_from_ino(inode), + (char *)new_op-> + upcall.req.getxattr.key, + (int)new_op-> + upcall.req.getxattr.key_sz, + (int)ret); + + ret = length; + +out_release_op: + op_release(new_op); +out_unlock: + up_read(&orangefs_inode->xattr_sem); + return ret; +} + +static int orangefs_inode_removexattr(struct inode *inode, + const char *prefix, + const char *name, + int flags) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op = NULL; + int ret = -ENOMEM; + + down_write(&orangefs_inode->xattr_sem); + new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR); + if (!new_op) + goto out_unlock; + + new_op->upcall.req.removexattr.refn = orangefs_inode->refn; + /* + * NOTE: Although keys are meant to be NULL terminated + * textual strings, I am going to explicitly pass the + * length just in case we change this later on... + */ + ret = snprintf((char *)new_op->upcall.req.removexattr.key, + ORANGEFS_MAX_XATTR_NAMELEN, + "%s%s", + (prefix ? 
prefix : ""), + name); + new_op->upcall.req.removexattr.key_sz = ret + 1; + + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_removexattr: key %s, key_sz %d\n", + (char *)new_op->upcall.req.removexattr.key, + (int)new_op->upcall.req.removexattr.key_sz); + + ret = service_operation(new_op, + "orangefs_inode_removexattr", + get_interruptible_flag(inode)); + if (ret == -ENOENT) { + /* + * Request to replace a non-existent attribute is an error. + */ + if (flags & XATTR_REPLACE) + ret = -ENODATA; + else + ret = 0; + } + + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_removexattr: returning %d\n", ret); + + op_release(new_op); +out_unlock: + up_write(&orangefs_inode->xattr_sem); + return ret; +} + +/* + * Tries to set an attribute for a given key on a file. + * + * Returns a -ve number on error and 0 on success. Key is text, but value + * can be binary! + */ +int orangefs_inode_setxattr(struct inode *inode, const char *prefix, + const char *name, const void *value, size_t size, int flags) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op; + int internal_flag = 0; + int ret = -ENOMEM; + + gossip_debug(GOSSIP_XATTR_DEBUG, + "%s: prefix %s, name %s, buffer_size %zd\n", + __func__, prefix, name, size); + + if (size < 0 || + size >= ORANGEFS_MAX_XATTR_VALUELEN || + flags < 0) { + gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n", + (int)size, + flags); + return -EINVAL; + } + + if (name == NULL || + (size > 0 && value == NULL)) { + gossip_err("orangefs_inode_setxattr: bogus NULL pointers!\n"); + return -EINVAL; + } + + internal_flag = convert_to_internal_xattr_flags(flags); + + if (prefix) { + if (strlen(name) + strlen(prefix) >= ORANGEFS_MAX_XATTR_NAMELEN) { + gossip_err + ("orangefs_inode_setxattr: bogus key size (%d)\n", + (int)(strlen(name) + strlen(prefix))); + return -EINVAL; + } + } else { + if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) { + gossip_err + ("orangefs_inode_setxattr: bogus key size (%d)\n", + (int)(strlen(name))); + return -EINVAL; + } + } + + /* This is equivalent to a removexattr */ + if (size == 0 && value == NULL) { + gossip_debug(GOSSIP_XATTR_DEBUG, + "removing xattr (%s%s)\n", + prefix, + name); + return orangefs_inode_removexattr(inode, prefix, name, flags); + } + + gossip_debug(GOSSIP_XATTR_DEBUG, + "setxattr on inode %pU, name %s\n", + get_khandle_from_ino(inode), + name); + + down_write(&orangefs_inode->xattr_sem); + new_op = op_alloc(ORANGEFS_VFS_OP_SETXATTR); + if (!new_op) + goto out_unlock; + + + new_op->upcall.req.setxattr.refn = orangefs_inode->refn; + new_op->upcall.req.setxattr.flags = internal_flag; + /* + * NOTE: Although keys are meant to be NULL terminated textual + * strings, I am going to explicitly pass the length just in + * case we change this later on... 
+ */ + ret = snprintf((char *)new_op->upcall.req.setxattr.keyval.key, + ORANGEFS_MAX_XATTR_NAMELEN, + "%s%s", + prefix, name); + new_op->upcall.req.setxattr.keyval.key_sz = ret + 1; + memcpy(new_op->upcall.req.setxattr.keyval.val, value, size); + new_op->upcall.req.setxattr.keyval.val_sz = size; + + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_setxattr: key %s, key_sz %d " + " value size %zd\n", + (char *)new_op->upcall.req.setxattr.keyval.key, + (int)new_op->upcall.req.setxattr.keyval.key_sz, + size); + + ret = service_operation(new_op, + "orangefs_inode_setxattr", + get_interruptible_flag(inode)); + + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_setxattr: returning %d\n", + ret); + + /* when request is serviced properly, free req op struct */ + op_release(new_op); +out_unlock: + up_write(&orangefs_inode->xattr_sem); + return ret; +} + +/* + * Tries to get a specified object's keys into a user-specified buffer of a + * given size. Note that like the previous instances of xattr routines, this + * also allows you to pass in a NULL pointer and 0 size to probe the size for + * subsequent memory allocations. Thus our return value is always the size of + * all the keys unless there were errors in fetching the keys! + */ +ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size) +{ + struct inode *inode = dentry->d_inode; + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op; + __u64 token = ORANGEFS_ITERATE_START; + ssize_t ret = -ENOMEM; + ssize_t total = 0; + int count_keys = 0; + int key_size; + int i = 0; + int returned_count = 0; + + if (size > 0 && buffer == NULL) { + gossip_err("%s: bogus NULL pointers\n", __func__); + return -EINVAL; + } + if (size < 0) { + gossip_err("Invalid size (%d)\n", (int)size); + return -EINVAL; + } + + down_read(&orangefs_inode->xattr_sem); + new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR); + if (!new_op) + goto out_unlock; + + if (buffer && size > 0) + memset(buffer, 0, size); + +try_again: + key_size = 0; + new_op->upcall.req.listxattr.refn = orangefs_inode->refn; + new_op->upcall.req.listxattr.token = token; + new_op->upcall.req.listxattr.requested_count = + (size == 0) ? 0 : ORANGEFS_MAX_XATTR_LISTLEN; + ret = service_operation(new_op, __func__, + get_interruptible_flag(inode)); + if (ret != 0) + goto done; + + if (size == 0) { + /* + * This is a bit of a big upper limit, but I did not want to + * spend too much time getting this correct, since users end + * up allocating memory rather than us... + */ + total = new_op->downcall.resp.listxattr.returned_count * + ORANGEFS_MAX_XATTR_NAMELEN; + goto done; + } + + returned_count = new_op->downcall.resp.listxattr.returned_count; + if (returned_count < 0 || + returned_count >= ORANGEFS_MAX_XATTR_LISTLEN) { + gossip_err("%s: impossible value for returned_count:%d:\n", + __func__, + returned_count); + ret = -EIO; + goto done; + } + + /* + * Check to see how much can be fit in the buffer. Fit only whole keys. 
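+ * Names are packed back to back in the usual listxattr(2) style,
+ * e.g. "user.a\0trusted.b\0" (assuming, as with key_sz elsewhere in
+ * this file, that lengths[] counts each terminating NUL), so a key is
+ * copied only when its whole length still fits in the remaining space.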
+ */ + for (i = 0; i < returned_count; i++) { + if (new_op->downcall.resp.listxattr.lengths[i] < 0 || + new_op->downcall.resp.listxattr.lengths[i] > + ORANGEFS_MAX_XATTR_NAMELEN) { + gossip_err("%s: impossible value for lengths[%d]\n", + __func__, + new_op->downcall.resp.listxattr.lengths[i]); + ret = -EIO; + goto done; + } + if (total + new_op->downcall.resp.listxattr.lengths[i] > size) + goto done; + + /* + * Since many dumb programs try to setxattr() on our reserved + * xattrs this is a feeble attempt at defeating those by not + * listing them in the output of listxattr.. sigh + */ + if (is_reserved_key(new_op->downcall.resp.listxattr.key + + key_size, + new_op->downcall.resp. + listxattr.lengths[i])) { + gossip_debug(GOSSIP_XATTR_DEBUG, "Copying key %d -> %s\n", + i, new_op->downcall.resp.listxattr.key + + key_size); + memcpy(buffer + total, + new_op->downcall.resp.listxattr.key + key_size, + new_op->downcall.resp.listxattr.lengths[i]); + total += new_op->downcall.resp.listxattr.lengths[i]; + count_keys++; + } else { + gossip_debug(GOSSIP_XATTR_DEBUG, "[RESERVED] key %d -> %s\n", + i, new_op->downcall.resp.listxattr.key + + key_size); + } + key_size += new_op->downcall.resp.listxattr.lengths[i]; + } + + /* + * Since the buffer was large enough, we might have to continue + * fetching more keys! + */ + token = new_op->downcall.resp.listxattr.token; + if (token != ORANGEFS_ITERATE_END) + goto try_again; + +done: + gossip_debug(GOSSIP_XATTR_DEBUG, "%s: returning %d" + " [size of buffer %ld] (filled in %d keys)\n", + __func__, + ret ? (int)ret : (int)total, + (long)size, + count_keys); + op_release(new_op); + if (ret == 0) + ret = total; +out_unlock: + up_read(&orangefs_inode->xattr_sem); + return ret; +} + +static int orangefs_xattr_set_default(const struct xattr_handler *handler, + struct dentry *dentry, + const char *name, + const void *buffer, + size_t size, + int flags) +{ + return orangefs_inode_setxattr(dentry->d_inode, + ORANGEFS_XATTR_NAME_DEFAULT_PREFIX, + name, + buffer, + size, + flags); +} + +static int orangefs_xattr_get_default(const struct xattr_handler *handler, + struct dentry *dentry, + const char *name, + void *buffer, + size_t size) +{ + return orangefs_inode_getxattr(dentry->d_inode, + ORANGEFS_XATTR_NAME_DEFAULT_PREFIX, + name, + buffer, + size); + +} + +static int orangefs_xattr_set_trusted(const struct xattr_handler *handler, + struct dentry *dentry, + const char *name, + const void *buffer, + size_t size, + int flags) +{ + return orangefs_inode_setxattr(dentry->d_inode, + ORANGEFS_XATTR_NAME_TRUSTED_PREFIX, + name, + buffer, + size, + flags); +} + +static int orangefs_xattr_get_trusted(const struct xattr_handler *handler, + struct dentry *dentry, + const char *name, + void *buffer, + size_t size) +{ + return orangefs_inode_getxattr(dentry->d_inode, + ORANGEFS_XATTR_NAME_TRUSTED_PREFIX, + name, + buffer, + size); +} + +static struct xattr_handler orangefs_xattr_trusted_handler = { + .prefix = ORANGEFS_XATTR_NAME_TRUSTED_PREFIX, + .get = orangefs_xattr_get_trusted, + .set = orangefs_xattr_set_trusted, +}; + +static struct xattr_handler orangefs_xattr_default_handler = { + /* + * NOTE: this is set to be the empty string. + * so that all un-prefixed xattrs keys get caught + * here! 
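+ * In other words, any name that no earlier handler in
+ * orangefs_xattr_handlers claims falls through to
+ * orangefs_xattr_get_default() and orangefs_xattr_set_default() with
+ * the name passed through unchanged.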
+ */ + .prefix = ORANGEFS_XATTR_NAME_DEFAULT_PREFIX, + .get = orangefs_xattr_get_default, + .set = orangefs_xattr_set_default, +}; + +const struct xattr_handler *orangefs_xattr_handlers[] = { + &posix_acl_access_xattr_handler, + &posix_acl_default_xattr_handler, + &orangefs_xattr_trusted_handler, + &orangefs_xattr_default_handler, + NULL +}; diff --git a/fs/pipe.c b/fs/pipe.c index ab8dad3ccb6a..0d3f5165cb0b 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -134,7 +134,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe, if (page_count(page) == 1 && !pipe->tmp_page) pipe->tmp_page = page; else - page_cache_release(page); + put_page(page); } /** @@ -180,7 +180,7 @@ EXPORT_SYMBOL(generic_pipe_buf_steal); */ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { - page_cache_get(buf->page); + get_page(buf->page); } EXPORT_SYMBOL(generic_pipe_buf_get); @@ -211,7 +211,7 @@ EXPORT_SYMBOL(generic_pipe_buf_confirm); void generic_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { - page_cache_release(buf->page); + put_page(buf->page); } EXPORT_SYMBOL(generic_pipe_buf_release); diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 9df431642042..229cb546bee0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -553,7 +553,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, if (radix_tree_exceptional_entry(page)) mss->swap += PAGE_SIZE; else - page_cache_release(page); + put_page(page); return; } diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 55bb57e6a30d..8afe10cf7df8 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -279,12 +279,12 @@ static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!page) return VM_FAULT_OOM; if (!PageUptodate(page)) { - offset = (loff_t) index << PAGE_CACHE_SHIFT; + offset = (loff_t) index << PAGE_SHIFT; buf = __va((page_to_pfn(page) << PAGE_SHIFT)); rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0); if (rc < 0) { unlock_page(page); - page_cache_release(page); + put_page(page); return (rc == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS; } SetPageUptodate(page); diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index dc645b66cd79..45d6110744cb 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -420,8 +420,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent) pstore_sb = sb; sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = PSTOREFS_MAGIC; sb->s_op = &pstore_ops; sb->s_time_gran = 1; diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c index e1f37278cf97..144ceda4948e 100644 --- a/fs/qnx6/dir.c +++ b/fs/qnx6/dir.c @@ -35,9 +35,9 @@ static struct page *qnx6_get_page(struct inode *dir, unsigned long n) static unsigned last_entry(struct inode *inode, unsigned long page_nr) { unsigned long last_byte = inode->i_size; - last_byte -= page_nr << PAGE_CACHE_SHIFT; - if (last_byte > PAGE_CACHE_SIZE) - last_byte = PAGE_CACHE_SIZE; + last_byte -= page_nr << PAGE_SHIFT; + if (last_byte > PAGE_SIZE) + last_byte = PAGE_SIZE; return last_byte / QNX6_DIR_ENTRY_SIZE; } @@ -47,9 +47,9 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb, { struct qnx6_sb_info *sbi = QNX6_SB(sb); u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */ - u32 n = s >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits); /* in pages */ + u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */ /* within page */ - u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_CACHE_MASK; + u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK; struct address_space *mapping = sbi->longfile->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (IS_ERR(page)) @@ -115,8 +115,8 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx) struct qnx6_sb_info *sbi = QNX6_SB(s); loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1); unsigned long npages = dir_pages(inode); - unsigned long n = pos >> PAGE_CACHE_SHIFT; - unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE; + unsigned long n = pos >> PAGE_SHIFT; + unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE; bool done = false; ctx->pos = pos; @@ -131,7 +131,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx) if (IS_ERR(page)) { pr_err("%s(): read failed\n", __func__); - ctx->pos = (n + 1) << PAGE_CACHE_SHIFT; + ctx->pos = (n + 1) << PAGE_SHIFT; return PTR_ERR(page); } de = ((struct qnx6_dir_entry *)page_address(page)) + start; diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c index 47bb1de07155..1192422a1c56 100644 --- a/fs/qnx6/inode.c +++ b/fs/qnx6/inode.c @@ -542,8 +542,8 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino) iget_failed(inode); return ERR_PTR(-EIO); } - n = (ino - 1) >> (PAGE_CACHE_SHIFT - QNX6_INODE_SIZE_BITS); - offs = (ino - 1) & (~PAGE_CACHE_MASK >> QNX6_INODE_SIZE_BITS); + n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS); + offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS); mapping = sbi->inodes->i_mapping; page = read_mapping_page(mapping, n, NULL); if (IS_ERR(page)) { diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h index d3fb2b698800..f23b5c4a66ad 100644 --- a/fs/qnx6/qnx6.h +++ b/fs/qnx6/qnx6.h @@ -128,7 +128,7 @@ extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s, static inline void qnx6_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, diff --git 
a/fs/quota/dquot.c b/fs/quota/dquot.c index ba827daea5a0..ff21980d0119 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2047,11 +2047,20 @@ int dquot_get_next_id(struct super_block *sb, struct kqid *qid) struct quota_info *dqopt = sb_dqopt(sb); int err; - if (!dqopt->ops[qid->type]->get_next_id) - return -ENOSYS; + mutex_lock(&dqopt->dqonoff_mutex); + if (!sb_has_quota_active(sb, qid->type)) { + err = -ESRCH; + goto out; + } + if (!dqopt->ops[qid->type]->get_next_id) { + err = -ENOSYS; + goto out; + } mutex_lock(&dqopt->dqio_mutex); err = dqopt->ops[qid->type]->get_next_id(sb, qid); mutex_unlock(&dqopt->dqio_mutex); +out: + mutex_unlock(&dqopt->dqonoff_mutex); return err; } diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index 38981b037524..1ab6e6c2e60e 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -223,8 +223,8 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent) return err; sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = RAMFS_MAGIC; sb->s_op = &ramfs_ops; sb->s_time_gran = 1; diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 9424a4ba93a9..389773711de4 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -180,11 +180,11 @@ int reiserfs_commit_page(struct inode *inode, struct page *page, int partial = 0; unsigned blocksize; struct buffer_head *bh, *head; - unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT; + unsigned long i_size_index = inode->i_size >> PAGE_SHIFT; int new; int logit = reiserfs_file_data_log(inode); struct super_block *s = inode->i_sb; - int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; + int bh_per_page = PAGE_SIZE / s->s_blocksize; struct reiserfs_transaction_handle th; int ret = 0; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index ae9e5b308cf9..d5c2e9c865de 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -386,7 +386,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block, goto finished; } /* read file tail into part of page */ - offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1); + offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1); copy_item_head(&tmp_ih, ih); /* @@ -587,10 +587,10 @@ static int convert_tail_for_hole(struct inode *inode, return -EIO; /* always try to read until the end of the block */ - tail_start = tail_offset & (PAGE_CACHE_SIZE - 1); + tail_start = tail_offset & (PAGE_SIZE - 1); tail_end = (tail_start | (bh_result->b_size - 1)) + 1; - index = tail_offset >> PAGE_CACHE_SHIFT; + index = tail_offset >> PAGE_SHIFT; /* * hole_page can be zero in case of direct_io, we are sure * that we cannot get here if we write with O_DIRECT into tail page @@ -629,7 +629,7 @@ static int convert_tail_for_hole(struct inode *inode, unlock: if (tail_page != hole_page) { unlock_page(tail_page); - page_cache_release(tail_page); + put_page(tail_page); } out: return retval; @@ -2189,11 +2189,11 @@ static int grab_tail_page(struct inode *inode, * we want the page with the last byte in the file, * not the page that will hold the next byte for appending */ - unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; + unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT; unsigned long pos = 0; unsigned long start = 0; unsigned long blocksize = inode->i_sb->s_blocksize; - unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1); + unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1); struct 
buffer_head *bh; struct buffer_head *head; struct page *page; @@ -2251,7 +2251,7 @@ out: unlock: unlock_page(page); - page_cache_release(page); + put_page(page); return error; } @@ -2265,7 +2265,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps) { struct reiserfs_transaction_handle th; /* we want the offset for the first byte after the end of the file */ - unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1); + unsigned long offset = inode->i_size & (PAGE_SIZE - 1); unsigned blocksize = inode->i_sb->s_blocksize; unsigned length; struct page *page = NULL; @@ -2345,7 +2345,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps) } } unlock_page(page); - page_cache_release(page); + put_page(page); } reiserfs_write_unlock(inode->i_sb); @@ -2354,7 +2354,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps) out: if (page) { unlock_page(page); - page_cache_release(page); + put_page(page); } reiserfs_write_unlock(inode->i_sb); @@ -2426,7 +2426,7 @@ research: } else if (is_direct_le_ih(ih)) { char *p; p = page_address(bh_result->b_page); - p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1); + p += (byte_offset - 1) & (PAGE_SIZE - 1); copy_size = ih_item_len(ih) - pos_in_item; fs_gen = get_generation(inode->i_sb); @@ -2525,7 +2525,7 @@ static int reiserfs_write_full_page(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; - unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; + unsigned long end_index = inode->i_size >> PAGE_SHIFT; int error = 0; unsigned long block; sector_t last_block; @@ -2535,7 +2535,7 @@ static int reiserfs_write_full_page(struct page *page, int checked = PageChecked(page); struct reiserfs_transaction_handle th; struct super_block *s = inode->i_sb; - int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; + int bh_per_page = PAGE_SIZE / s->s_blocksize; th.t_trans_id = 0; /* no logging allowed when nonblocking or from PF_MEMALLOC */ @@ -2564,16 +2564,16 @@ static int reiserfs_write_full_page(struct page *page, if (page->index >= end_index) { unsigned last_offset; - last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1); + last_offset = inode->i_size & (PAGE_SIZE - 1); /* no file contents in this page */ if (page->index >= end_index + 1 || !last_offset) { unlock_page(page); return 0; } - zero_user_segment(page, last_offset, PAGE_CACHE_SIZE); + zero_user_segment(page, last_offset, PAGE_SIZE); } bh = head; - block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); + block = page->index << (PAGE_SHIFT - s->s_blocksize_bits); last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; /* first map all the buffers, logging any direct items we find */ do { @@ -2774,7 +2774,7 @@ static int reiserfs_write_begin(struct file *file, *fsdata = (void *)(unsigned long)flags; } - index = pos >> PAGE_CACHE_SHIFT; + index = pos >> PAGE_SHIFT; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; @@ -2822,7 +2822,7 @@ static int reiserfs_write_begin(struct file *file, } if (ret) { unlock_page(page); - page_cache_release(page); + put_page(page); /* Truncate allocated blocks */ reiserfs_truncate_failed_write(inode); } @@ -2909,7 +2909,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, else th = NULL; - start = pos & (PAGE_CACHE_SIZE - 1); + start = pos & (PAGE_SIZE - 1); if (unlikely(copied < len)) { if (!PageUptodate(page)) copied = 0; @@ -2974,7 +2974,7 @@ out: if (locked) reiserfs_write_unlock(inode->i_sb); 
unlock_page(page); - page_cache_release(page); + put_page(page); if (pos + len > inode->i_size) reiserfs_truncate_failed_write(inode); @@ -2996,7 +2996,7 @@ int reiserfs_commit_write(struct file *f, struct page *page, unsigned from, unsigned to) { struct inode *inode = page->mapping->host; - loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to; + loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to; int ret = 0; int update_sd = 0; struct reiserfs_transaction_handle *th = NULL; @@ -3181,7 +3181,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset, struct inode *inode = page->mapping->host; unsigned int curr_off = 0; unsigned int stop = offset + length; - int partial_page = (offset || length < PAGE_CACHE_SIZE); + int partial_page = (offset || length < PAGE_SIZE); int ret = 1; BUG_ON(!PageLocked(page)); diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index 036a1fc0a8c3..57045f423893 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c @@ -203,7 +203,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) * __reiserfs_write_begin on that page. This will force a * reiserfs_get_block to unpack the tail for us. */ - index = inode->i_size >> PAGE_CACHE_SHIFT; + index = inode->i_size >> PAGE_SHIFT; mapping = inode->i_mapping; page = grab_cache_page(mapping, index); retval = -ENOMEM; @@ -221,7 +221,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) out_unlock: unlock_page(page); - page_cache_release(page); + put_page(page); out: inode_unlock(inode); diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 44c2bdced1c8..2ace90e981f0 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -599,18 +599,18 @@ static int journal_list_still_alive(struct super_block *s, * This does a check to see if the buffer belongs to one of these * lost pages before doing the final put_bh. If page->mapping was * null, it tries to free buffers on the page, which should make the - * final page_cache_release drop the page from the lru. + * final put_page drop the page from the lru. 
*/ static void release_buffer_page(struct buffer_head *bh) { struct page *page = bh->b_page; if (!page->mapping && trylock_page(page)) { - page_cache_get(page); + get_page(page); put_bh(bh); if (!page->mapping) try_to_free_buffers(page); unlock_page(page); - page_cache_release(page); + put_page(page); } else { put_bh(bh); } diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 24cbe013240f..5feacd689241 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -1342,7 +1342,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, */ data = kmap_atomic(un_bh->b_page); - off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1)); + off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_SIZE - 1)); memcpy(data + off, ih_item_body(PATH_PLAST_BUFFER(path), &s_ih), ret_value); @@ -1511,7 +1511,7 @@ static void unmap_buffers(struct page *page, loff_t pos) if (page) { if (page_has_buffers(page)) { - tail_index = pos & (PAGE_CACHE_SIZE - 1); + tail_index = pos & (PAGE_SIZE - 1); cur_index = 0; head = page_buffers(page); bh = head; diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c index f41e19b4bb42..2d5489b0a269 100644 --- a/fs/reiserfs/tail_conversion.c +++ b/fs/reiserfs/tail_conversion.c @@ -151,7 +151,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, */ if (up_to_date_bh) { unsigned pgoff = - (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1); + (tail_offset + total_tail - 1) & (PAGE_SIZE - 1); char *kaddr = kmap_atomic(up_to_date_bh->b_page); memset(kaddr + pgoff, 0, blk_size - total_tail); kunmap_atomic(kaddr); @@ -271,7 +271,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, * the page was locked and this part of the page was up to date when * indirect2direct was called, so we know the bytes are still valid */ - tail = tail + (pos & (PAGE_CACHE_SIZE - 1)); + tail = tail + (pos & (PAGE_SIZE - 1)); PATH_LAST_POSITION(path)++; diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 57e0b2310532..28f5f8b11370 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -415,7 +415,7 @@ out: static inline void reiserfs_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } static struct page *reiserfs_get_page(struct inode *dir, size_t n) @@ -427,7 +427,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n) * and an unlink/rmdir has just occurred - GFP_NOFS avoids this */ mapping_set_gfp_mask(mapping, GFP_NOFS); - page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL); + page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL); if (!IS_ERR(page)) { kmap(page); if (PageError(page)) @@ -526,10 +526,10 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th, while (buffer_pos < buffer_size || buffer_pos == 0) { size_t chunk; size_t skip = 0; - size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1)); + size_t page_offset = (file_pos & (PAGE_SIZE - 1)); - if (buffer_size - buffer_pos > PAGE_CACHE_SIZE) - chunk = PAGE_CACHE_SIZE; + if (buffer_size - buffer_pos > PAGE_SIZE) + chunk = PAGE_SIZE; else chunk = buffer_size - buffer_pos; @@ -546,8 +546,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th, struct reiserfs_xattr_header *rxh; skip = file_pos = sizeof(struct reiserfs_xattr_header); - if (chunk + skip > PAGE_CACHE_SIZE) - chunk = PAGE_CACHE_SIZE - skip; + if (chunk + skip > PAGE_SIZE) + chunk = PAGE_SIZE - skip; rxh = (struct reiserfs_xattr_header *)data; rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC); 
rxh->h_hash = cpu_to_le32(xahash); @@ -675,8 +675,8 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer, char *data; size_t skip = 0; - if (isize - file_pos > PAGE_CACHE_SIZE) - chunk = PAGE_CACHE_SIZE; + if (isize - file_pos > PAGE_SIZE) + chunk = PAGE_SIZE; else chunk = isize - file_pos; diff --git a/fs/splice.c b/fs/splice.c index 9947b5c69664..b018eb485019 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -88,7 +88,7 @@ out_unlock: static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { - page_cache_release(buf->page); + put_page(buf->page); buf->flags &= ~PIPE_BUF_FLAG_LRU; } @@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(splice_to_pipe); void spd_release_page(struct splice_pipe_desc *spd, unsigned int i) { - page_cache_release(spd->pages[i]); + put_page(spd->pages[i]); } /* @@ -328,9 +328,9 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, if (splice_grow_spd(pipe, &spd)) return -ENOMEM; - index = *ppos >> PAGE_CACHE_SHIFT; - loff = *ppos & ~PAGE_CACHE_MASK; - req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + index = *ppos >> PAGE_SHIFT; + loff = *ppos & ~PAGE_MASK; + req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT; nr_pages = min(req_pages, spd.nr_pages_max); /* @@ -365,7 +365,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, error = add_to_page_cache_lru(page, mapping, index, mapping_gfp_constraint(mapping, GFP_KERNEL)); if (unlikely(error)) { - page_cache_release(page); + put_page(page); if (error == -EEXIST) continue; break; @@ -385,7 +385,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, * Now loop over the map and see if we need to start IO on any * pages, fill in the partial map, etc. */ - index = *ppos >> PAGE_CACHE_SHIFT; + index = *ppos >> PAGE_SHIFT; nr_pages = spd.nr_pages; spd.nr_pages = 0; for (page_nr = 0; page_nr < nr_pages; page_nr++) { @@ -397,7 +397,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, /* * this_len is the max we'll use from this page */ - this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); + this_len = min_t(unsigned long, len, PAGE_SIZE - loff); page = spd.pages[page_nr]; if (PageReadahead(page)) @@ -426,7 +426,7 @@ retry_lookup: error = -ENOMEM; break; } - page_cache_release(spd.pages[page_nr]); + put_page(spd.pages[page_nr]); spd.pages[page_nr] = page; } /* @@ -456,7 +456,7 @@ fill_it: * i_size must be checked after PageUptodate. */ isize = i_size_read(mapping->host); - end_index = (isize - 1) >> PAGE_CACHE_SHIFT; + end_index = (isize - 1) >> PAGE_SHIFT; if (unlikely(!isize || index > end_index)) break; @@ -470,7 +470,7 @@ fill_it: /* * max good bytes in this page */ - plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; + plen = ((isize - 1) & ~PAGE_MASK) + 1; if (plen <= loff) break; @@ -494,8 +494,8 @@ fill_it: * we got, 'nr_pages' is how many pages are in the map. 
*/ while (page_nr < nr_pages) - page_cache_release(spd.pages[page_nr++]); - in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; + put_page(spd.pages[page_nr++]); + in->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT; if (spd.nr_pages) error = splice_to_pipe(pipe, &spd); @@ -636,8 +636,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, goto shrink_ret; } - offset = *ppos & ~PAGE_CACHE_MASK; - nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + offset = *ppos & ~PAGE_MASK; + nr_pages = (len + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) { struct page *page; @@ -647,7 +647,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, if (!page) goto err; - this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); + this_len = min_t(size_t, len, PAGE_SIZE - offset); vec[i].iov_base = (void __user *) page_address(page); vec[i].iov_len = this_len; spd.pages[i] = page; diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 0cea9b9236d0..2c2618410d51 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -181,11 +181,11 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length, in = min(bytes, msblk->devblksize - offset); bytes -= in; while (in) { - if (pg_offset == PAGE_CACHE_SIZE) { + if (pg_offset == PAGE_SIZE) { data = squashfs_next_page(output); pg_offset = 0; } - avail = min_t(int, in, PAGE_CACHE_SIZE - + avail = min_t(int, in, PAGE_SIZE - pg_offset); memcpy(data + pg_offset, bh[k]->b_data + offset, avail); diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 1cb70a0b2168..23813c078cc9 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -30,7 +30,7 @@ * access the metadata and fragment caches. * * To avoid out of memory and fragmentation issues with vmalloc the cache - * uses sequences of kmalloced PAGE_CACHE_SIZE buffers. + * uses sequences of kmalloced PAGE_SIZE buffers. * * It should be noted that the cache is not used for file datablocks, these * are decompressed and cached in the page-cache in the normal way. The @@ -231,7 +231,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache) /* * Initialise cache allocating the specified number of entries, each of * size block_size. To avoid vmalloc fragmentation issues each entry - * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers. + * is allocated as a sequence of kmalloced PAGE_SIZE buffers. */ struct squashfs_cache *squashfs_cache_init(char *name, int entries, int block_size) @@ -255,7 +255,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, cache->unused = entries; cache->entries = entries; cache->block_size = block_size; - cache->pages = block_size >> PAGE_CACHE_SHIFT; + cache->pages = block_size >> PAGE_SHIFT; cache->pages = cache->pages ? 
cache->pages : 1; cache->name = name; cache->num_waiters = 0; @@ -275,7 +275,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, } for (j = 0; j < cache->pages; j++) { - entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL); if (entry->data[j] == NULL) { ERROR("Failed to allocate %s buffer\n", name); goto cleanup; @@ -314,10 +314,10 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry, return min(length, entry->length - offset); while (offset < entry->length) { - void *buff = entry->data[offset / PAGE_CACHE_SIZE] - + (offset % PAGE_CACHE_SIZE); + void *buff = entry->data[offset / PAGE_SIZE] + + (offset % PAGE_SIZE); int bytes = min_t(int, entry->length - offset, - PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE)); + PAGE_SIZE - (offset % PAGE_SIZE)); if (bytes >= remaining) { memcpy(buffer, buff, remaining); @@ -415,7 +415,7 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb, */ void *squashfs_read_table(struct super_block *sb, u64 block, int length) { - int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; int i, res; void *table, *buffer, **data; struct squashfs_page_actor *actor; @@ -436,7 +436,7 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length) goto failed2; } - for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) + for (i = 0; i < pages; i++, buffer += PAGE_SIZE) data[i] = buffer; res = squashfs_read_data(sb, block, length | diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c index e9034bf6e5ae..d2bc13636f79 100644 --- a/fs/squashfs/decompressor.c +++ b/fs/squashfs/decompressor.c @@ -102,7 +102,7 @@ static void *get_comp_opts(struct super_block *sb, unsigned short flags) * Read decompressor specific options from file system if present */ if (SQUASHFS_COMP_OPTS(flags)) { - buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (buffer == NULL) { comp_opts = ERR_PTR(-ENOMEM); goto out; diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index e5c9689062ba..13d80947bf9e 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -175,7 +175,7 @@ static long long read_indexes(struct super_block *sb, int n, { int err, i; long long block = 0; - __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + __le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL); if (blist == NULL) { ERROR("read_indexes: Failed to allocate block_list\n"); @@ -183,7 +183,7 @@ static long long read_indexes(struct super_block *sb, int n, } while (n) { - int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2); + int blocks = min_t(int, n, PAGE_SIZE >> 2); err = squashfs_read_metadata(sb, blist, start_block, offset, blocks << 2); @@ -377,19 +377,19 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; void *pageaddr; - int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; + int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; int start_index = page->index & ~mask, end_index = start_index | mask; /* * Loop copying datablock into pages. As the datablock likely covers - * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly + * many PAGE_SIZE pages (default block size is 128 KiB) explicitly * grab the pages from the page cache, except for the page that we've * been called to fill. 
*/ for (i = start_index; i <= end_index && bytes > 0; i++, - bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { + bytes -= PAGE_SIZE, offset += PAGE_SIZE) { struct page *push_page; - int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0; + int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0; TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); @@ -404,14 +404,14 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, pageaddr = kmap_atomic(push_page); squashfs_copy_data(pageaddr, buffer, offset, avail); - memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); + memset(pageaddr + avail, 0, PAGE_SIZE - avail); kunmap_atomic(pageaddr); flush_dcache_page(push_page); SetPageUptodate(push_page); skip_page: unlock_page(push_page); if (i != page->index) - page_cache_release(push_page); + put_page(push_page); } } @@ -454,7 +454,7 @@ static int squashfs_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT); + int index = page->index >> (msblk->block_log - PAGE_SHIFT); int file_end = i_size_read(inode) >> msblk->block_log; int res; void *pageaddr; @@ -462,8 +462,8 @@ static int squashfs_readpage(struct file *file, struct page *page) TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", page->index, squashfs_i(inode)->start); - if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT)) + if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >> + PAGE_SHIFT)) goto out; if (index < file_end || squashfs_i(inode)->fragment_block == @@ -487,7 +487,7 @@ error_out: SetPageError(page); out: pageaddr = kmap_atomic(page); - memset(pageaddr, 0, PAGE_CACHE_SIZE); + memset(pageaddr, 0, PAGE_SIZE); kunmap_atomic(pageaddr); flush_dcache_page(page); if (!PageError(page)) diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c index 43e7a7eddac0..cb485d8e0e91 100644 --- a/fs/squashfs/file_direct.c +++ b/fs/squashfs/file_direct.c @@ -30,8 +30,8 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) struct inode *inode = target_page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; - int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; + int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT; + int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; int start_index = target_page->index & ~mask; int end_index = start_index | mask; int i, n, pages, missing_pages, bytes, res = -ENOMEM; @@ -68,7 +68,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) if (PageUptodate(page[i])) { unlock_page(page[i]); - page_cache_release(page[i]); + put_page(page[i]); page[i] = NULL; missing_pages++; } @@ -96,10 +96,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) goto mark_errored; /* Last page may have trailing bytes not filled */ - bytes = res % PAGE_CACHE_SIZE; + bytes = res % PAGE_SIZE; if (bytes) { pageaddr = kmap_atomic(page[pages - 1]); - memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); + memset(pageaddr + bytes, 0, PAGE_SIZE - bytes); kunmap_atomic(pageaddr); } @@ -109,7 +109,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) SetPageUptodate(page[i]); unlock_page(page[i]); if (page[i] != target_page) - page_cache_release(page[i]); + put_page(page[i]); } 
kfree(actor); @@ -127,7 +127,7 @@ mark_errored: flush_dcache_page(page[i]); SetPageError(page[i]); unlock_page(page[i]); - page_cache_release(page[i]); + put_page(page[i]); } out: @@ -153,21 +153,21 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, } for (n = 0; n < pages && bytes > 0; n++, - bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { - int avail = min_t(int, bytes, PAGE_CACHE_SIZE); + bytes -= PAGE_SIZE, offset += PAGE_SIZE) { + int avail = min_t(int, bytes, PAGE_SIZE); if (page[n] == NULL) continue; pageaddr = kmap_atomic(page[n]); squashfs_copy_data(pageaddr, buffer, offset, avail); - memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); + memset(pageaddr + avail, 0, PAGE_SIZE - avail); kunmap_atomic(pageaddr); flush_dcache_page(page[n]); SetPageUptodate(page[n]); unlock_page(page[n]); if (page[n] != target_page) - page_cache_release(page[n]); + put_page(page[n]); } out: diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c index c31e2bc9c081..ff4468bd18b0 100644 --- a/fs/squashfs/lz4_wrapper.c +++ b/fs/squashfs/lz4_wrapper.c @@ -117,13 +117,13 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm, data = squashfs_first_page(output); buff = stream->output; while (data) { - if (bytes <= PAGE_CACHE_SIZE) { + if (bytes <= PAGE_SIZE) { memcpy(data, buff, bytes); break; } - memcpy(data, buff, PAGE_CACHE_SIZE); - buff += PAGE_CACHE_SIZE; - bytes -= PAGE_CACHE_SIZE; + memcpy(data, buff, PAGE_SIZE); + buff += PAGE_SIZE; + bytes -= PAGE_SIZE; data = squashfs_next_page(output); } squashfs_finish_page(output); diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c index 244b9fbfff7b..934c17e96590 100644 --- a/fs/squashfs/lzo_wrapper.c +++ b/fs/squashfs/lzo_wrapper.c @@ -102,13 +102,13 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm, data = squashfs_first_page(output); buff = stream->output; while (data) { - if (bytes <= PAGE_CACHE_SIZE) { + if (bytes <= PAGE_SIZE) { memcpy(data, buff, bytes); break; } else { - memcpy(data, buff, PAGE_CACHE_SIZE); - buff += PAGE_CACHE_SIZE; - bytes -= PAGE_CACHE_SIZE; + memcpy(data, buff, PAGE_SIZE); + buff += PAGE_SIZE; + bytes -= PAGE_SIZE; data = squashfs_next_page(output); } } diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c index 5a1c11f56441..9b7b1b6a7892 100644 --- a/fs/squashfs/page_actor.c +++ b/fs/squashfs/page_actor.c @@ -48,7 +48,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer, if (actor == NULL) return NULL; - actor->length = length ? : pages * PAGE_CACHE_SIZE; + actor->length = length ? : pages * PAGE_SIZE; actor->buffer = buffer; actor->pages = pages; actor->next_page = 0; @@ -88,7 +88,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page, if (actor == NULL) return NULL; - actor->length = length ? : pages * PAGE_CACHE_SIZE; + actor->length = length ? : pages * PAGE_SIZE; actor->page = page; actor->pages = pages; actor->next_page = 0; diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h index 26dd82008b82..98537eab27e2 100644 --- a/fs/squashfs/page_actor.h +++ b/fs/squashfs/page_actor.h @@ -24,7 +24,7 @@ static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page, if (actor == NULL) return NULL; - actor->length = length ? : pages * PAGE_CACHE_SIZE; + actor->length = length ? 
: pages * PAGE_SIZE; actor->page = page; actor->pages = pages; actor->next_page = 0; diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 5e79bfa4f260..cf01e15a7b16 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -152,7 +152,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) * Check the system page size is not larger than the filesystem * block size (by default 128K). This is currently not supported. */ - if (PAGE_CACHE_SIZE > msblk->block_size) { + if (PAGE_SIZE > msblk->block_size) { ERROR("Page size > filesystem block size (%d). This is " "currently not supported!\n", msblk->block_size); goto failed_mount; diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c index dbcc2f54bad4..d688ef42a6a1 100644 --- a/fs/squashfs/symlink.c +++ b/fs/squashfs/symlink.c @@ -48,10 +48,10 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page) struct inode *inode = page->mapping->host; struct super_block *sb = inode->i_sb; struct squashfs_sb_info *msblk = sb->s_fs_info; - int index = page->index << PAGE_CACHE_SHIFT; + int index = page->index << PAGE_SHIFT; u64 block = squashfs_i(inode)->start; int offset = squashfs_i(inode)->offset; - int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE); + int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE); int bytes, copied; void *pageaddr; struct squashfs_cache_entry *entry; @@ -94,7 +94,7 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page) copied = squashfs_copy_data(pageaddr + bytes, entry, offset, length - bytes); if (copied == length - bytes) - memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length); + memset(pageaddr + length, 0, PAGE_SIZE - length); else block = entry->next_index; kunmap_atomic(pageaddr); diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c index c609624e4b8a..6bfaef73d065 100644 --- a/fs/squashfs/xz_wrapper.c +++ b/fs/squashfs/xz_wrapper.c @@ -141,7 +141,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm, stream->buf.in_pos = 0; stream->buf.in_size = 0; stream->buf.out_pos = 0; - stream->buf.out_size = PAGE_CACHE_SIZE; + stream->buf.out_size = PAGE_SIZE; stream->buf.out = squashfs_first_page(output); do { @@ -158,7 +158,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm, stream->buf.out = squashfs_next_page(output); if (stream->buf.out != NULL) { stream->buf.out_pos = 0; - total += PAGE_CACHE_SIZE; + total += PAGE_SIZE; } } diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index 8727caba6882..2ec24d128bce 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c @@ -69,7 +69,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm, int zlib_err, zlib_init = 0, k = 0; z_stream *stream = strm; - stream->avail_out = PAGE_CACHE_SIZE; + stream->avail_out = PAGE_SIZE; stream->next_out = squashfs_first_page(output); stream->avail_in = 0; @@ -85,7 +85,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm, if (stream->avail_out == 0) { stream->next_out = squashfs_next_page(output); if (stream->next_out != NULL) - stream->avail_out = PAGE_CACHE_SIZE; + stream->avail_out = PAGE_SIZE; } if (!zlib_init) { diff --git a/fs/sync.c b/fs/sync.c index dd5d1711c7ac..2a54c1f22035 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -302,7 +302,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes, goto out; if (sizeof(pgoff_t) == 4) { - if (offset >= (0x100000000ULL << 
PAGE_CACHE_SHIFT)) { + if (offset >= (0x100000000ULL << PAGE_SHIFT)) { /* * The range starts outside a 32 bit machine's * pagecache addressing capabilities. Let it "succeed" @@ -310,7 +310,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes, ret = 0; goto out; } - if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) { + if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) { /* * Out to EOF */ diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index 63c1bcb224ee..c0f0a3e643eb 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c @@ -30,7 +30,7 @@ const struct file_operations sysv_dir_operations = { static inline void dir_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) @@ -73,8 +73,8 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx) if (pos >= inode->i_size) return 0; - offset = pos & ~PAGE_CACHE_MASK; - n = pos >> PAGE_CACHE_SHIFT; + offset = pos & ~PAGE_MASK; + n = pos >> PAGE_SHIFT; for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; @@ -85,7 +85,7 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx) continue; kaddr = (char *)page_address(page); de = (struct sysv_dir_entry *)(kaddr+offset); - limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE; + limit = kaddr + PAGE_SIZE - SYSV_DIRSIZE; for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) { char *name = de->name; @@ -146,7 +146,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_ if (!IS_ERR(page)) { kaddr = (char*)page_address(page); de = (struct sysv_dir_entry *) kaddr; - kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE; + kaddr += PAGE_SIZE - SYSV_DIRSIZE; for ( ; (char *) de <= kaddr ; de++) { if (!de->inode) continue; @@ -190,7 +190,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode) goto out; kaddr = (char*)page_address(page); de = (struct sysv_dir_entry *)kaddr; - kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE; + kaddr += PAGE_SIZE - SYSV_DIRSIZE; while ((char *)de <= kaddr) { if (!de->inode) goto got_it; @@ -261,7 +261,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir) kmap(page); base = (char*)page_address(page); - memset(base, 0, PAGE_CACHE_SIZE); + memset(base, 0, PAGE_SIZE); de = (struct sysv_dir_entry *) base; de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); @@ -273,7 +273,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir) kunmap(page); err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE); fail: - page_cache_release(page); + put_page(page); return err; } @@ -296,7 +296,7 @@ int sysv_empty_dir(struct inode * inode) kaddr = (char *)page_address(page); de = (struct sysv_dir_entry *)kaddr; - kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE; + kaddr += PAGE_SIZE-SYSV_DIRSIZE; for ( ;(char *)de <= kaddr; de++) { if (!de->inode) diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index 11e83ed0b4bf..90b60c03b588 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -264,11 +264,11 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: return err; } diff --git a/fs/ubifs/Makefile b/fs/ubifs/Makefile index 2c6f0cb816b4..c54a24360f85 100644 --- a/fs/ubifs/Makefile +++ b/fs/ubifs/Makefile @@ -4,3 +4,4 @@ ubifs-y += shrinker.o journal.o file.o dir.o super.o sb.o io.o ubifs-y += tnc.o master.o scan.o replay.o log.o 
commit.o gc.o orphan.o ubifs-y += budget.o find.o tnc_commit.o compress.o lpt.o lprops.o ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o xattr.o debug.o +ubifs-y += misc.o diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 065c88f8e4b8..446753d8ac34 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -121,7 +121,7 @@ static int do_readpage(struct page *page) if (block >= beyond) { /* Reading beyond inode */ SetPageChecked(page); - memset(addr, 0, PAGE_CACHE_SIZE); + memset(addr, 0, PAGE_SIZE); goto out; } @@ -223,7 +223,7 @@ static int write_begin_slow(struct address_space *mapping, { struct inode *inode = mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct ubifs_budget_req req = { .new_page = 1 }; int uninitialized_var(err), appending = !!(pos + len > inode->i_size); struct page *page; @@ -254,13 +254,13 @@ static int write_begin_slow(struct address_space *mapping, } if (!PageUptodate(page)) { - if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) + if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) SetPageChecked(page); else { err = do_readpage(page); if (err) { unlock_page(page); - page_cache_release(page); + put_page(page); ubifs_release_budget(c, &req); return err; } @@ -428,7 +428,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_inode *ui = ubifs_inode(inode); - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; int uninitialized_var(err), appending = !!(pos + len > inode->i_size); int skipped_read = 0; struct page *page; @@ -446,7 +446,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, if (!PageUptodate(page)) { /* The page is not loaded from the flash */ - if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) { + if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) { /* * We change whole page so no need to load it. But we * do not know whether this page exists on the media or @@ -462,7 +462,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, err = do_readpage(page); if (err) { unlock_page(page); - page_cache_release(page); + put_page(page); return err; } } @@ -494,7 +494,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, mutex_unlock(&ui->ui_mutex); } unlock_page(page); - page_cache_release(page); + put_page(page); return write_begin_slow(mapping, pos, len, pagep, flags); } @@ -549,12 +549,12 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping, dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld", inode->i_ino, pos, page->index, len, copied, inode->i_size); - if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) { + if (unlikely(copied < len && len == PAGE_SIZE)) { /* * VFS copied less data to the page that it intended and * declared in its '->write_begin()' call via the @len * argument. If the page was not up-to-date, and @len was - * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did + * @PAGE_SIZE, the 'ubifs_write_begin()' function did * not load it from the media (for optimization reasons). This * means that part of the page contains garbage. So read the * page now. 
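The ubifs_write_begin()/write_end() hunks above all lean on one arithmetic pattern that this series renames but does not change: a file position splits into a page index via pos >> PAGE_SHIFT and an in-page offset via pos & ~PAGE_MASK, and a write counts as a whole-page overwrite when the offset is zero and the length equals PAGE_SIZE. A minimal userspace sketch of that arithmetic, with PAGE_SHIFT hardcoded to 12 (4K pages) purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                  /* assumed 4K pages for the demo */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t pos = 8192 + 100;     /* arbitrary file position */
	uint64_t len = 300;            /* arbitrary write length */

	uint64_t index  = pos >> PAGE_SHIFT;  /* page covering pos */
	uint64_t offset = pos & ~PAGE_MASK;   /* byte offset inside it */

	/* whole-page test as used by the write_begin paths above */
	int whole = (offset == 0 && len == PAGE_SIZE);

	printf("pos %llu -> page %llu, offset %llu, whole-page: %s\n",
	       (unsigned long long)pos, (unsigned long long)index,
	       (unsigned long long)offset, whole ? "yes" : "no");
	return 0;
}

With 4K pages this prints "pos 8292 -> page 2, offset 100, whole-page: no", which is precisely the case where ubifs_write_begin() must first read the page from the media before letting the write proceed.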
@@ -593,7 +593,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping, out: unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } @@ -621,10 +621,10 @@ static int populate_page(struct ubifs_info *c, struct page *page, addr = zaddr = kmap(page); - end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; + end_index = (i_size - 1) >> PAGE_SHIFT; if (!i_size || page->index > end_index) { hole = 1; - memset(addr, 0, PAGE_CACHE_SIZE); + memset(addr, 0, PAGE_SIZE); goto out_hole; } @@ -673,7 +673,7 @@ static int populate_page(struct ubifs_info *c, struct page *page, } if (end_index == page->index) { - int len = i_size & (PAGE_CACHE_SIZE - 1); + int len = i_size & (PAGE_SIZE - 1); if (len && len < read) memset(zaddr + len, 0, read - len); @@ -773,7 +773,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu, isize = i_size_read(inode); if (isize == 0) goto out_free; - end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); + end_index = ((isize - 1) >> PAGE_SHIFT); for (page_idx = 1; page_idx < page_cnt; page_idx++) { pgoff_t page_offset = offset + page_idx; @@ -788,7 +788,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu, if (!PageUptodate(page)) err = populate_page(c, page, bu, &n); unlock_page(page); - page_cache_release(page); + put_page(page); if (err) break; } @@ -905,7 +905,7 @@ static int do_writepage(struct page *page, int len) #ifdef UBIFS_DEBUG struct ubifs_inode *ui = ubifs_inode(inode); spin_lock(&ui->ui_lock); - ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT); + ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT); spin_unlock(&ui->ui_lock); #endif @@ -1001,8 +1001,8 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) struct inode *inode = page->mapping->host; struct ubifs_inode *ui = ubifs_inode(inode); loff_t i_size = i_size_read(inode), synced_i_size; - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; - int err, len = i_size & (PAGE_CACHE_SIZE - 1); + pgoff_t end_index = i_size >> PAGE_SHIFT; + int err, len = i_size & (PAGE_SIZE - 1); void *kaddr; dbg_gen("ino %lu, pg %lu, pg flags %#lx", @@ -1021,7 +1021,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) /* Is the page fully inside @i_size? */ if (page->index < end_index) { - if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) { + if (page->index >= synced_i_size >> PAGE_SHIFT) { err = inode->i_sb->s_op->write_inode(inode, NULL); if (err) goto out_unlock; @@ -1034,7 +1034,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) * with this. */ } - return do_writepage(page, PAGE_CACHE_SIZE); + return do_writepage(page, PAGE_SIZE); } /* @@ -1045,7 +1045,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) * writes to that region are not written out to the file." 
*/ kaddr = kmap_atomic(page); - memset(kaddr + len, 0, PAGE_CACHE_SIZE - len); + memset(kaddr + len, 0, PAGE_SIZE - len); flush_dcache_page(page); kunmap_atomic(kaddr); @@ -1138,7 +1138,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, truncate_setsize(inode, new_size); if (offset) { - pgoff_t index = new_size >> PAGE_CACHE_SHIFT; + pgoff_t index = new_size >> PAGE_SHIFT; struct page *page; page = find_lock_page(inode->i_mapping, index); @@ -1157,9 +1157,9 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, clear_page_dirty_for_io(page); if (UBIFS_BLOCKS_PER_PAGE_SHIFT) offset = new_size & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); err = do_writepage(page, offset); - page_cache_release(page); + put_page(page); if (err) goto out_budg; /* @@ -1173,7 +1173,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, * having to read it. */ unlock_page(page); - page_cache_release(page); + put_page(page); } } } @@ -1285,7 +1285,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset, struct ubifs_info *c = inode->i_sb->s_fs_info; ubifs_assert(PagePrivate(page)); - if (offset || length < PAGE_CACHE_SIZE) + if (offset || length < PAGE_SIZE) /* Partial page remains dirty */ return; diff --git a/fs/ubifs/misc.c b/fs/ubifs/misc.c new file mode 100644 index 000000000000..486a2844949f --- /dev/null +++ b/fs/ubifs/misc.c @@ -0,0 +1,57 @@ +#include <linux/kernel.h> +#include "ubifs.h" + +/* Normal UBIFS messages */ +void ubifs_msg(const struct ubifs_info *c, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + pr_notice("UBIFS (ubi%d:%d): %pV\n", + c->vi.ubi_num, c->vi.vol_id, &vaf); + + va_end(args); +} + +/* UBIFS error messages */ +void ubifs_err(const struct ubifs_info *c, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + pr_err("UBIFS error (ubi%d:%d pid %d): %ps: %pV\n", + c->vi.ubi_num, c->vi.vol_id, current->pid, + __builtin_return_address(0), + &vaf); + + va_end(args); +} + +/* UBIFS warning messages */ +void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + pr_warn("UBIFS warning (ubi%d:%d pid %d): %ps: %pV\n", + c->vi.ubi_num, c->vi.vol_id, current->pid, + __builtin_return_address(0), + &vaf); + + va_end(args); +} diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index a233ba913be4..e98c24ee25a1 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2237,12 +2237,12 @@ static int __init ubifs_init(void) BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4); /* - * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to + * We require that PAGE_SIZE is greater-than-or-equal-to * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2. */ - if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) { + if (PAGE_SIZE < UBIFS_BLOCK_SIZE) { pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes", - current->pid, (unsigned int)PAGE_CACHE_SIZE); + current->pid, (unsigned int)PAGE_SIZE); return -EINVAL; } diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index a5697de763f5..4cd7e569cd00 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -42,36 +42,12 @@ /* Version of this UBIFS implementation */ #define UBIFS_VERSION 1 -/* Normal UBIFS messages */ -#define ubifs_msg(c, fmt, ...) 
\ - pr_notice("UBIFS (ubi%d:%d): " fmt "\n", \ - (c)->vi.ubi_num, (c)->vi.vol_id, ##__VA_ARGS__) -/* UBIFS error messages */ -#define ubifs_err(c, fmt, ...) \ - pr_err("UBIFS error (ubi%d:%d pid %d): %s: " fmt "\n", \ - (c)->vi.ubi_num, (c)->vi.vol_id, current->pid, \ - __func__, ##__VA_ARGS__) -/* UBIFS warning messages */ -#define ubifs_warn(c, fmt, ...) \ - pr_warn("UBIFS warning (ubi%d:%d pid %d): %s: " fmt "\n", \ - (c)->vi.ubi_num, (c)->vi.vol_id, current->pid, \ - __func__, ##__VA_ARGS__) -/* - * A variant of 'ubifs_err()' which takes the UBIFS file-sytem description - * object as an argument. - */ -#define ubifs_errc(c, fmt, ...) \ - do { \ - if (!(c)->probing) \ - ubifs_err(c, fmt, ##__VA_ARGS__); \ - } while (0) - /* UBIFS file system VFS magic number */ #define UBIFS_SUPER_MAGIC 0x24051905 /* Number of UBIFS blocks per VFS page */ -#define UBIFS_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / UBIFS_BLOCK_SIZE) -#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_CACHE_SHIFT - UBIFS_BLOCK_SHIFT) +#define UBIFS_BLOCKS_PER_PAGE (PAGE_SIZE / UBIFS_BLOCK_SIZE) +#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_SHIFT - UBIFS_BLOCK_SHIFT) /* "File system end of life" sequence number watermark */ #define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL @@ -1802,4 +1778,21 @@ int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len, #include "misc.h" #include "key.h" +/* Normal UBIFS messages */ +__printf(2, 3) +void ubifs_msg(const struct ubifs_info *c, const char *fmt, ...); +__printf(2, 3) +void ubifs_err(const struct ubifs_info *c, const char *fmt, ...); +__printf(2, 3) +void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...); +/* + * A variant of 'ubifs_err()' which takes the UBIFS file-system description + * object as an argument. + */ +#define ubifs_errc(c, fmt, ...) 
\ +do { \ + if (!(c)->probing) \ + ubifs_err(c, fmt, ##__VA_ARGS__); \ +} while (0) + #endif /* !__UBIFS_H__ */ diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index c7f4d434d098..b043e044121d 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -59,7 +59,6 @@ #include <linux/fs.h> #include <linux/slab.h> #include <linux/xattr.h> -#include <linux/posix_acl_xattr.h> /* * Limit the number of extended attributes per inode so that the total size diff --git a/fs/udf/file.c b/fs/udf/file.c index 1af98963d860..877ba1c9b461 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -46,7 +46,7 @@ static void __udf_adinicb_readpage(struct page *page) kaddr = kmap(page); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); - memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size); + memset(kaddr + inode->i_size, 0, PAGE_SIZE - inode->i_size); flush_dcache_page(page); SetPageUptodate(page); kunmap(page); @@ -87,14 +87,14 @@ static int udf_adinicb_write_begin(struct file *file, { struct page *page; - if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE)) + if (WARN_ON_ONCE(pos >= PAGE_SIZE)) return -EIO; page = grab_cache_page_write_begin(mapping, 0, flags); if (!page) return -ENOMEM; *pagep = page; - if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) + if (!PageUptodate(page) && len != PAGE_SIZE) __udf_adinicb_readpage(page); return 0; } diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 166d3ed32c39..2dc461eeb415 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -287,7 +287,7 @@ int udf_expand_file_adinicb(struct inode *inode) if (!PageUptodate(page)) { kaddr = kmap(page); memset(kaddr + iinfo->i_lenAlloc, 0x00, - PAGE_CACHE_SIZE - iinfo->i_lenAlloc); + PAGE_SIZE - iinfo->i_lenAlloc); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, iinfo->i_lenAlloc); flush_dcache_page(page); @@ -319,7 +319,7 @@ int udf_expand_file_adinicb(struct inode *inode) inode->i_data.a_ops = &udf_adinicb_aops; up_write(&iinfo->i_data_sem); } - page_cache_release(page); + put_page(page); mark_inode_dirty(inode); return err; diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index dc5fae601c24..0447b949c7f5 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -237,7 +237,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg, sector_t newb, struct page *locked_page) { const unsigned blks_per_page = - 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits); + 1 << (PAGE_SHIFT - inode->i_blkbits); const unsigned mask = blks_per_page - 1; struct address_space * const mapping = inode->i_mapping; pgoff_t index, cur_index, last_index; @@ -255,9 +255,9 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg, cur_index = locked_page->index; end = count + beg; - last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits); + last_index = end >> (PAGE_SHIFT - inode->i_blkbits); for (i = beg; i < end; i = (i | mask) + 1) { - index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits); + index = i >> (PAGE_SHIFT - inode->i_blkbits); if (likely(cur_index != index)) { page = ufs_get_locked_page(mapping, index); diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 74f2e80288bf..0b1457292734 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -62,7 +62,7 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) static inline void ufs_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr) @@ -111,13 +111,13 @@ static void ufs_check_page(struct page *page) struct super_block *sb = dir->i_sb; char *kaddr = 
page_address(page); unsigned offs, rec_len; - unsigned limit = PAGE_CACHE_SIZE; + unsigned limit = PAGE_SIZE; const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1; struct ufs_dir_entry *p; char *error; - if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { - limit = dir->i_size & ~PAGE_CACHE_MASK; + if ((dir->i_size >> PAGE_SHIFT) == page->index) { + limit = dir->i_size & ~PAGE_MASK; if (limit & chunk_mask) goto Ebadsize; if (!limit) @@ -170,7 +170,7 @@ Einumber: bad_entry: ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - " "offset=%lu, rec_len=%d, name_len=%d", - dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, rec_len, ufs_get_de_namlen(sb, p)); goto fail; Eend: @@ -178,7 +178,7 @@ Eend: ufs_error(sb, __func__, "entry in directory #%lu spans the page boundary" "offset=%lu", - dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs); + dir->i_ino, (page->index<<PAGE_SHIFT)+offs); fail: SetPageChecked(page); SetPageError(page); @@ -211,9 +211,9 @@ ufs_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = inode->i_size; - last_byte -= page_nr << PAGE_CACHE_SHIFT; - if (last_byte > PAGE_CACHE_SIZE) - last_byte = PAGE_CACHE_SIZE; + last_byte -= page_nr << PAGE_SHIFT; + if (last_byte > PAGE_SIZE) + last_byte = PAGE_SIZE; return last_byte; } @@ -341,7 +341,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode) kaddr = page_address(page); dir_end = kaddr + ufs_last_byte(dir, n); de = (struct ufs_dir_entry *)kaddr; - kaddr += PAGE_CACHE_SIZE - reclen; + kaddr += PAGE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { /* We hit i_size */ @@ -432,8 +432,8 @@ ufs_readdir(struct file *file, struct dir_context *ctx) loff_t pos = ctx->pos; struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - unsigned int offset = pos & ~PAGE_CACHE_MASK; - unsigned long n = pos >> PAGE_CACHE_SHIFT; + unsigned int offset = pos & ~PAGE_MASK; + unsigned long n = pos >> PAGE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); int need_revalidate = file->f_version != inode->i_version; @@ -454,14 +454,14 @@ ufs_readdir(struct file *file, struct dir_context *ctx) ufs_error(sb, __func__, "bad page in #%lu", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_SIZE - offset; return -EIO; } kaddr = page_address(page); if (unlikely(need_revalidate)) { if (offset) { offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask); - ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; + ctx->pos = (n<<PAGE_SHIFT) + offset; } file->f_version = inode->i_version; need_revalidate = 0; @@ -574,7 +574,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir) kmap(page); base = (char*)page_address(page); - memset(base, 0, PAGE_CACHE_SIZE); + memset(base, 0, PAGE_SIZE); de = (struct ufs_dir_entry *) base; @@ -594,7 +594,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir) err = ufs_commit_chunk(page, 0, chunk_size); fail: - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index d897e169ab9c..9f49431e798d 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -1051,13 +1051,13 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size) lastfrag--; lastpage = ufs_get_locked_page(mapping, lastfrag >> - (PAGE_CACHE_SHIFT - inode->i_blkbits)); + (PAGE_SHIFT - inode->i_blkbits)); if (IS_ERR(lastpage)) { err = -EIO; goto out; } - end = 
lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1); + end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1); bh = page_buffers(lastpage); for (i = 0; i < end; ++i) bh = bh->b_this_page; diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index acf4a3b61b81..a1559f762805 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -305,7 +305,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0); else { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } inode_dec_link_count(old_dir); } @@ -315,11 +315,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: return err; } diff --git a/fs/ufs/util.c b/fs/ufs/util.c index b6c2f94e041e..a409e3e7827a 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c @@ -261,14 +261,14 @@ struct page *ufs_get_locked_page(struct address_space *mapping, if (unlikely(page->mapping == NULL)) { /* Truncate got there first */ unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; goto out; } if (!PageUptodate(page) || PageError(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); printk(KERN_ERR "ufs_change_blocknr: " "can not read page: ino %lu, index: %lu\n", diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 954175928240..b7fbf53dbc81 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -283,7 +283,7 @@ extern struct page *ufs_get_locked_page(struct address_space *mapping, static inline void ufs_put_locked_page(struct page *page) { unlock_page(page); - page_cache_release(page); + put_page(page); } diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index f64639176670..3542d94fddce 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -121,4 +121,5 @@ xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o -xfs-$(CONFIG_NFSD_PNFS) += xfs_pnfs.o +xfs-$(CONFIG_NFSD_BLOCKLAYOUT) += xfs_pnfs.o +xfs-$(CONFIG_NFSD_SCSILAYOUT) += xfs_pnfs.o diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 041b6948aecc..ce41d7fe753c 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -3742,11 +3742,11 @@ xfs_bmap_btalloc( args.prod = align; if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) args.mod = (xfs_extlen_t)(args.prod - args.mod); - } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) { + } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { args.prod = 1; args.mod = 0; } else { - args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog; + args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod)))) args.mod = (xfs_extlen_t)(args.prod - args.mod); } diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index d445a64b979e..e49b2406d15d 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -704,7 +704,7 @@ next_buffer: xfs_iunlock(ip, XFS_ILOCK_EXCL); out_invalidate: - xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE); + xfs_vm_invalidatepage(page, 0, PAGE_SIZE); return; } @@ -925,9 +925,9 @@ xfs_do_writepage( * ---------------------------------^------------------| */ offset = i_size_read(inode); - end_index = offset >> PAGE_CACHE_SHIFT; + end_index = offset >> PAGE_SHIFT; if (page->index < end_index) - end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT; + 
end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT; else { /* * Check whether the page to write out is beyond or straddles @@ -940,7 +940,7 @@ xfs_do_writepage( * | | Straddles | * ---------------------------------^-----------|--------| */ - unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1); + unsigned offset_into_page = offset & (PAGE_SIZE - 1); /* * Skip the page if it is fully outside i_size, e.g. due to a @@ -971,7 +971,7 @@ xfs_do_writepage( * memory is zeroed when mapped, and writes to that region are * not written out to the file." */ - zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE); + zero_user_segment(page, offset_into_page, PAGE_SIZE); /* Adjust the end_offset to the end of file */ end_offset = offset; @@ -1475,7 +1475,7 @@ xfs_vm_write_failed( loff_t block_offset; loff_t block_start; loff_t block_end; - loff_t from = pos & (PAGE_CACHE_SIZE - 1); + loff_t from = pos & (PAGE_SIZE - 1); loff_t to = from + len; struct buffer_head *bh, *head; struct xfs_mount *mp = XFS_I(inode)->i_mount; @@ -1491,7 +1491,7 @@ xfs_vm_write_failed( * start of the page by using shifts rather than masks the mismatch * problem. */ - block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT; + block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT; ASSERT(block_offset + from == pos); @@ -1558,12 +1558,12 @@ xfs_vm_write_begin( struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int status; struct xfs_mount *mp = XFS_I(mapping->host)->i_mount; - ASSERT(len <= PAGE_CACHE_SIZE); + ASSERT(len <= PAGE_SIZE); page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -1592,7 +1592,7 @@ xfs_vm_write_begin( truncate_pagecache_range(inode, start, pos + len); } - page_cache_release(page); + put_page(page); page = NULL; } @@ -1620,7 +1620,7 @@ xfs_vm_write_end( { int ret; - ASSERT(len <= PAGE_CACHE_SIZE); + ASSERT(len <= PAGE_SIZE); ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); if (unlikely(ret < len)) { diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index a32c1dcae2ff..3b6309865c65 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1237,7 +1237,7 @@ xfs_free_file_space( /* wait for the completion of any pending DIOs */ inode_dio_wait(VFS_I(ip)); - rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); + rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE); ioffset = round_down(offset, rounding); iendoffset = round_up(offset + len, rounding) - 1; error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset, @@ -1466,7 +1466,7 @@ xfs_shift_file_space( if (error) return error; error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, - offset >> PAGE_CACHE_SHIFT, -1); + offset >> PAGE_SHIFT, -1); if (error) return error; diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index 2816d42507bc..a1b2dd828b9d 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c @@ -246,7 +246,7 @@ const struct export_operations xfs_export_operations = { .fh_to_parent = xfs_fs_fh_to_parent, .get_parent = xfs_fs_get_parent, .commit_metadata = xfs_fs_nfs_commit_metadata, -#ifdef CONFIG_NFSD_PNFS +#ifdef CONFIG_NFSD_BLOCKLAYOUT .get_uuid = xfs_fs_get_uuid, .map_blocks = xfs_fs_map_blocks, .commit_blocks = xfs_fs_commit_blocks, diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ac0fd32de31e..569938a4a357 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -106,8 +106,8 @@ xfs_iozero( unsigned offset, bytes; 
void *fsdata; - offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ - bytes = PAGE_CACHE_SIZE - offset; + offset = (pos & (PAGE_SIZE -1)); /* Within page */ + bytes = PAGE_SIZE - offset; if (bytes > count) bytes = count; @@ -799,8 +799,8 @@ xfs_file_dio_aio_write( /* see generic_file_direct_write() for why this is necessary */ if (mapping->nrpages) { invalidate_inode_pages2_range(mapping, - pos >> PAGE_CACHE_SHIFT, - end >> PAGE_CACHE_SHIFT); + pos >> PAGE_SHIFT, + end >> PAGE_SHIFT); } if (ret > 0) { @@ -1207,9 +1207,9 @@ xfs_find_get_desired_pgoff( pagevec_init(&pvec, 0); - index = startoff >> PAGE_CACHE_SHIFT; + index = startoff >> PAGE_SHIFT; endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount); - end = endoff >> PAGE_CACHE_SHIFT; + end = endoff >> PAGE_SHIFT; do { int want; unsigned nr_pages; diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index ec0e239a0fa9..a8192dc797dc 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -135,7 +135,7 @@ typedef __u32 xfs_nlink_t; * Size of block device i/o is parameterized here. * Currently the system supports page-sized i/o. */ -#define BLKDEV_IOSHIFT PAGE_CACHE_SHIFT +#define BLKDEV_IOSHIFT PAGE_SHIFT #define BLKDEV_IOSIZE (1<<BLKDEV_IOSHIFT) /* number of BB's per block device block */ #define BLKDEV_BB BTOBB(BLKDEV_IOSIZE) diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 536a0ee9cd5a..cfd4210dd015 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -171,7 +171,7 @@ xfs_sb_validate_fsb_count( ASSERT(sbp->sb_blocklog >= BBSHIFT); /* Limited by ULONG_MAX of page cache index */ - if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) + if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) return -EFBIG; return 0; } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index bac6b3435591..eafe257b357a 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -231,12 +231,12 @@ static inline unsigned long xfs_preferred_iosize(xfs_mount_t *mp) { if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE) - return PAGE_CACHE_SIZE; + return PAGE_SIZE; return (mp->m_swidth ? (mp->m_swidth << mp->m_sb.sb_blocklog) : ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ? (1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) : - PAGE_CACHE_SIZE)); + PAGE_SIZE)); } #define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \ diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index ade236e90bb3..51ddaf2c2b8c 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -293,8 +293,8 @@ xfs_fs_commit_blocks( * Make sure reads through the pagecache see the new data. 
*/ error = invalidate_inode_pages2_range(inode->i_mapping, - start >> PAGE_CACHE_SHIFT, - (end - 1) >> PAGE_CACHE_SHIFT); + start >> PAGE_SHIFT, + (end - 1) >> PAGE_SHIFT); WARN_ON_ONCE(error); error = xfs_iomap_write_unwritten(ip, start, length); diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h index 8147ac108820..93f74853961b 100644 --- a/fs/xfs/xfs_pnfs.h +++ b/fs/xfs/xfs_pnfs.h @@ -1,7 +1,7 @@ #ifndef _XFS_PNFS_H #define _XFS_PNFS_H 1 -#ifdef CONFIG_NFSD_PNFS +#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT) int xfs_fs_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset); int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length, struct iomap *iomap, bool write, u32 *device_generation); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index d760934109b5..187e14b696c2 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -556,10 +556,10 @@ xfs_max_file_offset( /* Figure out maximum filesize, on Linux this can depend on * the filesystem blocksize (on 32 bit platforms). * __block_write_begin does this in an [unsigned] long... - * page->index << (PAGE_CACHE_SHIFT - bbits) + * page->index << (PAGE_SHIFT - bbits) * So, for page sized blocks (4K on 32 bit platforms), * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is - * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) + * (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1) * but for smaller blocksizes it is less (bbits = log2 bsize). * Note1: get_block_t takes a long (implicit cast from above) * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch @@ -570,10 +570,10 @@ xfs_max_file_offset( #if BITS_PER_LONG == 32 # if defined(CONFIG_LBDAF) ASSERT(sizeof(sector_t) == 8); - pagefactor = PAGE_CACHE_SIZE; + pagefactor = PAGE_SIZE; bitshift = BITS_PER_LONG; # else - pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift); + pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift); # endif #endif
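The xfs_max_file_offset() comment in the hunk above is easier to follow with numbers. Here is a hedged standalone model of its 32-bit branches (names mirror the kernel function, but this is a userspace sketch: the have_lbdaf flag stands in for CONFIG_LBDAF, and 4K pages are assumed):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                   /* assumed 4K pages */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

/* Model of xfs_max_file_offset() on a BITS_PER_LONG == 32 kernel. */
static uint64_t max_file_offset(unsigned int blockshift, int have_lbdaf)
{
	uint64_t pagefactor = 1;
	unsigned int bitshift = 32 - 1;        /* BITS_PER_LONG - 1 */

	if (have_lbdaf) {
		pagefactor = PAGE_SIZE;        /* 64-bit sector_t available */
		bitshift = 32;                 /* BITS_PER_LONG */
	} else {
		pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
	}

	return (pagefactor << bitshift) - 1;
}

int main(void)
{
	/* 4K blocks without LBDAF: the "wraps at around 8Tb" case */
	printf("without LBDAF: %llu\n",
	       (unsigned long long)max_file_offset(12, 0));
	/* with CONFIG_LBDAF (64-bit sector_t) the limit doubles */
	printf("with LBDAF:    %llu\n",
	       (unsigned long long)max_file_offset(12, 1));
	return 0;
}

Without LBDAF this yields 8796093022207 (8TB - 1), matching the "wraps at around 8Tb" note in the comment; with LBDAF it yields 17592186044415 (16TB - 1).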