Diffstat (limited to 'fs/xfs/linux-2.6/xfs_aops.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c  |  67
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c02f7c5b7462..6cbbd165c60d 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -372,7 +372,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
* assumes that all buffers on the page are started at the same time.
*
* The fix is two passes across the ioend list - one to start writeback on the
- * bufferheads, and then the second one submit them for I/O.
+ * buffer_heads, and then submit them for I/O on the second pass.
*/
STATIC void
xfs_submit_ioend(
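The two-pass structure described in the comment above matters because a submitted bio can complete at any moment; if other buffers on the same page were still being marked for writeback when that happened, writeback on the page could be ended too early. Below is a minimal, compilable user-space C sketch of the pattern only; the demo_buffer list and the mark_for_writeback()/submit_io() helpers are illustrative stand-ins, not the kernel API or the actual xfs_submit_ioend() code.

#include <stdio.h>

/* Illustrative stand-in for a chain of buffers belonging to one ioend. */
struct demo_buffer {
	int			id;
	int			writeback;	/* "under writeback" flag */
	struct demo_buffer	*next;
};

/* Pass 1: mark every buffer before any I/O is issued. */
static void mark_for_writeback(struct demo_buffer *bh)
{
	bh->writeback = 1;
}

/* Pass 2: only now is I/O submitted; completion may run any time after. */
static void submit_io(struct demo_buffer *bh)
{
	printf("submitting buffer %d (writeback=%d)\n", bh->id, bh->writeback);
}

int main(void)
{
	struct demo_buffer b2 = { 2, 0, NULL };
	struct demo_buffer b1 = { 1, 0, &b2 };
	struct demo_buffer *bh;

	for (bh = &b1; bh; bh = bh->next)	/* pass 1: state changes only */
		mark_for_writeback(bh);
	for (bh = &b1; bh; bh = bh->next)	/* pass 2: submission */
		submit_io(bh);
	return 0;
}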
@@ -699,7 +699,7 @@ xfs_convert_page(
/*
* page_dirty is initially a count of buffers on the page before
- * EOF and is decrememted as we move each into a cleanable state.
+ * EOF and is decremented as we move each into a cleanable state.
*
* Derivation:
*
@@ -842,7 +842,7 @@ xfs_cluster_write(
* page if possible.
* The bh->b_state's cannot know if any of the blocks or which block for
* that matter are dirty due to mmap writes, and therefore bh uptodate is
- * only vaild if the page itself isn't completely uptodate. Some layers
+ * only valid if the page itself isn't completely uptodate. Some layers
* may clear the page dirty flag prior to calling write page, under the
* assumption the entire page will be written out; by not writing out the
* whole page the page can be reused before all valid dirty data is
@@ -892,7 +892,7 @@ xfs_page_state_convert(
/*
* page_dirty is initially a count of buffers on the page before
- * EOF and is decrememted as we move each into a cleanable state.
+ * EOF and is decremented as we move each into a cleanable state.
*
* Derivation:
*
@@ -1223,10 +1223,9 @@ free_buffers:
}
STATIC int
-__xfs_get_block(
+__xfs_get_blocks(
struct inode *inode,
sector_t iblock,
- unsigned long blocks,
struct buffer_head *bh_result,
int create,
int direct,
@@ -1236,22 +1235,17 @@ __xfs_get_block(
xfs_iomap_t iomap;
xfs_off_t offset;
ssize_t size;
- int retpbbm = 1;
+ int niomap = 1;
int error;
offset = (xfs_off_t)iblock << inode->i_blkbits;
- if (blocks)
- size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
- (xfs_off_t)blocks << inode->i_blkbits);
- else
- size = 1 << inode->i_blkbits;
-
+ ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+ size = bh_result->b_size;
VOP_BMAP(vp, offset, size,
- create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
+ create ? flags : BMAPI_READ, &iomap, &niomap, error);
if (error)
return -error;
-
- if (retpbbm == 0)
+ if (niomap == 0)
return 0;
if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
@@ -1271,12 +1265,16 @@ __xfs_get_block(
}
}
- /* If this is a realtime file, data might be on a new device */
+ /*
+ * If this is a realtime file, data may be on a different device
+ * to the one currently pointed to by the buffer_head's b_bdev.
+ */
bh_result->b_bdev = iomap.iomap_target->bt_bdev;
- /* If we previously allocated a block out beyond eof and
- * we are now coming back to use it then we will need to
- * flag it as new even if it has a disk address.
+ /*
+ * If we previously allocated a block out beyond eof and we are
+ * now coming back to use it then we will need to flag it as new
+ * even if it has a disk address.
*/
if (create &&
((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
@@ -1292,26 +1290,24 @@ __xfs_get_block(
}
}
- if (blocks) {
+ if (direct || size > (1 << inode->i_blkbits)) {
ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
offset = min_t(xfs_off_t,
- iomap.iomap_bsize - iomap.iomap_delta,
- (xfs_off_t)blocks << inode->i_blkbits);
- bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
+ iomap.iomap_bsize - iomap.iomap_delta, size);
+ bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
}
return 0;
}
int
-xfs_get_block(
+xfs_get_blocks(
struct inode *inode,
sector_t iblock,
struct buffer_head *bh_result,
int create)
{
- return __xfs_get_block(inode, iblock,
- bh_result->b_size >> inode->i_blkbits,
+ return __xfs_get_blocks(inode, iblock,
bh_result, create, 0, BMAPI_WRITE);
}
@@ -1322,8 +1318,7 @@ xfs_get_blocks_direct(
struct buffer_head *bh_result,
int create)
{
- return __xfs_get_block(inode, iblock,
- bh_result->b_size >> inode->i_blkbits,
+ return __xfs_get_blocks(inode, iblock,
bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
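The hunks above change the calling convention: instead of an explicit blocks argument, the requested mapping size now arrives in bh_result->b_size, and __xfs_get_blocks() clamps it against the extent it found and writes the mapped size back into the same field. A compilable user-space sketch of that in/out contract follows; the demo_bh type and the map_blocks() callee are made-up names for illustration, not the kernel buffer_head interface.

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-in for the single field this contract touches. */
struct demo_bh {
	size_t b_size;		/* in: bytes requested, out: bytes mapped */
};

/*
 * Hypothetical callee: clamp the caller's request against what remains
 * of the extent that was found, and report the mapped size via b_size.
 */
static int map_blocks(struct demo_bh *bh, size_t extent_remaining)
{
	if (bh->b_size > extent_remaining)
		bh->b_size = extent_remaining;
	return 0;
}

int main(void)
{
	struct demo_bh bh = { .b_size = 64 * 1024 };	/* request 64k */

	map_blocks(&bh, 16 * 1024);			/* extent has 16k left */
	printf("mapped %zu bytes\n", bh.b_size);	/* prints 16384 */
	return 0;
}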
@@ -1339,9 +1334,9 @@ xfs_end_io_direct(
/*
* Non-NULL private data means we need to issue a transaction to
* convert a range from unwritten to written extents. This needs
- * to happen from process contect but aio+dio I/O completion
+ * to happen from process context but aio+dio I/O completion
* happens from irq context so we need to defer it to a workqueue.
- * This is not nessecary for synchronous direct I/O, but we do
+ * This is not necessary for synchronous direct I/O, but we do
* it anyway to keep the code uniform and simpler.
*
* The core direct I/O code might be changed to always call the
@@ -1358,7 +1353,7 @@ xfs_end_io_direct(
}
/*
- * blockdev_direct_IO can return an error even afer the I/O
+ * blockdev_direct_IO can return an error even after the I/O
* completion handler was called. Thus we need to protect
* against double-freeing.
*/
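The comments above describe the standard deferral pattern: the direct I/O completion handler runs in irq context and cannot start a transaction, so it only hands the work off to a queue, and a worker running in process context does the unwritten-to-written conversion. A sketch of that hand-off against the current in-kernel workqueue API is shown below (the API at the time of this patch differed slightly); demo_ioend, demo_end_io() and demo_convert_unwritten() are illustrative names, not the functions in this file.

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/* Illustrative per-I/O state; not the real XFS ioend structure. */
struct demo_ioend {
	loff_t			offset;
	size_t			size;
	struct work_struct	work;
};

/* Runs in process context, so it may sleep, start a transaction, etc. */
static void demo_convert_unwritten(struct work_struct *work)
{
	struct demo_ioend *ioend = container_of(work, struct demo_ioend, work);

	/* ... convert the range [offset, offset + size) here ... */
	kfree(ioend);
}

/* Called from irq context on I/O completion: just queue the work. */
static void demo_end_io(struct demo_ioend *ioend)
{
	INIT_WORK(&ioend->work, demo_convert_unwritten);
	schedule_work(&ioend->work);
}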
@@ -1405,7 +1400,7 @@ xfs_vm_prepare_write(
unsigned int from,
unsigned int to)
{
- return block_prepare_write(page, from, to, xfs_get_block);
+ return block_prepare_write(page, from, to, xfs_get_blocks);
}
STATIC sector_t
@@ -1422,7 +1417,7 @@ xfs_vm_bmap(
VOP_RWLOCK(vp, VRWLOCK_READ);
VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
VOP_RWUNLOCK(vp, VRWLOCK_READ);
- return generic_block_bmap(mapping, block, xfs_get_block);
+ return generic_block_bmap(mapping, block, xfs_get_blocks);
}
STATIC int
@@ -1430,7 +1425,7 @@ xfs_vm_readpage(
struct file *unused,
struct page *page)
{
- return mpage_readpage(page, xfs_get_block);
+ return mpage_readpage(page, xfs_get_blocks);
}
STATIC int
@@ -1440,7 +1435,7 @@ xfs_vm_readpages(
struct list_head *pages,
unsigned nr_pages)
{
- return mpage_readpages(mapping, pages, nr_pages, xfs_get_block);
+ return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
STATIC void