 fs/xfs/linux-2.6/xfs_aops.c | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index f1dd70e201cf..6feecd279470 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -329,17 +329,15 @@ xfs_map_blocks(
 }
 
 STATIC int
-xfs_iomap_valid(
+xfs_imap_valid(
 	struct inode		*inode,
 	struct xfs_bmbt_irec	*imap,
-	loff_t			offset)
+	xfs_off_t		offset)
 {
-	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
-	xfs_off_t		iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
-	xfs_off_t		iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
+	offset >>= inode->i_blkbits;
 
-	return offset >= iomap_offset &&
-		offset < iomap_offset + iomap_bsize;
+	return offset >= imap->br_startoff &&
+		offset < imap->br_startoff + imap->br_blockcount;
 }
 
 /*
@@ -825,7 +823,7 @@ xfs_convert_page(
 		else
 			type = IO_DELAY;
 
-		if (!xfs_iomap_valid(inode, imap, offset)) {
+		if (!xfs_imap_valid(inode, imap, offset)) {
 			done = 1;
 			continue;
 		}
@@ -1069,7 +1067,7 @@ xfs_page_state_convert(
 	__uint64_t              end_offset;
 	pgoff_t                 end_index, last_index, tlast;
 	ssize_t			size, len;
-	int			flags, err, iomap_valid = 0, uptodate = 1;
+	int			flags, err, imap_valid = 0, uptodate = 1;
 	int			page_dirty, count = 0;
 	int			trylock = 0;
 	int			all_bh = unmapped;
@@ -1130,12 +1128,12 @@ xfs_page_state_convert(
 			 * the iomap is actually still valid, but the ioend
 			 * isn't. shouldn't happen too often.
 			 */
-			iomap_valid = 0;
+			imap_valid = 0;
 			continue;
 		}
 
-		if (iomap_valid)
-			iomap_valid = xfs_iomap_valid(inode, &imap, offset);
+		if (imap_valid)
+			imap_valid = xfs_imap_valid(inode, &imap, offset);
 
 		/*
 		 * First case, map an unwritten extent and prepare for
@@ -1156,7 +1154,7 @@ xfs_page_state_convert(
 			 * Make sure we don't use a read-only iomap
 			 */
 			if (flags == BMAPI_READ)
-				iomap_valid = 0;
+				imap_valid = 0;
 
 			if (buffer_unwritten(bh)) {
 				type = IO_UNWRITTEN;
@@ -1169,7 +1167,7 @@ xfs_page_state_convert(
 				flags = BMAPI_WRITE | BMAPI_MMAP;
 			}
 
-			if (!iomap_valid) {
+			if (!imap_valid) {
 				/*
 				 * if we didn't have a valid mapping then we
 				 * need to ensure that we put the new mapping
@@ -1190,9 +1188,10 @@ xfs_page_state_convert(
 						&imap, flags);
 				if (err)
 					goto error;
-				iomap_valid = xfs_iomap_valid(inode, &imap, offset);
+				imap_valid = xfs_imap_valid(inode, &imap,
+							    offset);
 			}
-			if (iomap_valid) {
+			if (imap_valid) {
 				xfs_map_at_offset(inode, bh, &imap, offset);
 				if (startio) {
 					xfs_add_to_ioend(inode, bh, offset,
@@ -1212,7 +1211,7 @@ xfs_page_state_convert(
 			 * That means it must already have extents allocated
 			 * underneath it. Map the extent by reading it.
 			 */
-			if (!iomap_valid || flags != BMAPI_READ) {
+			if (!imap_valid || flags != BMAPI_READ) {
 				flags = BMAPI_READ;
 				size = xfs_probe_cluster(inode, page, bh,
 								head, 1);
@@ -1220,7 +1219,8 @@ xfs_page_state_convert(
 						&imap, flags);
 				if (err)
 					goto error;
-				iomap_valid = xfs_iomap_valid(inode, &imap, offset);
+				imap_valid = xfs_imap_valid(inode, &imap,
+							    offset);
 			}
 
 			/*
@@ -1234,18 +1234,18 @@ xfs_page_state_convert(
 			type = IO_NEW;
 			if (trylock_buffer(bh)) {
 				ASSERT(buffer_mapped(bh));
-				if (iomap_valid)
+				if (imap_valid)
 					all_bh = 1;
 				xfs_add_to_ioend(inode, bh, offset, type,
-						&ioend, !iomap_valid);
+						&ioend, !imap_valid);
 				page_dirty--;
 				count++;
 			} else {
-				iomap_valid = 0;
+				imap_valid = 0;
 			}
 		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
 			   (unmapped || startio)) {
-			iomap_valid = 0;
+			imap_valid = 0;
 		}
 
 		if (!iohead)
@@ -1259,7 +1259,7 @@ xfs_page_state_convert(
 	if (startio)
 		xfs_start_page_writeback(page, 1, count);
 
-	if (ioend && iomap_valid) {
+	if (ioend && imap_valid) {
 		struct xfs_mount	*m = XFS_I(inode)->i_mount;
 		xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap.br_startoff);
 		xfs_off_t		iomap_bsize = XFS_FSB_TO_B(m, imap.br_blockcount);
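The functional change is confined to xfs_imap_valid(): instead of converting the extent's start and length to byte units with XFS_FSB_TO_B() and comparing byte offsets, the renamed helper shifts the byte offset right by inode->i_blkbits and compares filesystem block numbers directly, which are the units struct xfs_bmbt_irec already stores. A minimal user-space sketch of the two checks follows; it is not kernel code, and the helper names, the fixed 4k block size, and the sample extent are assumptions for illustration only.

/* Standalone sketch: old byte-based check vs. new block-based check. */
#include <stdio.h>
#include <stdint.h>

#define BLKBITS 12	/* assumed 4096-byte filesystem block size */

/* old style: convert the extent to bytes (as XFS_FSB_TO_B() did), compare bytes */
static int valid_bytes(uint64_t start_fsb, uint64_t count_fsb, uint64_t offset)
{
	uint64_t start = start_fsb << BLKBITS;
	uint64_t size  = count_fsb << BLKBITS;

	return offset >= start && offset < start + size;
}

/* new style: convert the byte offset to a block number, compare blocks */
static int valid_blocks(uint64_t start_fsb, uint64_t count_fsb, uint64_t offset)
{
	offset >>= BLKBITS;

	return offset >= start_fsb && offset < start_fsb + count_fsb;
}

int main(void)
{
	/* hypothetical extent covering blocks [100, 108) */
	uint64_t offsets[] = {
		100ULL << BLKBITS,		/* first byte of the extent */
		(104ULL << BLKBITS) + 17,	/* somewhere in the middle */
		(108ULL << BLKBITS) - 1,	/* last byte of the extent */
		108ULL << BLKBITS,		/* first byte past the extent */
	};

	for (int i = 0; i < 4; i++)
		printf("offset %llu: bytes=%d blocks=%d\n",
		       (unsigned long long)offsets[i],
		       valid_bytes(100, 8, offsets[i]),
		       valid_blocks(100, 8, offsets[i]));
	return 0;
}

Both forms accept the first three offsets and reject the fourth; the block-based check simply avoids the two XFS_FSB_TO_B() conversions on every call.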