author		Dave Chinner <dchinner@redhat.com>	2014-10-02 09:04:31 +1000
committer	Dave Chinner <david@fromorbit.com>	2014-10-02 09:04:31 +1000
commit		61be9c529a4a715ab8679e9ca82bc3790c7ab66c (patch)
tree		3e8d1c757133eb7c844cd4ba8815f4a6bc0218cb /fs/xfs/xfs_buf.c
parent		e8aaba9a783c8e5d2c58ebe69650ea31b91bb745 (diff)
xfs: rework xfs_buf_bio_endio error handling
Currently the report of a bio error from completion immediately marks the
buffer with an error. The issue is that this is racy w.r.t. synchronous IO -
the submitter can see b_error being set before the IO is complete, and hence
we cannot differentiate between submission failures and completion failures.

Add an internal b_io_error field protected by the b_lock to catch IO
completion errors, and only propagate that to the buffer during final IO
completion handling. Hence we can tell in xfs_buf_iorequest if we've had a
submission failure by checking bp->b_error before dropping our b_io_remaining
reference - that reference will prevent b_io_error values from being
propagated to b_error in the event that completion races with submission.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
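To make the ordering concrete, below is a minimal userspace C sketch of the
pattern this patch describes. It is not XFS or kernel code: struct buf,
buf_ioend, buf_bio_end_io, buf_submit, fake_queue and the error value 5 are
all illustrative stand-ins for bp->b_error, bp->b_io_error, bp->b_io_remaining
and the corresponding xfs_buf_* routines. The idea it models is the one in the
commit message: per-bio errors are parked in a private io_error field under a
lock, the submitter holds an extra io_remaining reference while it checks the
public error field for submission failures, and the private error is folded
into the public one only at final completion.

/* Illustrative userspace sketch only - not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct buf {
	pthread_mutex_t	lock;		/* stands in for bp->b_lock */
	int		error;		/* public error, like bp->b_error */
	int		io_error;	/* private error, like bp->b_io_error */
	atomic_int	io_remaining;	/* like bp->b_io_remaining */
};

/* Final completion: runs single threaded, so io_error can be read unlocked. */
static void buf_ioend(struct buf *bp)
{
	if (!bp->error && bp->io_error)
		bp->error = bp->io_error;
	/* verification / callbacks / waking waiters would happen here */
}

/* Per-bio completion: record only the first error, privately, under the lock. */
static void buf_bio_end_io(struct buf *bp, int error)
{
	if (error) {
		pthread_mutex_lock(&bp->lock);
		if (!bp->io_error)
			bp->io_error = error;
		pthread_mutex_unlock(&bp->lock);
	}
	if (atomic_fetch_sub(&bp->io_remaining, 1) == 1)
		buf_ioend(bp);
}

/* hypothetical bio-queueing hook so the sketch stays self-contained */
typedef void (*queue_fn)(struct buf *bp, int idx);

/*
 * Submission: the submitter's own io_remaining reference guarantees that a
 * completion racing with submission cannot fold io_error into error before
 * the submitter has checked error for submission failures.
 */
static int buf_submit(struct buf *bp, int nbios, queue_fn queue)
{
	int submit_error;

	bp->io_error = 0;			/* clear stale private error */
	atomic_store(&bp->io_remaining, 1);	/* submitter's reference */

	for (int i = 0; i < nbios; i++) {
		atomic_fetch_add(&bp->io_remaining, 1);
		queue(bp, i);		/* completion may run before we return */
	}

	/* anything visible in ->error at this point is a submission failure */
	submit_error = bp->error;

	/* drop the submitter's reference; may run final completion */
	if (atomic_fetch_sub(&bp->io_remaining, 1) == 1)
		buf_ioend(bp);
	return submit_error;
}

/* fake "bio" that completes immediately with an I/O error */
static void fake_queue(struct buf *bp, int idx)
{
	(void)idx;
	buf_bio_end_io(bp, 5);
}

int main(void)
{
	struct buf b = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int submit_error = buf_submit(&b, 1, fake_queue);

	/* submit_error is 0: the bio error was not a submission failure;
	 * b.error carries the completion error folded in by buf_ioend() */
	printf("submit_error=%d final_error=%d\n", submit_error, b.error);
	return 0;
}

Even though the fake bio completes with an error before buf_submit() returns,
the submitter's reference keeps that error out of the public field until the
submitter has finished its own error check, which is the race the patch closes.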
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--	fs/xfs/xfs_buf.c | 18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a046149e6099..170d6c0afe71 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1008,6 +1008,13 @@ xfs_buf_ioend(
 
 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
 
+	/*
+	 * Pull in IO completion errors now. We are guaranteed to be running
+	 * single threaded, so we don't need the lock to read b_io_error.
+	 */
+	if (!bp->b_error && bp->b_io_error)
+		xfs_buf_ioerror(bp, bp->b_io_error);
+
 	/* Only validate buffers that were read without errors */
 	if (read && !bp->b_error && bp->b_ops) {
 		ASSERT(!bp->b_iodone);
@@ -1192,8 +1199,12 @@ xfs_buf_bio_end_io(
 	 * don't overwrite existing errors - otherwise we can lose errors on
 	 * buffers that require multiple bios to complete.
 	 */
-	if (!bp->b_error)
-		xfs_buf_ioerror(bp, error);
+	if (error) {
+		spin_lock(&bp->b_lock);
+		if (!bp->b_io_error)
+			bp->b_io_error = error;
+		spin_unlock(&bp->b_lock);
+	}
 
 	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
@@ -1379,6 +1390,9 @@ xfs_buf_iorequest(
 	if (bp->b_flags & XBF_WRITE)
 		xfs_buf_wait_unpin(bp);
 
+	/* clear the internal error state to avoid spurious errors */
+	bp->b_io_error = 0;
+
 	/*
 	 * Take references to the buffer. For XBF_ASYNC buffers, holding a
 	 * reference for as long as submission takes is all that is necessary