author     Dave Chinner <david@fromorbit.com>  2014-09-23 22:55:51 +1000
committer  Dave Chinner <david@fromorbit.com>  2014-09-23 22:55:51 +1000
commit     33044dc408e6e6bb7f270c0a2e12598ef5592987 (patch)
tree       67dedd3461599e28a95e298db436bc3fc4139d4f /fs/xfs
parent     f6d31f4b0462898896ba68e491662958ce37d095 (diff)
parent     2ebff7bbd785c86e12956388b9e6f6bb8ea5d21e (diff)
Merge branch 'xfs-misc-fixes-for-3.18-2' into for-next
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/libxfs/xfs_da_format.c  |  1
-rw-r--r--  fs/xfs/xfs_bmap_util.c         |  2
-rw-r--r--  fs/xfs/xfs_buf_item.c          |  2
-rw-r--r--  fs/xfs/xfs_icache.c            |  1
-rw-r--r--  fs/xfs/xfs_iops.c              | 30
-rw-r--r--  fs/xfs/xfs_log_cil.c           | 47
-rw-r--r--  fs/xfs/xfs_rtalloc.c           |  2
7 files changed, 71 insertions(+), 14 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_da_format.c b/fs/xfs/libxfs/xfs_da_format.c
index c9aee52a37e2..7e42fdfd2f1d 100644
--- a/fs/xfs/libxfs/xfs_da_format.c
+++ b/fs/xfs/libxfs/xfs_da_format.c
@@ -270,7 +270,6 @@ xfs_dir3_data_get_ftype(
{
__uint8_t ftype = dep->name[dep->namelen];
- ASSERT(ftype < XFS_DIR3_FT_MAX);
if (ftype >= XFS_DIR3_FT_MAX)
return XFS_DIR3_FT_UNKNOWN;
return ftype;
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 809ae7d395c3..d8b77b5bf4d9 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1646,7 +1646,7 @@ xfs_swap_extents_check_format(
return 0;
}
-int
+static int
xfs_swap_extent_flush(
struct xfs_inode *ip)
{
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 76007deed31f..30fa5db9aea8 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -501,7 +501,7 @@ xfs_buf_item_unpin(
* buffer being bad..
*/
-DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
+static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
STATIC uint
xfs_buf_item_push(
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 981b2cf51985..b45f7b27b5df 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -33,7 +33,6 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
-#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 72129493e9d3..ec6dcdc181ee 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -849,6 +849,36 @@ xfs_setattr_size(
return error;
truncate_setsize(inode, newsize);
+ /*
+ * The "we can't serialise against page faults" pain gets worse.
+ *
+ * If the file is mapped then we have to clean the page at the old EOF
+ * when extending the file. Extending the file can expose changes to the
+ * underlying page mapping (e.g. from beyond EOF to a hole or
+ * unwritten), and so on the next attempt to write to that page we need
+ * to remap it for write. i.e. we need .page_mkwrite() to be called.
+ * Hence we need to clean the page to clean the pte and so a new write
+ * fault will be triggered appropriately.
+ *
+ * If we do it before we change the inode size, then we can race with a
+ * page fault that maps the page with exactly the same problem. If we do
+ * it after we change the file size, then a new page fault can come in
+ * and allocate space before we've run the rest of the truncate
+ * transaction. That's kinda grotesque, but it's better than having data
+ * over a hole, and so that's the lesser evil that has been chosen here.
+ *
+ * The real solution, however, is to have some mechanism for locking out
+ * page faults while a truncate is in progress.
+ */
+ if (newsize > oldsize && mapping_mapped(VFS_I(ip)->i_mapping)) {
+ error = filemap_write_and_wait_range(
+ VFS_I(ip)->i_mapping,
+ round_down(oldsize, PAGE_CACHE_SIZE),
+ round_up(oldsize, PAGE_CACHE_SIZE) - 1);
+ if (error)
+ return error;
+ }
+
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
if (error)
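
The new hunk above flushes the page that contains the old EOF before the size change. As a rough, stand-alone illustration of the range arithmetic (not part of the patch; the 4096-byte page size and the example old size are assumptions), the following user-space C sketch computes the same round_down/round_up bounds:

/*
 * Illustration only: the byte range handed to filemap_write_and_wait_range()
 * in the hunk above is the single page containing the old EOF.  A 4096-byte
 * page (PAGE_CACHE_SIZE) and the example old size are assumed here.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096ULL

static uint64_t round_down_u64(uint64_t x, uint64_t align) { return x & ~(align - 1); }
static uint64_t round_up_u64(uint64_t x, uint64_t align) { return (x + align - 1) & ~(align - 1); }

int main(void)
{
	uint64_t oldsize = 6000;	/* hypothetical old inode size */
	uint64_t start = round_down_u64(oldsize, PAGE_CACHE_SIZE);
	uint64_t end = round_up_u64(oldsize, PAGE_CACHE_SIZE) - 1;

	/* Prints "flush 4096..8191": exactly the page that held the old EOF. */
	printf("flush %llu..%llu\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}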
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index f6b79e5325dd..f506c457011e 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -463,12 +463,40 @@ xlog_cil_push(
spin_unlock(&cil->xc_push_lock);
goto out_skip;
}
- spin_unlock(&cil->xc_push_lock);
/* check for a previously pushed sequence */
- if (push_seq < cil->xc_ctx->sequence)
+ if (push_seq < cil->xc_ctx->sequence) {
+ spin_unlock(&cil->xc_push_lock);
goto out_skip;
+ }
+
+ /*
+ * We are now going to push this context, so add it to the committing
+ * list before we do anything else. This ensures that anyone waiting on
+ * this push can easily detect the difference between a "push in
+ * progress" and "CIL is empty, nothing to do".
+ *
+ * IOWs, a wait loop can now check for:
+ * the current sequence not being found on the committing list;
+ * an empty CIL; and
+ * an unchanged sequence number
+ * to detect a push that had nothing to do and therefore does not need
+ * waiting on. If the CIL is not empty, we get put on the committing
+ * list before emptying the CIL and bumping the sequence number. Hence
+ * an empty CIL and an unchanged sequence number means we jumped out
+ * above after doing nothing.
+ *
+ * Hence the waiter will either find the commit sequence on the
+ * committing list or the sequence number will be unchanged and the CIL
+ * still dirty. In that latter case, the push has not yet started, and
+ * so the waiter will have to continue trying to check the CIL
+ * committing list until it is found. In extreme cases of delay, the
+ * sequence may fully commit between the attempts the waiter makes to wait
+ * on the commit sequence.
+ */
+ list_add(&ctx->committing, &cil->xc_committing);
+ spin_unlock(&cil->xc_push_lock);
/*
* pull all the log vectors off the items in the CIL, and
@@ -532,7 +560,6 @@ xlog_cil_push(
*/
spin_lock(&cil->xc_push_lock);
cil->xc_current_sequence = new_ctx->sequence;
- list_add(&ctx->committing, &cil->xc_committing);
spin_unlock(&cil->xc_push_lock);
up_write(&cil->xc_ctx_lock);
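
These two hunks move the list_add() of the context onto xc_committing so that it happens before the CIL is emptied and the current sequence is bumped. The following stand-alone user-space C model is an illustration only (its flags are simplifications of the CIL state, not kernel structures); it shows what a waiter sampling the state mid-push would conclude under the old ordering versus the new one:

/*
 * Stand-alone user-space model: an illustration only, not kernel code.
 * The fields below are a simplification of the state a waiter samples
 * under xc_push_lock.
 */
#include <stdbool.h>
#include <stdio.h>

struct cil_state {
	unsigned long long	seq;		/* models xc_current_sequence */
	bool			cil_empty;	/* models list_empty(&xc_cil) */
	bool			on_committing;	/* ctx found on xc_committing? */
};

/* Would a waiter for 'want' wrongly conclude there is nothing to wait for? */
static void
sample(const char *when, const struct cil_state *s, unsigned long long want)
{
	bool looks_idle = !s->on_committing && s->seq == want && s->cil_empty;

	printf("%-22s: waiter %s\n", when,
	       looks_idle ? "sees nothing to wait for (race!)"
			  : "sees the push in progress");
}

int main(void)
{
	unsigned long long want = 5;

	/*
	 * Old ordering: the log vectors are pulled off (emptying the CIL)
	 * before the context is published on the committing list.
	 */
	struct cil_state old_order = { .seq = 5, .cil_empty = true,
				       .on_committing = false };
	sample("old order, mid-push", &old_order, want);

	/*
	 * New ordering: the context goes on the committing list under
	 * xc_push_lock before the CIL is emptied or the sequence bumped.
	 */
	struct cil_state new_order = { .seq = 5, .cil_empty = false,
				       .on_committing = true };
	sample("new order, mid-push", &new_order, want);

	return 0;
}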
@@ -855,13 +882,15 @@ restart:
* Hence by the time we have got here, our sequence may not have been
* pushed yet. This is true if the current sequence still matches the
* push sequence after the above wait loop and the CIL still contains
- * dirty objects.
+ * dirty objects. This is guaranteed by the push code first adding the
+ * context to the committing list before emptying the CIL.
*
- * When the push occurs, it will empty the CIL and atomically increment
- * the current sequence past the push sequence and move it into the
- * committing list. Of course, if the CIL is clean at the time of the
- * push, it won't have pushed the CIL at all, so in that case we should
- * try the push for this sequence again from the start just in case.
+ * Hence if we don't find the context in the committing list and the
+ * current sequence number is unchanged then the CIL contents are
+ * significant. If the CIL is empty, it means there was nothing to push
+ * and that means there is nothing to wait for. If the CIL is not empty,
+ * it means we haven't yet started the push, because if it had started
+ * we would have found the context on the committing list.
*/
if (sequence == cil->xc_current_sequence &&
!list_empty(&cil->xc_cil)) {
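
The comment above boils down to a three-way decision for the waiter once it has scanned the committing list. A minimal stand-alone C sketch of that decision follows; the function, its parameters, and the simplified state are assumptions made for illustration, not the kernel API:

/*
 * Illustration only: the waiter's decision described above, modelled in
 * user space.  Names and the boolean snapshot are assumptions, not the
 * kernel's data structures.
 */
#include <stdbool.h>
#include <stdio.h>

enum waiter_action {
	WAIT_FOR_COMMIT,	/* sequence found on the committing list */
	NOTHING_TO_WAIT_FOR,	/* sequence moved on, or CIL empty */
	RETRY_PUSH		/* CIL dirty, sequence unchanged: push not started */
};

static enum waiter_action
decide(bool found_on_committing, unsigned long long sequence,
       unsigned long long current_sequence, bool cil_empty)
{
	if (found_on_committing)
		return WAIT_FOR_COMMIT;
	if (sequence == current_sequence && !cil_empty)
		return RETRY_PUSH;
	return NOTHING_TO_WAIT_FOR;
}

int main(void)
{
	/* CIL still dirty, sequence unchanged: the push has not started, retry. */
	printf("%d\n", decide(false, 5, 5, false) == RETRY_PUSH);
	/* CIL empty, sequence unchanged: there was nothing to push, done. */
	printf("%d\n", decide(false, 5, 5, true) == NOTHING_TO_WAIT_FOR);
	/* Found on the committing list: wait for the commit to complete. */
	printf("%d\n", decide(true, 5, 6, false) == WAIT_FOR_COMMIT);
	return 0;
}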
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index d1160cce5dad..d45aebe04dde 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -46,7 +46,7 @@
* Keeps track of a current summary block, so we don't keep reading
* it from the buffer cache.
*/
-int
+static int
xfs_rtget_summary(
xfs_mount_t *mp, /* file system mount structure */
xfs_trans_t *tp, /* transaction pointer */