Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--  fs/xfs/xfs_log.c | 143
1 file changed, 99 insertions, 44 deletions
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 8497a00e399d..ca4fd5bd8522 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -34,6 +34,7 @@
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_cksum.h"
+#include "xfs_sysfs.h"
kmem_zone_t *xfs_log_ticket_zone;
@@ -283,7 +284,7 @@ xlog_grant_head_wait(
return 0;
shutdown:
list_del_init(&tic->t_queue);
- return XFS_ERROR(EIO);
+ return -EIO;
}
/*
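Throughout this patch, returns such as XFS_ERROR(EIO) become plain -EIO: the file is moved to the kernel-internal convention of returning negative errno values directly instead of positive codes wrapped in a macro. A minimal userspace sketch of that convention (the function names below are illustrative, not XFS code):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Return 0 on success, a negative errno on failure - no XFS_ERROR() wrapper. */
static int do_log_write(int fail)
{
	return fail ? -EIO : 0;
}

int main(void)
{
	int error = do_log_write(1);

	if (error)
		fprintf(stderr, "log write failed: %s\n", strerror(-error));
	return 0;
}
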
@@ -377,7 +378,7 @@ xfs_log_regrant(
int error = 0;
if (XLOG_FORCED_SHUTDOWN(log))
- return XFS_ERROR(EIO);
+ return -EIO;
XFS_STATS_INC(xs_try_logspace);
@@ -446,7 +447,7 @@ xfs_log_reserve(
ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
if (XLOG_FORCED_SHUTDOWN(log))
- return XFS_ERROR(EIO);
+ return -EIO;
XFS_STATS_INC(xs_try_logspace);
@@ -454,7 +455,7 @@ xfs_log_reserve(
tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
KM_SLEEP | KM_MAYFAIL);
if (!tic)
- return XFS_ERROR(ENOMEM);
+ return -ENOMEM;
tic->t_trans_type = t_type;
*ticp = tic;
@@ -590,7 +591,7 @@ xfs_log_release_iclog(
{
if (xlog_state_release_iclog(mp->m_log, iclog)) {
xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
- return EIO;
+ return -EIO;
}
return 0;
@@ -616,17 +617,19 @@ xfs_log_mount(
int error = 0;
int min_logfsbs;
- if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
- xfs_notice(mp, "Mounting Filesystem");
- else {
+ if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
+ xfs_notice(mp, "Mounting V%d Filesystem",
+ XFS_SB_VERSION_NUM(&mp->m_sb));
+ } else {
xfs_notice(mp,
-"Mounting filesystem in no-recovery mode. Filesystem will be inconsistent.");
+"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
+ XFS_SB_VERSION_NUM(&mp->m_sb));
ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
}
mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
if (IS_ERR(mp->m_log)) {
- error = -PTR_ERR(mp->m_log);
+ error = PTR_ERR(mp->m_log);
goto out;
}
@@ -650,18 +653,18 @@ xfs_log_mount(
xfs_warn(mp,
"Log size %d blocks too small, minimum size is %d blocks",
mp->m_sb.sb_logblocks, min_logfsbs);
- error = EINVAL;
+ error = -EINVAL;
} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
xfs_warn(mp,
"Log size %d blocks too large, maximum size is %lld blocks",
mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
- error = EINVAL;
+ error = -EINVAL;
} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
xfs_warn(mp,
"log size %lld bytes too large, maximum size is %lld bytes",
XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
XFS_MAX_LOG_BYTES);
- error = EINVAL;
+ error = -EINVAL;
}
if (error) {
if (xfs_sb_version_hascrc(&mp->m_sb)) {
@@ -705,6 +708,11 @@ xfs_log_mount(
}
}
+ error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
+ "log");
+ if (error)
+ goto out_destroy_ail;
+
/* Normal transactions can now occur */
mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
@@ -945,6 +953,9 @@ xfs_log_unmount(
xfs_log_quiesce(mp);
xfs_trans_ail_destroy(mp);
+
+ xfs_sysfs_del(&mp->m_log->l_kobj);
+
xlog_dealloc_log(mp->m_log);
}
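The xfs_sysfs_init() call added to xfs_log_mount() slots into that function's existing goto-based unwind chain (failure jumps to out_destroy_ail), and the matching xfs_sysfs_del() in xfs_log_unmount() removes the kobject before the log itself is torn down. A small userspace sketch of this init/unwind/teardown pairing, with purely illustrative names standing in for the XFS helpers:

#include <stdio.h>

static int setup_ail(void) { return 0; }
static void destroy_ail(void) { }
static int setup_log_kobj(void) { return -1; }	/* pretend sysfs init fails */
static void remove_log_kobj(void) { }

static int mount_like(void)
{
	int error;

	error = setup_ail();
	if (error)
		goto out;
	error = setup_log_kobj();
	if (error)
		goto out_destroy_ail;	/* mirrors "goto out_destroy_ail" above */
	return 0;

out_destroy_ail:
	destroy_ail();
out:
	return error;
}

static void unmount_like(void)
{
	/* teardown runs in reverse order of setup, as in xfs_log_unmount() */
	remove_log_kobj();
	destroy_ail();
}

int main(void)
{
	if (mount_like())
		fprintf(stderr, "mount failed, partial setup unwound\n");
	else
		unmount_like();
	return 0;
}
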
@@ -1163,7 +1174,7 @@ xlog_iodone(xfs_buf_t *bp)
/*
* Race to shutdown the filesystem if we see an error.
*/
- if (XFS_TEST_ERROR((xfs_buf_geterror(bp)), l->l_mp,
+ if (XFS_TEST_ERROR(bp->b_error, l->l_mp,
XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
xfs_buf_ioerror_alert(bp, __func__);
xfs_buf_stale(bp);
@@ -1181,11 +1192,14 @@ xlog_iodone(xfs_buf_t *bp)
/* log I/O is always issued ASYNC */
ASSERT(XFS_BUF_ISASYNC(bp));
xlog_state_done_syncing(iclog, aborted);
+
/*
- * do not reference the buffer (bp) here as we could race
- * with it being freed after writing the unmount record to the
- * log.
+ * drop the buffer lock now that we are done. Nothing references
+ * the buffer after this, so an unmount waiting on this lock can now
+ * tear it down safely. As such, it is unsafe to reference the buffer
+ * (bp) after the unlock as we could race with it being freed.
*/
+ xfs_buf_unlock(bp);
}
/*
@@ -1308,7 +1322,7 @@ xlog_alloc_log(
xlog_in_core_t *iclog, *prev_iclog=NULL;
xfs_buf_t *bp;
int i;
- int error = ENOMEM;
+ int error = -ENOMEM;
uint log2_size = 0;
log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
@@ -1335,7 +1349,7 @@ xlog_alloc_log(
xlog_grant_head_init(&log->l_reserve_head);
xlog_grant_head_init(&log->l_write_head);
- error = EFSCORRUPTED;
+ error = -EFSCORRUPTED;
if (xfs_sb_version_hassector(&mp->m_sb)) {
log2_size = mp->m_sb.sb_logsectlog;
if (log2_size < BBSHIFT) {
@@ -1364,12 +1378,26 @@ xlog_alloc_log(
xlog_get_iclog_buffer_size(mp, log);
- error = ENOMEM;
- bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0);
+ /*
+ * Use a NULL block for the extra log buffer used during splits so that
+ * it will trigger errors if we ever try to do IO on it without first
+ * having set it up properly.
+ */
+ error = -ENOMEM;
+ bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL,
+ BTOBB(log->l_iclog_size), 0);
if (!bp)
goto out_free_log;
- bp->b_iodone = xlog_iodone;
+
+ /*
+ * The iclogbuf buffer locks are held over IO but we are not going to do
+ * IO yet. Hence unlock the buffer so that the log IO path can grab it
+ * when appropriately.
+ */
ASSERT(xfs_buf_islocked(bp));
+ xfs_buf_unlock(bp);
+
+ bp->b_iodone = xlog_iodone;
log->l_xbuf = bp;
spin_lock_init(&log->l_icloglock);
@@ -1398,6 +1426,9 @@ xlog_alloc_log(
if (!bp)
goto out_free_iclog;
+ ASSERT(xfs_buf_islocked(bp));
+ xfs_buf_unlock(bp);
+
bp->b_iodone = xlog_iodone;
iclog->ic_bp = bp;
iclog->ic_data = bp->b_addr;
@@ -1422,7 +1453,6 @@ xlog_alloc_log(
iclog->ic_callback_tail = &(iclog->ic_callback);
iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
- ASSERT(xfs_buf_islocked(iclog->ic_bp));
init_waitqueue_head(&iclog->ic_force_wait);
init_waitqueue_head(&iclog->ic_write_wait);
@@ -1448,7 +1478,7 @@ out_free_iclog:
out_free_log:
kmem_free(log);
out:
- return ERR_PTR(-error);
+ return ERR_PTR(error);
} /* xlog_alloc_log */
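xlog_alloc_log() also switches the extra split buffer from block 0 to XFS_BUF_DADDR_NULL, so that any IO issued against it before it has been pointed at real log data fails loudly instead of silently hitting block zero. The same sentinel idea in a self-contained sketch (the constant and struct below are illustrative stand-ins, not the XFS definitions):

#include <stdint.h>
#include <stdio.h>

#define FAKE_DADDR_NULL	((uint64_t)-1)	/* stand-in for XFS_BUF_DADDR_NULL */

struct fake_buf {
	uint64_t daddr;		/* disk address the buffer currently maps */
};

static int submit_io(struct fake_buf *bp)
{
	if (bp->daddr == FAKE_DADDR_NULL) {
		/* catches use of the split buffer before it is set up */
		fprintf(stderr, "BUG: IO on unmapped buffer\n");
		return -1;
	}
	printf("IO to daddr %llu\n", (unsigned long long)bp->daddr);
	return 0;
}

int main(void)
{
	struct fake_buf bp = { .daddr = FAKE_DADDR_NULL };

	submit_io(&bp);		/* rejected: still carries the sentinel */
	bp.daddr = 1024;	/* point it at real blocks first... */
	submit_io(&bp);		/* ...then IO goes ahead */
	return 0;
}
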
@@ -1631,6 +1661,12 @@ xlog_cksum(
* we transition the iclogs to IOERROR state *after* flushing all existing
* iclogs to disk. This is because we don't want anymore new transactions to be
* started or completed afterwards.
+ *
+ * We lock the iclogbufs here so that we can serialise against IO completion
+ * during unmount. We might be processing a shutdown triggered during unmount,
+ * and that can occur asynchronously to the unmount thread, and hence we need to
+ * ensure that completes before tearing down the iclogbufs. Holding the
+ * buffer lock across the log IO achieves that.
*/
STATIC int
xlog_bdstrat(
@@ -1638,14 +1674,16 @@ xlog_bdstrat(
{
struct xlog_in_core *iclog = bp->b_fspriv;
+ xfs_buf_lock(bp);
if (iclog->ic_state & XLOG_STATE_IOERROR) {
- xfs_buf_ioerror(bp, EIO);
+ xfs_buf_ioerror(bp, -EIO);
xfs_buf_stale(bp);
xfs_buf_ioend(bp, 0);
/*
* It would seem logical to return EIO here, but we rely on
* the log state machine to propagate I/O errors instead of
- * doing it here.
+ * doing it here. Similarly, IO completion will unlock the
+ * buffer, so we don't do it here.
*/
return 0;
}
@@ -1847,14 +1885,28 @@ xlog_dealloc_log(
xlog_cil_destroy(log);
/*
- * always need to ensure that the extra buffer does not point to memory
- * owned by another log buffer before we free it.
+ * Cycle all the iclogbuf locks to make sure all log IO completion
+ * is done before we tear down these buffers.
*/
+ iclog = log->l_iclog;
+ for (i = 0; i < log->l_iclog_bufs; i++) {
+ xfs_buf_lock(iclog->ic_bp);
+ xfs_buf_unlock(iclog->ic_bp);
+ iclog = iclog->ic_next;
+ }
+
+ /*
+ * Always need to ensure that the extra buffer does not point to memory
+ * owned by another log buffer before we free it. Also, cycle the lock
+ * first to ensure we've completed IO on it.
+ */
+ xfs_buf_lock(log->l_xbuf);
+ xfs_buf_unlock(log->l_xbuf);
xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size));
xfs_buf_free(log->l_xbuf);
iclog = log->l_iclog;
- for (i=0; i<log->l_iclog_bufs; i++) {
+ for (i = 0; i < log->l_iclog_bufs; i++) {
xfs_buf_free(iclog->ic_bp);
next_iclog = iclog->ic_next;
kmem_free(iclog);
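The lock-cycling loop added to xlog_dealloc_log() is the other half of the scheme introduced in xlog_bdstrat()/xlog_iodone() above: submission takes the buffer lock and completion drops it, so taking and immediately releasing each iclogbuf lock at teardown guarantees that any in-flight log IO completion has finished with the buffer before it is freed. Because the lock is released from a different context than the one that acquired it, the xfs_buf lock is a semaphore rather than a mutex; here is a minimal userspace analogue using a POSIX semaphore (illustrative only, none of these names are XFS code):

#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>
#include <unistd.h>

struct fake_buf {
	sem_t lock;	/* stands in for the xfs_buf lock */
	int data;
};

static void *io_completion(void *arg)
{
	struct fake_buf *bp = arg;

	usleep(100000);		/* simulate IO latency */
	bp->data = 0;		/* completion touches the buffer... */
	sem_post(&bp->lock);	/* ...then drops the lock, like xlog_iodone() */
	return NULL;
}

int main(void)
{
	struct fake_buf *bp = malloc(sizeof(*bp));
	pthread_t tid;

	sem_init(&bp->lock, 0, 0);	/* starts "locked": IO is in flight */
	pthread_create(&tid, NULL, io_completion, bp);

	/*
	 * Teardown: cycle the lock so the buffer cannot be freed while
	 * completion is still running against it.
	 */
	sem_wait(&bp->lock);
	sem_post(&bp->lock);

	pthread_join(tid, NULL);
	sem_destroy(&bp->lock);
	free(bp);			/* now safe to free */
	return 0;
}
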
@@ -2323,7 +2375,7 @@ xlog_write(
ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
if (!ophdr)
- return XFS_ERROR(EIO);
+ return -EIO;
xlog_write_adv_cnt(&ptr, &len, &log_offset,
sizeof(struct xlog_op_header));
@@ -2822,7 +2874,7 @@ restart:
spin_lock(&log->l_icloglock);
if (XLOG_FORCED_SHUTDOWN(log)) {
spin_unlock(&log->l_icloglock);
- return XFS_ERROR(EIO);
+ return -EIO;
}
iclog = log->l_iclog;
@@ -3010,7 +3062,7 @@ xlog_state_release_iclog(
int sync = 0; /* do we sync? */
if (iclog->ic_state & XLOG_STATE_IOERROR)
- return XFS_ERROR(EIO);
+ return -EIO;
ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
@@ -3018,7 +3070,7 @@ xlog_state_release_iclog(
if (iclog->ic_state & XLOG_STATE_IOERROR) {
spin_unlock(&log->l_icloglock);
- return XFS_ERROR(EIO);
+ return -EIO;
}
ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
iclog->ic_state == XLOG_STATE_WANT_SYNC);
@@ -3135,7 +3187,7 @@ _xfs_log_force(
iclog = log->l_iclog;
if (iclog->ic_state & XLOG_STATE_IOERROR) {
spin_unlock(&log->l_icloglock);
- return XFS_ERROR(EIO);
+ return -EIO;
}
/* If the head iclog is not active nor dirty, we just attach
@@ -3173,7 +3225,7 @@ _xfs_log_force(
spin_unlock(&log->l_icloglock);
if (xlog_state_release_iclog(log, iclog))
- return XFS_ERROR(EIO);
+ return -EIO;
if (log_flushed)
*log_flushed = 1;
@@ -3209,7 +3261,7 @@ maybe_sleep:
*/
if (iclog->ic_state & XLOG_STATE_IOERROR) {
spin_unlock(&log->l_icloglock);
- return XFS_ERROR(EIO);
+ return -EIO;
}
XFS_STATS_INC(xs_log_force_sleep);
xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
@@ -3219,7 +3271,7 @@ maybe_sleep:
* and the memory read should be atomic.
*/
if (iclog->ic_state & XLOG_STATE_IOERROR)
- return XFS_ERROR(EIO);
+ return -EIO;
if (log_flushed)
*log_flushed = 1;
} else {
@@ -3287,7 +3339,7 @@ try_again:
iclog = log->l_iclog;
if (iclog->ic_state & XLOG_STATE_IOERROR) {
spin_unlock(&log->l_icloglock);
- return XFS_ERROR(EIO);
+ return -EIO;
}
do {
@@ -3338,7 +3390,7 @@ try_again:
xlog_state_switch_iclogs(log, iclog, 0);
spin_unlock(&log->l_icloglock);
if (xlog_state_release_iclog(log, iclog))
- return XFS_ERROR(EIO);
+ return -EIO;
if (log_flushed)
*log_flushed = 1;
spin_lock(&log->l_icloglock);
@@ -3353,7 +3405,7 @@ try_again:
*/
if (iclog->ic_state & XLOG_STATE_IOERROR) {
spin_unlock(&log->l_icloglock);
- return XFS_ERROR(EIO);
+ return -EIO;
}
XFS_STATS_INC(xs_log_force_sleep);
xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
@@ -3363,7 +3415,7 @@ try_again:
* and the memory read should be atomic.
*/
if (iclog->ic_state & XLOG_STATE_IOERROR)
- return XFS_ERROR(EIO);
+ return -EIO;
if (log_flushed)
*log_flushed = 1;
@@ -3915,11 +3967,14 @@ xfs_log_force_umount(
retval = xlog_state_ioerror(log);
spin_unlock(&log->l_icloglock);
}
+
/*
- * Wake up everybody waiting on xfs_log_force.
- * Callback all log item committed functions as if the
- * log writes were completed.
+ * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
+ * as if the log writes were completed. The abort handling in the log
+ * item committed callback functions will do this again under lock to
+ * avoid races.
*/
+ wake_up_all(&log->l_cilp->xc_commit_wait);
xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);
#ifdef XFSERRORDEBUG
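The reworked comment in xfs_log_force_umount() describes waking every thread blocked waiting for a commit before the abort callbacks run, so waiters observe the shutdown instead of sleeping forever. A rough userspace analogue of that wake-all-on-abort pattern using a condition variable (illustrative only, not XFS's CIL wait machinery):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t commit_wait = PTHREAD_COND_INITIALIZER;
static int committed;
static int aborted;

static void *commit_waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!committed && !aborted)
		pthread_cond_wait(&commit_wait, &lock);
	printf("waiter sees %s\n", aborted ? "abort" : "commit");
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t waiters[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&waiters[i], NULL, commit_waiter, NULL);

	/* shutdown path: mark the abort and wake everybody at once */
	pthread_mutex_lock(&lock);
	aborted = 1;
	pthread_cond_broadcast(&commit_wait);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 4; i++)
		pthread_join(waiters[i], NULL);
	return 0;
}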