author		Bob Peterson <rpeterso@redhat.com>	2017-01-05 16:01:45 -0500
committer	Bob Peterson <rpeterso@redhat.com>	2017-01-05 16:01:45 -0500
commit		f07b352021483a3a38f081dc284928400a9c1d2c (patch)
tree		feef7f57a98a0fc58ebfa693d7c435f2029c68bb /fs/gfs2/log.c
parent		2fcf5cc3be06126f9aa2430ca6d739c8b3c5aaf5 (diff)
GFS2: Made logd daemon take into account log demand
Before this patch, the logd daemon only tried to flush things when the number of pinned log blocks exceeded a certain threshold. But when we're deleting very large files, the operation may require a huge number of journal blocks, and that demand, in turn, may exceed the threshold. This patch factors the outstanding demand into the flush decision.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
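The heart of the change is the flush trigger: rather than comparing only the pinned block count against the first threshold, the demanded (but not yet reserved) journal blocks are added in. The standalone C11 sketch below illustrates the before/after arithmetic; the field names merely mirror sd_log_pinned, sd_log_blks_needed and sd_log_thresh1, and the program is an illustration only, not kernel code.

/*
 * Standalone illustration (not kernel code) of the flush-trigger change:
 * demanded-but-unreserved journal blocks are added to the pinned count
 * before comparing against the first threshold.
 */
#include <stdatomic.h>
#include <stdio.h>

struct log_state {
	atomic_int pinned;	/* blocks already pinned in the log */
	atomic_int blks_needed;	/* blocks demanded but not yet reserved */
	atomic_int thresh1;	/* first flush threshold */
};

/* Old behaviour: only pinned blocks counted toward the threshold. */
static int flush_reqd_old(struct log_state *s)
{
	return atomic_load(&s->pinned) >= atomic_load(&s->thresh1);
}

/* New behaviour: outstanding demand counts as well, so a large pending
 * reservation (e.g. deleting a huge file) trips the trigger even though
 * few blocks are pinned yet. */
static int flush_reqd_new(struct log_state *s)
{
	return atomic_load(&s->pinned) + atomic_load(&s->blks_needed) >=
	       atomic_load(&s->thresh1);
}

int main(void)
{
	struct log_state s;

	atomic_init(&s.pinned, 100);
	atomic_init(&s.blks_needed, 400);	/* big delete wants many blocks */
	atomic_init(&s.thresh1, 256);

	printf("old trigger: %d\n", flush_reqd_old(&s));	/* 0: no flush */
	printf("new trigger: %d\n", flush_reqd_new(&s));	/* 1: flush */
	return 0;
}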
Diffstat (limited to 'fs/gfs2/log.c')
-rw-r--r--	fs/gfs2/log.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e58ccef09c91..4df349c7f022 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -349,6 +349,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 	if (gfs2_assert_warn(sdp, blks) ||
 	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
 		return -EINVAL;
+	atomic_add(blks, &sdp->sd_log_blks_needed);
 retry:
 	free_blocks = atomic_read(&sdp->sd_log_blks_free);
 	if (unlikely(free_blocks <= wanted)) {
@@ -370,6 +371,7 @@ retry:
 			wake_up(&sdp->sd_reserving_log_wait);
 		goto retry;
 	}
+	atomic_sub(blks, &sdp->sd_log_blks_needed);
 	trace_gfs2_log_blocks(sdp, -blks);
 
 	/*
@@ -891,13 +893,16 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
 
 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
 {
-	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
+	return (atomic_read(&sdp->sd_log_pinned) +
+		atomic_read(&sdp->sd_log_blks_needed) >=
+		atomic_read(&sdp->sd_log_thresh1));
 }
 
 static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
 {
 	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
-	return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
+	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
+		atomic_read(&sdp->sd_log_thresh2);
 }
 
 /**
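For context, the first two hunks are what keep sd_log_blks_needed accurate: demand is advertised with atomic_add() before the caller may block waiting for free log blocks, and retracted with atomic_sub() once the reservation has been granted. Below is a minimal user-space sketch of that advertise-then-retract pattern; reserve_blocks(), log_blks_free and log_blks_needed are illustrative names, not GFS2 API.

/*
 * User-space sketch (not GFS2 code) of the reserve-path accounting added
 * by this patch: advertise demand before potentially sleeping, retract it
 * once the reservation succeeds, so a flush daemon's threshold checks can
 * see work that is still waiting for log space.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int log_blks_free;	/* stands in for sd_log_blks_free */
static atomic_int log_blks_needed;	/* stands in for sd_log_blks_needed */

static void reserve_blocks(int blks)
{
	int avail;

	/* Advertise the demand first, so a flusher checking
	 * pinned + needed >= threshold sees it while we wait. */
	atomic_fetch_add(&log_blks_needed, blks);

	for (;;) {
		avail = atomic_load(&log_blks_free);
		if (avail > blks &&
		    atomic_compare_exchange_weak(&log_blks_free, &avail,
						 avail - blks))
			break;
		/* In the kernel this is where the caller would sleep and
		 * the logd daemon would be woken to flush the journal. */
	}

	/* Reservation granted: the demand is now reflected in the consumed
	 * blocks, so drop it from the "needed" counter again. */
	atomic_fetch_sub(&log_blks_needed, blks);
}

int main(void)
{
	atomic_init(&log_blks_free, 512);
	atomic_init(&log_blks_needed, 0);

	reserve_blocks(32);
	printf("free=%d needed=%d\n",
	       atomic_load(&log_blks_free),
	       atomic_load(&log_blks_needed));
	return 0;
}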