author		Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-10-24 13:31:34 +0900
committer	Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-10-25 16:54:37 +0900
commit		81eb8d6e2869b119d4a7b8c02091c3779733a3ac (patch)
tree		cdff776596fa79bfc9c2f2baf7aa59470dc29aec /fs/f2fs
parent		aabe51364f44681cbd83fb1c27ef7d3dbe567c45 (diff)
f2fs: reclaim prefree segments periodically
Previously, f2fs postpones reclaiming prefree segments into free segments as much as possible. However, if a user writes and deletes a bunch of data without any sync or fsync calls, some flash storages can suffer from garbage collections.

So, this patch adds the reclaiming code to f2fs_write_node_pages and the background GC thread. If there are a lot of prefree segments, let's do a checkpoint so that f2fs submits discard commands for the prefree regions to the flash storage.

Signed-off-by: Changman Lee <cm224.lee@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
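As a rough illustration of the control flow this patch adds, here is a minimal standalone C sketch. The names sbi_sketch, sm_info_sketch, and checkpoint_and_discard are hypothetical stand-ins for the kernel's f2fs_sb_info, f2fs_sm_info, prefree_segments() and f2fs_sync_fs(); only the decision itself is modeled: when the number of prefree segments exceeds the reclaim threshold, a checkpoint is forced so the prefree regions can be discarded.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures touched by this patch. */
struct sm_info_sketch {
	unsigned int rec_prefree_segments;	/* threshold, cf. DEF_RECLAIM_PREFREE_SEGMENTS */
};

struct sbi_sketch {
	struct sm_info_sketch sm;
	unsigned int prefree_segments;		/* current prefree segment count */
};

/* Mirrors the new excess_prefree_segs() helper: too many prefree segments? */
static bool excess_prefree_segs(struct sbi_sketch *sbi)
{
	return sbi->prefree_segments > sbi->sm.rec_prefree_segments;
}

/* Stand-in for f2fs_sync_fs(sb, true): checkpoint, then discard prefree regions. */
static void checkpoint_and_discard(struct sbi_sketch *sbi)
{
	printf("checkpoint: reclaiming %u prefree segments\n", sbi->prefree_segments);
	sbi->prefree_segments = 0;
}

int main(void)
{
	struct sbi_sketch sbi = { .sm = { .rec_prefree_segments = 100 },
				  .prefree_segments = 150 };

	/* The same check the patch adds to the GC thread and node writeback. */
	if (excess_prefree_segs(&sbi))
		checkpoint_and_discard(&sbi);

	return 0;
}

The real patch performs this check in two places, the background GC loop and f2fs_write_node_pages, as the hunks below show.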
Diffstat (limited to 'fs/f2fs')
-rw-r--r--  fs/f2fs/f2fs.h     3
-rw-r--r--  fs/f2fs/gc.c       5
-rw-r--r--  fs/f2fs/node.c     3
-rw-r--r--  fs/f2fs/segment.c  1
-rw-r--r--  fs/f2fs/segment.h  7
5 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e8f3fd345ca3..6dff777cd94a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -299,6 +299,9 @@ struct f2fs_sm_info {
unsigned int main_segments; /* # of segments in main area */
unsigned int reserved_segments; /* # of reserved segments */
unsigned int ovp_segments; /* # of overprovision segments */
+
+ /* a threshold to reclaim prefree segments */
+ unsigned int rec_prefree_segments;
};
/*
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index cb286d7b02b2..783c6cc6253c 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -82,6 +82,11 @@ static int gc_thread_func(void *data)
/* if return value is not zero, no victim was selected */
if (f2fs_gc(sbi))
wait_ms = gc_th->no_gc_sleep_time;
+
+ /* balancing prefree segments */
+ if (excess_prefree_segs(sbi))
+ f2fs_sync_fs(sbi->sb, true);
+
} while (!kthread_should_stop());
return 0;
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index cc119b65a0d3..89dd8a5a3ca3 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1206,7 +1206,8 @@ static int f2fs_write_node_pages(struct address_space *mapping,
long nr_to_write = wbc->nr_to_write;
/* First check balancing cached NAT entries */
- if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
+ if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
+ excess_prefree_segs(sbi)) {
f2fs_sync_fs(sbi->sb, true);
return 0;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 177a33b16b2c..62b52f2f293a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1645,6 +1645,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
+ sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;
err = build_sit_info(sbi);
if (err)
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 7f94d78cda3d..abe7094c4f7a 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -14,6 +14,8 @@
#define NULL_SEGNO ((unsigned int)(~0))
#define NULL_SECNO ((unsigned int)(~0))
+#define DEF_RECLAIM_PREFREE_SEGMENTS 100 /* 200MB of prefree segments */
+
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno)
@@ -472,6 +474,11 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
reserved_sections(sbi)));
}
+static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
+{
+ return (prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments);
+}
+
static inline int utilization(struct f2fs_sb_info *sbi)
{
return div_u64((u64)valid_user_blocks(sbi) * 100, sbi->user_block_count);
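
A note on the default threshold, as a worked calculation: DEF_RECLAIM_PREFREE_SEGMENTS is 100, and assuming f2fs's default 2MB segment size (512 blocks of 4KB), that matches the 200MB mentioned in the comment: 100 segments x 2MB = 200MB. Once more than that amount of prefree space has accumulated, both the GC thread and node-page writeback force a checkpoint via f2fs_sync_fs(sb, true), which lets discard commands be submitted for the reclaimed regions.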