Merge tag 'f2fs-for-6.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
Pull f2fs updates from Jaegeuk Kim:
 "In this round, we've added two features: F2FS_IOC_START_ATOMIC_REPLACE
  and a per-block age-based extent cache. F2FS_IOC_START_ATOMIC_REPLACE
  is a variant of the previous atomic write feature which guarantees
  per-file atomicity; it should be more efficient than the AtomicFile
  implementation in the Android framework. The per-block age-based extent
  cache implements another type of in-memory extent cache that keeps the
  age of each block in a file, so that the block allocator can separate
  hot and cold data blocks more accurately.

  Enhancements:
   - introduce F2FS_IOC_START_ATOMIC_REPLACE
   - refactor extent_cache to add support for a new per-block-age-based
     extent cache
   - introduce the discard_urgent_util, gc_mode, and max_ordered_discard
     sysfs knobs
   - add a proc entry to show discard_plist info
   - optimize iteration over sparse directories
   - add a barrier mount option

  Bug fixes:
   - avoid victim selection from the previous victim section
   - enable compression for a newly created file if its extension matches
   - set the zstd compress level correctly
   - initialize locks early in f2fs_fill_super() to fix bugs reported by
     syzbot
   - correct the i_size change for atomic writes
   - allow reading a node block after shutdown
   - allow setting compression on an inlined file
   - fix gc mode when gc_urgent_high_remaining is 1
   - put the page when checking the summary info

  There are also minor fixes and various clean-ups in GC, discard,
  debugfs, sysfs, and documentation"

* tag 'f2fs-for-6.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (63 commits)
  f2fs: reset wait_ms to default if any of the victims have been selected
  f2fs: fix some format WARNING in debug.c and sysfs.c
  f2fs: don't call f2fs_issue_discard_timeout() when discard_cmd_cnt is 0 in f2fs_put_super()
  f2fs: fix iostat parameter for discard
  f2fs: Fix spelling mistake in label: free_bio_enrty_cache -> free_bio_entry_cache
  f2fs: add block_age-based extent cache
  f2fs: allocate the extent_cache by default
  f2fs: refactor extent_cache to support for read and more
  f2fs: remove unnecessary __init_extent_tree
  f2fs: move internal functions into extent_cache.c
  f2fs: specify extent cache for read explicitly
  f2fs: introduce f2fs_is_readonly() for readability
  f2fs: remove F2FS_SET_FEATURE() and F2FS_CLEAR_FEATURE() macro
  f2fs: do some cleanup for f2fs module init
  MAINTAINERS: Add f2fs bug tracker link
  f2fs: remove the unused flush argument to change_curseg
  f2fs: open code allocate_segment_by_default
  f2fs: remove struct segment_allocation default_salloc_ops
  f2fs: introduce discard_urgent_util sysfs node
  f2fs: define MIN_DISCARD_GRANULARITY macro
  ...
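The pull message only sketches the semantics of F2FS_IOC_START_ATOMIC_REPLACE
(stage a whole-file rewrite, then publish it in one step, paired with the
pre-existing F2FS_IOC_COMMIT_ATOMIC_WRITE). A minimal userspace sketch of the
intended flow follows, assuming a 6.2+ <linux/f2fs.h> that exports the ioctl
numbers; the helper name and the bare-bones error handling are illustrative,
not part of this series:

    /*
     * Sketch: atomically replace a file's contents. Readers keep seeing
     * the old data until the commit ioctl succeeds.
     */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/f2fs.h>

    static int replace_file_atomically(const char *path,
                                       const void *buf, size_t len)
    {
        int fd = open(path, O_WRONLY);

        if (fd < 0)
            return -1;

        /* Stage a full-file replacement instead of an in-place update. */
        if (ioctl(fd, F2FS_IOC_START_ATOMIC_REPLACE) < 0)
            goto fail;

        if (write(fd, buf, len) != (ssize_t)len)
            goto fail;

        /* Publish: after this, i_size covers exactly the new data. */
        if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0)
            goto fail;

        return close(fd);

    fail:
        close(fd);      /* closing without committing drops the staged data */
        return -1;
    }

Compared with the temp-file-plus-fsync-plus-rename pattern that AtomicFile
uses, the data is written once and no rename is needed, which is presumably
where the efficiency claim above comes from.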
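Likewise, the allocator side of the new age-based extent cache reduces to two
thresholds, as the __get_age_segment_type() hunk in the diff below shows. A
simplified, self-contained model of that decision (names here are
illustrative; the thresholds correspond to the hot_data_age_threshold and
warm_data_age_threshold fields in the diff):

    #include <stdint.h>

    enum temp { TEMP_HOT, TEMP_WARM, TEMP_COLD, TEMP_UNKNOWN };

    /* Younger blocks (recently rewritten) are treated as hotter. */
    static enum temp classify_block_age(uint64_t age,
                                        uint64_t hot_threshold,
                                        uint64_t warm_threshold)
    {
        if (!age)                       /* no age recorded yet */
            return TEMP_UNKNOWN;
        if (age <= hot_threshold)
            return TEMP_HOT;
        if (age <= warm_threshold)
            return TEMP_WARM;
        return TEMP_COLD;
    }

In the actual hunk, this age check slots between the cold-file and hot-file
heuristics in __get_segment_type_6(), so explicit per-inode hints still take
precedence.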
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -192,14 +192,19 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
 	if (!f2fs_is_atomic_file(inode))
 		return;
 
-	if (clean)
-		truncate_inode_pages_final(inode->i_mapping);
 	clear_inode_flag(fi->cow_inode, FI_COW_FILE);
 	iput(fi->cow_inode);
 	fi->cow_inode = NULL;
 	release_atomic_write_cnt(inode);
+	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
+	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
 	clear_inode_flag(inode, FI_ATOMIC_FILE);
 	stat_dec_atomic_inode(inode);
+
+	if (clean) {
+		truncate_inode_pages_final(inode->i_mapping);
+		f2fs_i_size_write(inode, fi->original_i_size);
+	}
 }
 
 static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
@@ -257,14 +262,19 @@ static void __complete_revoke_list(struct inode *inode, struct list_head *head,
 					bool revoke)
 {
 	struct revoke_entry *cur, *tmp;
+	bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);
 
 	list_for_each_entry_safe(cur, tmp, head, list) {
 		if (revoke)
 			__replace_atomic_write_block(inode, cur->index,
 						cur->old_addr, NULL, true);
+
 		list_del(&cur->list);
 		kmem_cache_free(revoke_entry_slab, cur);
 	}
+
+	if (!revoke && truncate)
+		f2fs_do_truncate_blocks(inode, 0, false);
 }
 
 static int __f2fs_commit_atomic_write(struct inode *inode)
@@ -335,10 +345,12 @@ next:
 	}
 
 out:
-	if (ret)
+	if (ret) {
 		sbi->revoked_atomic_block += fi->atomic_write_cnt;
-	else
+	} else {
 		sbi->committed_atomic_block += fi->atomic_write_cnt;
+		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
+	}
 
 	__complete_revoke_list(inode, &revoke_list, ret ? true : false);
 
@@ -437,8 +449,14 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
 		return;
 
 	/* try to shrink extent cache when there is no enough memory */
-	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
-		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
+	if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
+		f2fs_shrink_read_extent_tree(sbi,
+				READ_EXTENT_CACHE_SHRINK_NUMBER);
+
+	/* try to shrink age extent cache when there is no enough memory */
+	if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
+		f2fs_shrink_age_extent_tree(sbi,
+				AGE_EXTENT_CACHE_SHRINK_NUMBER);
 
 	/* check the # of cached NAT entries */
 	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
@@ -620,12 +638,11 @@ int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
 {
 	dev_t dev = sbi->sb->s_bdev->bd_dev;
 	struct flush_cmd_control *fcc;
-	int err = 0;
 
 	if (SM_I(sbi)->fcc_info) {
 		fcc = SM_I(sbi)->fcc_info;
 		if (fcc->f2fs_issue_flush)
-			return err;
+			return 0;
 		goto init_thread;
 	}
 
@@ -638,19 +655,20 @@ int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
 	init_llist_head(&fcc->issue_list);
 	SM_I(sbi)->fcc_info = fcc;
 	if (!test_opt(sbi, FLUSH_MERGE))
-		return err;
+		return 0;
 
 init_thread:
 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(fcc->f2fs_issue_flush)) {
-		err = PTR_ERR(fcc->f2fs_issue_flush);
+		int err = PTR_ERR(fcc->f2fs_issue_flush);
+
 		kfree(fcc);
 		SM_I(sbi)->fcc_info = NULL;
 		return err;
 	}
 
-	return err;
+	return 0;
 }
 
 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
@@ -856,7 +874,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
 	}
 	mutex_unlock(&dirty_i->seglist_lock);
 
-	unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
+	unusable = max(holes[DATA], holes[NODE]);
 	if (unusable > ovp_holes)
 		return unusable - ovp_holes;
 	return 0;
@@ -1052,8 +1070,8 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
 		dpolicy->io_aware = true;
 		dpolicy->sync = false;
 		dpolicy->ordered = true;
-		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
-			dpolicy->granularity = 1;
+		if (utilization(sbi) > dcc->discard_urgent_util) {
+			dpolicy->granularity = MIN_DISCARD_GRANULARITY;
 			if (atomic_read(&dcc->discard_cmd_cnt))
 				dpolicy->max_interval =
 					dcc->min_discard_issue_time;
@@ -1068,7 +1086,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
 	} else if (discard_type == DPOLICY_UMOUNT) {
 		dpolicy->io_aware = false;
 		/* we need to issue all to keep CP_TRIMMED_FLAG */
-		dpolicy->granularity = 1;
+		dpolicy->granularity = MIN_DISCARD_GRANULARITY;
 		dpolicy->timeout = true;
 	}
 }
@@ -1126,13 +1144,12 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
 		if (time_to_inject(sbi, FAULT_DISCARD)) {
 			f2fs_show_injection_info(sbi, FAULT_DISCARD);
 			err = -EIO;
-			goto submit;
-		}
-		err = __blkdev_issue_discard(bdev,
+		} else {
+			err = __blkdev_issue_discard(bdev,
 				SECTOR_FROM_BLOCK(start),
 				SECTOR_FROM_BLOCK(len),
 				GFP_NOFS, &bio);
-submit:
+		}
 		if (err) {
 			spin_lock_irqsave(&dc->lock, flags);
 			if (dc->state == D_PARTIAL)
@@ -1170,7 +1187,7 @@ submit:
 
 	atomic_inc(&dcc->issued_discard);
 
-	f2fs_update_iostat(sbi, NULL, FS_DISCARD, 1);
+	f2fs_update_iostat(sbi, NULL, FS_DISCARD, len * F2FS_BLKSIZE);
 
 	lstart += len;
 	start += len;
@@ -1342,13 +1359,13 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
 	}
 }
 
-static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
+static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
 		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
 	block_t lblkstart = blkstart;
 
 	if (!f2fs_bdev_support_discard(bdev))
-		return 0;
+		return;
 
 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
 
@@ -1360,7 +1377,6 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
 	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
 	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
-	return 0;
 }
 
 static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
@@ -1448,7 +1464,7 @@ retry:
 		if (i + 1 < dpolicy->granularity)
 			break;
 
-		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+		if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered)
 			return __issue_discard_cmd_orderly(sbi, dpolicy);
 
 		pend_list = &dcc->pend_list[i];
@@ -1645,6 +1661,9 @@ bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
 	struct discard_policy dpolicy;
 	bool dropped;
 
+	if (!atomic_read(&dcc->discard_cmd_cnt))
+		return false;
+
 	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
 					dcc->discard_granularity);
 	__issue_discard_cmd(sbi, &dpolicy);
@@ -1669,6 +1688,11 @@ static int issue_discard_thread(void *data)
 	set_freezable();
 
 	do {
+		wait_event_interruptible_timeout(*q,
+				kthread_should_stop() || freezing(current) ||
+				dcc->discard_wake,
+				msecs_to_jiffies(wait_ms));
+
 		if (sbi->gc_mode == GC_URGENT_HIGH ||
 			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
 			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
@@ -1676,14 +1700,6 @@ static int issue_discard_thread(void *data)
 			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
 						dcc->discard_granularity);
 
-		if (!atomic_read(&dcc->discard_cmd_cnt))
-			wait_ms = dpolicy.max_interval;
-
-		wait_event_interruptible_timeout(*q,
-				kthread_should_stop() || freezing(current) ||
-				dcc->discard_wake,
-				msecs_to_jiffies(wait_ms));
-
 		if (dcc->discard_wake)
 			dcc->discard_wake = 0;
 
@@ -1697,12 +1713,11 @@ static int issue_discard_thread(void *data)
 			continue;
 		if (kthread_should_stop())
 			return 0;
-		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
+			!atomic_read(&dcc->discard_cmd_cnt)) {
 			wait_ms = dpolicy.max_interval;
 			continue;
 		}
-		if (!atomic_read(&dcc->discard_cmd_cnt))
-			continue;
 
 		sb_start_intwrite(sbi->sb);
 
@@ -1717,6 +1732,8 @@ static int issue_discard_thread(void *data)
 		} else {
 			wait_ms = dpolicy.max_interval;
 		}
+		if (!atomic_read(&dcc->discard_cmd_cnt))
+			wait_ms = dpolicy.max_interval;
 
 		sb_end_intwrite(sbi->sb);
 
@@ -1760,7 +1777,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
 	}
 
 	/* For conventional zones, use regular discard if supported */
-	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
+	__queue_discard_cmd(sbi, bdev, lblkstart, blklen);
+	return 0;
 }
 #endif
 
@@ -1771,7 +1789,8 @@ static int __issue_discard_async(struct f2fs_sb_info *sbi,
 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
 #endif
-	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
+	__queue_discard_cmd(sbi, bdev, blkstart, blklen);
+	return 0;
 }
 
 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
@@ -2025,8 +2044,10 @@ int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
 
 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
-	if (IS_ERR(dcc->f2fs_issue_discard))
+	if (IS_ERR(dcc->f2fs_issue_discard)) {
 		err = PTR_ERR(dcc->f2fs_issue_discard);
+		dcc->f2fs_issue_discard = NULL;
+	}
 
 	return err;
 }
@@ -2046,6 +2067,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
 		return -ENOMEM;
 
 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
+	dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
 	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
 		dcc->discard_granularity = sbi->blocks_per_seg;
 	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
@@ -2066,6 +2088,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
 	dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
 	dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
 	dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
+	dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL;
 	dcc->undiscard_blks = 0;
 	dcc->next_pos = 0;
 	dcc->root = RB_ROOT_CACHED;
@@ -2096,8 +2119,7 @@ static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
 	 * Recovery can cache discard commands, so in error path of
 	 * fill_super(), it needs to give a chance to handle them.
 	 */
-	if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
-		f2fs_issue_discard_timeout(sbi);
+	f2fs_issue_discard_timeout(sbi);
 
 	kfree(dcc);
 	SM_I(sbi)->dcc_info = NULL;
@@ -2642,7 +2664,7 @@ bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
  * This function always allocates a used segment(from dirty seglist) by SSR
  * manner, so it should recover the existing segment information of valid blocks
  */
-static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
+static void change_curseg(struct f2fs_sb_info *sbi, int type)
 {
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
@@ -2650,9 +2672,7 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
 	struct f2fs_summary_block *sum_node;
 	struct page *sum_page;
 
-	if (flush)
-		write_sum_page(sbi, curseg->sum_blk,
-					GET_SUM_BLOCK(sbi, curseg->segno));
+	write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
 
 	__set_test_and_inuse(sbi, new_segno);
 
@@ -2691,7 +2711,7 @@ static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
 		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
 
 		curseg->seg_type = se->type;
-		change_curseg(sbi, type, true);
+		change_curseg(sbi, type);
 	} else {
 		/* allocate cold segment by default */
 		curseg->seg_type = CURSEG_COLD_DATA;
@@ -2835,31 +2855,20 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
 	return 0;
 }
 
-/*
- * flush out current segment and replace it with new segment
- * This function should be returned with success, otherwise BUG
- */
-static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
-						int type, bool force)
+static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
 
-	if (force)
-		new_curseg(sbi, type, true);
-	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
-					curseg->seg_type == CURSEG_WARM_NODE)
-		new_curseg(sbi, type, false);
-	else if (curseg->alloc_type == LFS &&
-			is_next_segment_free(sbi, curseg, type) &&
-			likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
-		new_curseg(sbi, type, false);
-	else if (f2fs_need_SSR(sbi) &&
-			get_ssr_segment(sbi, type, SSR, 0))
-		change_curseg(sbi, type, true);
-	else
-		new_curseg(sbi, type, false);
-
-	stat_inc_seg_type(sbi, curseg);
+	if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
+			curseg->seg_type == CURSEG_WARM_NODE)
+		return true;
+	if (curseg->alloc_type == LFS &&
+			is_next_segment_free(sbi, curseg, type) &&
+			likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+		return true;
+	if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
+		return true;
+	return false;
 }
 
 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
@@ -2877,7 +2886,7 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
 		goto unlock;
 
 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
-		change_curseg(sbi, type, true);
+		change_curseg(sbi, type);
 	else
 		new_curseg(sbi, type, true);
 
@@ -2912,7 +2921,8 @@ static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
 		return;
alloc:
 	old_segno = curseg->segno;
-	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
+	new_curseg(sbi, type, true);
+	stat_inc_seg_type(sbi, curseg);
 	locate_dirty_segment(sbi, old_segno);
 }
 
@@ -2943,10 +2953,6 @@ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
 }
 
-static const struct segment_allocation default_salloc_ops = {
-	.allocate_segment = allocate_segment_by_default,
-};
-
 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
 						struct cp_control *cpc)
 {
@@ -3152,10 +3158,28 @@ static int __get_segment_type_4(struct f2fs_io_info *fio)
 	}
 }
 
+static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_info ei;
+
+	if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
+		if (!ei.age)
+			return NO_CHECK_TYPE;
+		if (ei.age <= sbi->hot_data_age_threshold)
+			return CURSEG_HOT_DATA;
+		if (ei.age <= sbi->warm_data_age_threshold)
+			return CURSEG_WARM_DATA;
+		return CURSEG_COLD_DATA;
+	}
+	return NO_CHECK_TYPE;
+}
+
 static int __get_segment_type_6(struct f2fs_io_info *fio)
 {
 	if (fio->type == DATA) {
 		struct inode *inode = fio->page->mapping->host;
+		int type;
 
 		if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
 			return CURSEG_COLD_DATA_PINNED;
@@ -3170,6 +3194,11 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
 		}
 		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
 			return CURSEG_COLD_DATA;
+
+		type = __get_age_segment_type(inode, fio->page->index);
+		if (type != NO_CHECK_TYPE)
+			return type;
+
 		if (file_is_hot(inode) ||
 				is_inode_flag_set(inode, FI_HOT_DATA) ||
 				f2fs_is_cow_file(inode))
@@ -3266,11 +3295,19 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 		update_sit_entry(sbi, old_blkaddr, -1);
 
 	if (!__has_curseg_space(sbi, curseg)) {
-		if (from_gc)
+		/*
+		 * Flush out current segment and replace it with new segment.
+		 */
+		if (from_gc) {
 			get_atssr_segment(sbi, type, se->type,
 						AT_SSR, se->mtime);
-		else
-			sit_i->s_ops->allocate_segment(sbi, type, false);
+		} else {
+			if (need_new_seg(sbi, type))
+				new_curseg(sbi, type, false);
+			else
+				change_curseg(sbi, type);
+			stat_inc_seg_type(sbi, curseg);
+		}
 	}
 	/*
 	 * segment dirty status should be updated after segment allocation,
@@ -3280,6 +3317,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
 
+	if (IS_DATASEG(type))
+		atomic64_inc(&sbi->allocated_data_blocks);
+
 	up_write(&sit_i->sentry_lock);
 
 	if (page && IS_NODESEG(type)) {
@@ -3407,6 +3447,8 @@ void f2fs_outplace_write_data(struct dnode_of_data *dn,
 	struct f2fs_summary sum;
 
 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
+	if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO)
+		f2fs_update_age_extent_cache(dn);
 	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
 	do_write_page(&sum, fio);
 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
@@ -3531,7 +3573,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	/* change the current segment */
 	if (segno != curseg->segno) {
 		curseg->next_segno = segno;
-		change_curseg(sbi, type, true);
+		change_curseg(sbi, type);
 	}
 
 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
@@ -3559,7 +3601,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	if (recover_curseg) {
 		if (old_cursegno != curseg->segno) {
 			curseg->next_segno = old_cursegno;
-			change_curseg(sbi, type, true);
+			change_curseg(sbi, type);
 		}
 		curseg->next_blkoff = old_blkoff;
 		curseg->alloc_type = old_alloc_type;
@@ -4256,9 +4298,6 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
 		return -ENOMEM;
 #endif
 
-	/* init SIT information */
-	sit_i->s_ops = &default_salloc_ops;
-
 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
 	sit_i->written_valid_blocks = 0;