// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (try_to_freeze() || f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we'd better wait for a
		 * while to collect more dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't let foreground gc disturb wait_ms */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
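
/*
 * Illustrative wait_ms behaviour for the loop above, assuming the
 * default tunables from gc.h at the time of writing (urgent/min/max/
 * no-gc sleep = 0.5s/30s/60s/300s):
 *
 *   - idle, little invalid data: sleep grows from 30s toward the 60s cap
 *   - enough invalid blocks:     sleep shrinks back toward 30s
 *   - no victim found (BG GC):   park at 300s until work appears
 *   - GC_URGENT_HIGH/MID:        fixed 0.5s between rounds
 *
 * The exact step sizes live in increase_sleep_time() and
 * decrease_sleep_time() in gc.h.
 */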

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}
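
/*
 * Quick reference for the selection above, derived from the code and
 * kept here only as a reading aid:
 *
 *   gc_type | sbi->gc_mode               | resulting victim policy
 *   --------+----------------------------+---------------------------------
 *   BG_GC   | GC_NORMAL, ATGC enabled    | GC_AT     (age-threshold)
 *   BG_GC   | GC_NORMAL, ATGC disabled   | GC_CB     (cost-benefit)
 *   FG_GC   | GC_NORMAL                  | GC_GREEDY (fewest valid blocks)
 *   any     | GC_IDLE_CB                 | GC_CB
 *   any     | GC_IDLE_GREEDY/URGENT_HIGH | GC_GREEDY
 *   any     | GC_IDLE_AT                 | GC_AT
 */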

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust the range of candidates: all dirty segments should be
	 * searched in the foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
	else if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
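
/*
 * Worked example for the LFS/GREEDY bound above (illustrative only,
 * assuming the common 2MiB segments of 512 x 4KiB blocks and a
 * one-segment section, i.e. ofs_unit == 1):
 *
 *   max cost = 2 * 512 * 1 = 1024
 *
 * A greedy victim is charged its valid-block count, so this initial
 * min_cost can never beat a real candidate (a single segment holds at
 * most 512 valid blocks); it only means "no victim selected yet".
 */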

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim segments that were
	 * selected by background GC earlier.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
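
/*
 * Worked example for the cost-benefit formula above (illustrative
 * numbers only): take a section that is 25% utilized (u = 25) and
 * relatively old (age = 80 on the 0..100 scale computed above):
 *
 *   benefit = 100 * (100 - 25) * 80 / (100 + 25) = 4800
 *   cost    = UINT_MAX - 4800
 *
 * A hotter, fuller section (u = 90, age = 10) yields only
 * 100 * 10 * 10 / 190 = 52, i.e. a larger cost value, so the min-cost
 * search prefers old, mostly-invalid sections.
 */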

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}

static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}

static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}
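
/*
 * Numeric sketch of the age filter above (values are made up): with the
 * default age_threshold of one week in mtime units at the time of
 * writing (DEF_GC_THREAD_AGE_THRESHOLD == 604800) and a running
 * dirty_max_mtime of 1000000, only sections with
 * mtime <= 1000000 - 604800 = 395200 become ATGC candidates; anything
 * written more recently is still considered "young".
 */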

static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}
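
/*
 * Worked example for the weighted cost above (illustrative numbers,
 * assuming accu saturates at DEFAULT_ACCURACY_CLASS == 10000 and the
 * default age_weight of 60): a candidate sitting at 50% of the mtime
 * range with 70% of its blocks invalid scores
 *
 *   age  = 10000 * 50% * 60 = 300000
 *   u    = 10000 * 70% * 40 = 280000
 *   cost = UINT_MAX - 580000
 *
 * Lower cost wins, so ATGC trades off "old" against "mostly invalid"
 * at a 60:40 ratio instead of using the valid-block count alone.
 */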

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage++ == 0)
			goto next_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}

	if (stage++ == 0)
		goto next_stage;
}
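
/*
 * Illustrative walk order for the two-stage search above: stage 0
 * starts at the entry returned by __lookup_victim_entry() for the
 * target age and walks toward smaller mtime (rb_prev); stage 1
 * restarts there and walks toward larger mtime (rb_next). Each stage
 * visits at most dirty_threshold candidates; with the defaults at the
 * time of writing (candidate_ratio 20%, max_candidate_count 10) and,
 * say, 200 victim entries, that is up to max(10, 40) = 40 per stage.
 */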

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}
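
/*
 * Return-value contract of f2fs_gc_pinned_control(), summarized from
 * the code above as a reading aid:
 *
 *   0       - not a pinned file; GC may migrate its blocks
 *   -EBUSY  - pinned file under BG_GC; leave it alone
 *   -EAGAIN - pinned file under FG_GC; the section was marked pinned,
 *             or, if section pinning is disabled, the file is charged
 *             a GC failure via f2fs_pin_file_control() and may lose
 *             its pin after exceeding gc_pin_file_threshold
 */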

/*
 * This function is called from two paths:
 * one is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting an invalid segno (one that failed its block
		 * validity check during a previous GC) to avoid an endless
		 * GC loop in such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for new writes; such a segment may
				 * be filled with both checkpointed and newly
				 * written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is valid and is migrated
 * with cold status; otherwise the (invalid) node is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass a node offset that refers to a direct
 * node block. Passing an offset that points to any other type of node
 * block, such as an indirect or double indirect node block, is a bug in
 * the caller.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}
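
/*
 * Worked example (illustrative, assuming 4KiB blocks with the default
 * geometry: ADDRS_PER_INODE == 923, ADDRS_PER_BLOCK == NIDS_PER_BLOCK
 * == 1018, no inline-xattr adjustments): the node tree is laid out as
 * inode(0), direct(1), direct(2), indirect(3), its children(4..)...
 * For node_ofs == 4, the first direct node under the first indirect:
 *
 *   dec  = (4 - 4) / (1018 + 1) = 0
 *   bidx = 4 - 2 - 0 = 2
 *   start block index = 2 * 1018 + 923 = 2959
 *
 * i.e. it is the third direct-node-sized extent after the inode's own
 * 923 direct pointers.
 */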

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
		.retry = 0,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
		.retry = 0,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
				&sum, type, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);

	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}
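
/*
 * High-level sequence of move_data_block() above, as a reading aid
 * (summarized from the code, not an additional contract):
 *
 *   1. grab the data page locked (without reading it in)
 *   2. re-check block validity and the pinned-file policy
 *   3. read the source block into a META_MAPPING page (mpage)
 *   4. allocate a new block address (ATGC or the cold data log)
 *   5. memcpy mpage into the page cached at the new address
 *   6. issue a synchronous write and update the dnode's blkaddr;
 *      on failure, f2fs_do_replace_block() rolls the allocation back
 */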

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of the victim data block
 * and checks the data block's validity. If the block is valid, it is
 * copied with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * stop BG_GC if there are not enough free sections,
		 * or stop GC altogether if the segment became fully valid
		 * due to a race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino, checking validity first */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode) ||
					special_file(inode->i_mode))
				continue;

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}
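
/*
 * Reading aid for the five phases gc_data_segment() walks over the
 * victim segment (summarized from the code above):
 *
 *   phase 0 - readahead the NAT blocks of all referenced nids
 *   phase 1 - readahead the (d)node pages themselves
 *   phase 2 - verify liveness via is_alive() and readahead inode pages
 *   phase 3 - iget the inodes, readahead their data pages, stash them
 *             in gc_list
 *   phase 4 - actually migrate each live block with move_data_block()
 *             or move_data_page()
 */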

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * zone-capacity can be less than zone-size in zoned devices,
	 * resulting in fewer usable segments in the zone than expected,
	 * so calculate the end segno of the zone that can actually be
	 * garbage collected
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false,
				STOP_CP_REASON_CORRUPTED_SUMMARY);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_gc_seg_count(sbi, data_type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi, data_type);

	blk_finish_plug(&plug);

	if (migrated)
		stat_inc_gc_sec_count(sbi, data_type, gc_type);

	return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;
	unsigned int upper_secs;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	sbi->skipped_gc_rwsem = 0;
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	/* Let's run FG_GC, if we don't have enough space. */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		gc_type = FG_GC;

		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can make them free with a checkpoint.
		 * The free segments we secure that way may make fggc
		 * unnecessary.
		 */
		if (prefree_segments(sbi)) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			/* Reset due to checkpoint */
			sec_freed = 0;
		}
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret) {
		/* allow searching for victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks);
	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
		sec_freed++;
		total_sec_freed++;
	}

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;

		if (has_enough_free_secs(sbi, sec_freed, 0)) {
			if (!gc_control->no_bg_gc &&
			    total_sec_freed < gc_control->nr_free_secs)
				goto go_gc_more;
			goto stop;
		}
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	} else if (has_enough_free_secs(sbi, 0, 0)) {
		goto stop;
	}

	__get_secs_required(sbi, NULL, &upper_secs, NULL);

	/*
	 * Write a checkpoint to reclaim prefree segments.
	 * We need three more extra sections for the writer's data/node/dentry.
	 */
	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
				prefree_segments(sbi)) {
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
		/* Reset due to checkpoint */
		sec_freed = 0;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = total_sec_freed ? 0 : -EAGAIN;
	return ret;
}

int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	return victim_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* allocate the warm/cold data area from the slower device */
1976	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
1977		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1978				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
1979
1980	init_atgc_management(sbi);
1981}
1982
1983static int free_segment_range(struct f2fs_sb_info *sbi,
1984				unsigned int secs, bool gc_only)
1985{
1986	unsigned int segno, next_inuse, start, end;
1987	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
1988	int gc_mode, gc_type;
1989	int err = 0;
1990	int type;
1991
1992	/* Force block allocation for GC */
1993	MAIN_SECS(sbi) -= secs;
1994	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
1995	end = MAIN_SEGS(sbi) - 1;
1996
1997	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
1998	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
1999		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
2000			SIT_I(sbi)->last_victim[gc_mode] = 0;
2001
2002	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
2003		if (sbi->next_victim_seg[gc_type] >= start)
2004			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
2005	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2006
2007	/* Move out cursegs from the target range */
2008	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
2009		f2fs_allocate_segment_for_resize(sbi, type, start, end);
2010
2011	/* Do GC to move the valid blocks out of the range */
2012	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
2013		struct gc_inode_list gc_list = {
2014			.ilist = LIST_HEAD_INIT(gc_list.ilist),
2015			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
2016		};
2017
2018		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
2019		put_gc_inode(&gc_list);
2020
2021		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
2022			err = -EAGAIN;
2023			goto out;
2024		}
2025		if (fatal_signal_pending(current)) {
2026			err = -ERESTARTSYS;
2027			goto out;
2028		}
2029	}
2030	if (gc_only)
2031		goto out;
2032
2033	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2034	err = f2fs_write_checkpoint(sbi, &cpc);
2035	if (err)
2036		goto out;
2037
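	/* after the checkpoint, no segment in the freed range may be in use */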
2038	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
2039	if (next_inuse <= end) {
2040		f2fs_err(sbi, "segno %u should be free but is still in use!",
2041			 next_inuse);
2042		f2fs_bug_on(sbi, 1);
2043	}
2044out:
2045	MAIN_SECS(sbi) += secs;
2046	return err;
2047}
2048
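/*
 * Apply a resize of @secs sections to the on-disk super block: section,
 * segment and block counts, plus the last device's segment count on
 * multi-device layouts. @secs is negative when shrinking and positive
 * when rolling a failed shrink back.
 */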
2049static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
2050{
2051	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
2052	int section_count;
2053	int segment_count;
2054	int segment_count_main;
2055	long long block_count;
2056	int segs = secs * sbi->segs_per_sec;
2057
2058	f2fs_down_write(&sbi->sb_lock);
2059
2060	section_count = le32_to_cpu(raw_sb->section_count);
2061	segment_count = le32_to_cpu(raw_sb->segment_count);
2062	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
2063	block_count = le64_to_cpu(raw_sb->block_count);
2064
2065	raw_sb->section_count = cpu_to_le32(section_count + secs);
2066	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
2067	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
2068	raw_sb->block_count = cpu_to_le64(block_count +
2069					(long long)segs * sbi->blocks_per_seg);
2070	if (f2fs_is_multi_device(sbi)) {
2071		int last_dev = sbi->s_ndevs - 1;
2072		int dev_segs =
2073			le32_to_cpu(raw_sb->devs[last_dev].total_segments);
2074
2075		raw_sb->devs[last_dev].total_segments =
2076						cpu_to_le32(dev_segs + segs);
2077	}
2078
2079	f2fs_up_write(&sbi->sb_lock);
2080}
2081
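/*
 * Mirror the same resize into the in-memory metadata: SM_I()/FREE_I()
 * counters, MAIN_SEGS()/MAIN_SECS(), the checkpoint's user_block_count,
 * and the last device's geometry on multi-device layouts.
 */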
2082static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2083{
2084	int segs = secs * sbi->segs_per_sec;
2085	long long blks = (long long)segs * sbi->blocks_per_seg;
2086	long long user_block_count =
2087				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2088
2089	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2090	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
2091	MAIN_SECS(sbi) += secs;
2092	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2093	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
2094	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2095
2096	if (f2fs_is_multi_device(sbi)) {
2097		int last_dev = sbi->s_ndevs - 1;
2098
2099		FDEV(last_dev).total_segments =
2100				(int)FDEV(last_dev).total_segments + segs;
2101		FDEV(last_dev).end_blk =
2102				(long long)FDEV(last_dev).end_blk + blks;
2103#ifdef CONFIG_BLK_DEV_ZONED
2104		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
2105					div_u64(blks, sbi->blocks_per_blkz);
2106#endif
2107	}
2108}
2109
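/*
 * Shrink the filesystem to @block_count blocks (growing is rejected).
 * A first pass migrates blocks out of the doomed range with GC only and
 * without freezing the filesystem; the filesystem is then frozen, the
 * range is freed for real, and the super block, in-memory metadata and a
 * checkpoint are committed. On failure, the counters are rolled back and
 * a fsck run is requested.
 */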
2110int f2fs_resize_fs(struct file *filp, __u64 block_count)
2111{
2112	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2113	__u64 old_block_count, shrunk_blocks;
2114	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2115	unsigned int secs;
2116	int err = 0;
2117	__u32 rem;
2118
2119	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2120	if (block_count > old_block_count)
2121		return -EINVAL;
2122
2123	if (f2fs_is_multi_device(sbi)) {
2124		int last_dev = sbi->s_ndevs - 1;
2125		__u64 last_segs = FDEV(last_dev).total_segments;
2126
2127		if (block_count + last_segs * sbi->blocks_per_seg <=
2128								old_block_count)
2129			return -EINVAL;
2130	}
2131
2132	/* the new fs size should be aligned to the section size */
2133	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2134	if (rem)
2135		return -EINVAL;
2136
2137	if (block_count == old_block_count)
2138		return 0;
2139
2140	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2141		f2fs_err(sbi, "Should run fsck to repair first.");
2142		return -EFSCORRUPTED;
2143	}
2144
2145	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2146		f2fs_err(sbi, "Checkpoint should be enabled.");
2147		return -EINVAL;
2148	}
2149
2150	err = mnt_want_write_file(filp);
2151	if (err)
2152		return err;
2153
2154	shrunk_blocks = old_block_count - block_count;
2155	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2156
2157	/* stop other GC */
2158	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2159		err = -EAGAIN;
2160		goto out_drop_write;
2161	}
2162
2163	/* block checkpoints to protect MAIN_SECS in free_segment_range */
2164	f2fs_lock_op(sbi);
2165
2166	spin_lock(&sbi->stat_lock);
2167	if (shrunk_blocks + valid_user_blocks(sbi) +
2168		sbi->current_reserved_blocks + sbi->unusable_block_count +
2169		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2170		err = -ENOSPC;
2171	spin_unlock(&sbi->stat_lock);
2172
2173	if (err)
2174		goto out_unlock;
2175
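	/* first pass: empty the target range with GC, without committing */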
2176	err = free_segment_range(sbi, secs, true);
2177
2178out_unlock:
2179	f2fs_unlock_op(sbi);
2180	f2fs_up_write(&sbi->gc_lock);
2181out_drop_write:
2182	mnt_drop_write_file(filp);
2183	if (err)
2184		return err;
2185
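	/* second pass: freeze the filesystem and shrink it for real */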
2186	err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2187	if (err)
2188		return err;
2189
2190	if (f2fs_readonly(sbi->sb)) {
2191		err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2192		if (err)
2193			return err;
2194		return -EROFS;
2195	}
2196
2197	f2fs_down_write(&sbi->gc_lock);
2198	f2fs_down_write(&sbi->cp_global_sem);
2199
2200	spin_lock(&sbi->stat_lock);
2201	if (shrunk_blocks + valid_user_blocks(sbi) +
2202		sbi->current_reserved_blocks + sbi->unusable_block_count +
2203		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2204		err = -ENOSPC;
2205	else
2206		sbi->user_block_count -= shrunk_blocks;
2207	spin_unlock(&sbi->stat_lock);
2208	if (err)
2209		goto out_err;
2210
2211	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2212	err = free_segment_range(sbi, secs, false);
2213	if (err)
2214		goto recover_out;
2215
2216	update_sb_metadata(sbi, -secs);
2217
2218	err = f2fs_commit_super(sbi, false);
2219	if (err) {
2220		update_sb_metadata(sbi, secs);
2221		goto recover_out;
2222	}
2223
2224	update_fs_metadata(sbi, -secs);
2225	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2226	set_sbi_flag(sbi, SBI_IS_DIRTY);
2227
2228	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2229	err = f2fs_write_checkpoint(sbi, &cpc);
2230	if (err) {
2231		update_fs_metadata(sbi, secs);
2232		update_sb_metadata(sbi, secs);
2233		f2fs_commit_super(sbi, false);
2234	}
2235recover_out:
2236	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2237	if (err) {
2238		set_sbi_flag(sbi, SBI_NEED_FSCK);
2239		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2240
2241		spin_lock(&sbi->stat_lock);
2242		sbi->user_block_count += shrunk_blocks;
2243		spin_unlock(&sbi->stat_lock);
2244	}
2245out_err:
2246	f2fs_up_write(&sbi->cp_global_sem);
2247	f2fs_up_write(&sbi->gc_lock);
2248	thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2249	return err;
2250}
2251