// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GC too frequently, because
		 * some segments can be invalidated soon afterwards by user
		 * updates or deletions. So we want to wait some time to
		 * collect more dirty segments.
		 */
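		/*
		 * wait_ms controls how aggressively this thread polls: it
		 * starts at min_sleep_time, is stepped up toward
		 * max_sleep_time (see increase_sleep_time()) whenever the
		 * system is busy or GC is skipped, stepped back down toward
		 * min_sleep_time (see decrease_sleep_time()) while enough
		 * invalid blocks exist, and parked at no_gc_sleep_time once
		 * no victim can be found.
		 */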
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			wait_ms = gc_th->urgent_sleep_time;
			down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		if (!foreground)
			stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust candidates range, should select all dirty segments for
	 * foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
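
/*
 * A sanity check on the bounds above: for greedy LFS the initial min_cost is
 * 2 * blocks_per_seg * ofs_unit, while the greedy cost of any candidate is
 * its valid block count, which can never exceed blocks_per_seg * ofs_unit;
 * so every scanned candidate beats the initial bound. CB and AT costs are
 * computed as UINT_MAX minus a benefit, hence those searches start from
 * UINT_MAX instead.
 */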

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC earlier.
	 * Those sections are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
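
/*
 * Worked example for the cost-benefit metric above: with section utilization
 * u = 20 (20% of blocks still valid) and age = 50, the benefit is
 * 100 * (100 - 20) * 50 / (100 + 20) = 3333, giving cost = UINT_MAX - 3333.
 * A colder (larger age) and emptier (smaller u) section yields a larger
 * benefit, hence a smaller cost, and is preferred as a victim.
 */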

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno,
				struct rb_node *parent, struct rb_node **p,
				bool left_most)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS);

	ve->mtime = mtime;
	ve->segno = segno;

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

	list_add_tail(&ve->list, &am->victim_list);

	am->victim_count++;

	return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool left_most = true;

	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *parent = NULL;
	bool left_most;

	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

	return parent;
}

static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		return;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}
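
/*
 * Worked example for the ATGC cost above, with the default age_weight of 60
 * and accu capped at DEFAULT_ACCURACY_CLASS (10000): a candidate whose mtime
 * sits halfway through [min_mtime, max_mtime) scores
 * age = 10000 * 50% * 60 = 300000, and one with half of its section blocks
 * invalidated scores u = 10000 * 50% * 40 = 200000, so
 * cost = UINT_MAX - 500000. Older data and emptier sections both lower the
 * cost; ties are broken in favor of the older candidate.
 */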

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost;
	unsigned int iter = 0;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	node = lookup_central_victim(sbi, p);
next_node:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		if (stage == 0)
			node = rb_prev(node);
		else if (stage == 1)
			node = rb_next(node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}
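
/*
 * Note on the two-stage walk above: both stages start from the entry whose
 * mtime is closest to the age of the source data (the "central" victim);
 * stage 0 walks backwards with rb_prev() and stage 1 forwards with
 * rb_next(), each visiting at most dirty_threshold candidates. Since
 * cost = UINT_MAX - vblocks, the fullest segment that still has a free slot
 * wins, which tends to pack data of similar age together for AT_SSR
 * allocation.
 */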

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
						&sbi->am.root, true));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type,
			char alloc_mode, unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}
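
	/*
	 * The scan below walks the dirty bitmap in ofs_unit granularity (one
	 * segment for SSR, one section for LFS), starting at p.offset. When
	 * the end of the range is reached, it wraps around once and retries
	 * from segment 0 up to the saved last_victim position, so successive
	 * calls cover the whole device instead of rescanning the same region
	 * first every time.
	 */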
	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting an invalid segno (one that failed the block
		 * validity check during GC) to avoid an endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which may be full of
				 * checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address stored in the summary with the one
 * in the NAT. If they match, the node block is migrated with cold status;
 * otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}
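
/*
 * The segment is walked three times on purpose: phase 0 only issues
 * readahead for the NAT blocks of every valid entry, phase 1 issues
 * readahead for the node pages themselves, and phase 2 re-validates each
 * block and actually migrates it. Front-loading the metadata I/O this way
 * keeps phase 2 mostly CPU-bound instead of stalling on synchronous reads.
 */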

/*
 * Calculate the start block index of the data addressed by the given node
 * offset. Be careful: callers must pass node offsets that indicate direct
 * node blocks only. Passing a node offset which points to another node block
 * type, such as an indirect or double indirect node block, is a caller's
 * bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}
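
/*
 * Worked example: node offset 0 is the inode itself, offsets 1 and 2 are the
 * two direct node blocks, and offset 3 is the first indirect node. So
 * node_ofs == 1 gives bidx = 0, i.e. its data starts right after the
 * ADDRS_PER_INODE in-inode pointers; node_ofs == 4 (the first direct node
 * hanging off the first indirect node) gives dec = 0 and bidx = 4 - 2 = 2,
 * i.e. a start index of 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE.
 */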

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL, SEQ_NONE);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}
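
/*
 * Note on the error path above: once f2fs_allocate_data_block() has reserved
 * newaddr, any later failure must undo that reservation; the recover_block
 * label does so by replaying the block back from newaddr to fio.old_blkaddr
 * with f2fs_do_replace_block(). Only after the write has been submitted
 * successfully is the dnode updated to point at newaddr via
 * f2fs_update_data_blkaddr().
 */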

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block, and
 * identifies data block validity. If the block is valid, it copies that
 * block with cold status and modifies the parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * stop BG_GC if there are not enough free sections.
		 * Or, stop GC if the segment becomes fully valid caused by
		 * race condition along with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode) ||
					special_file(inode->i_mode)) {
				set_sbi_flag(sbi, SBI_NEED_FSCK);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}
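
/*
 * Like gc_node_segment(), the walk above is split into phases, five in this
 * case: phases 0 and 1 issue readahead for the NAT blocks and the dnodes,
 * phase 2 (after is_alive()) issues readahead for the owning inodes' node
 * pages, phase 3 grabs each inode, pins it in gc_list and reads its data
 * page ahead, and phase 4 performs the actual migration through
 * move_data_block() or move_data_page().
 */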

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * zone-capacity can be less than zone-size in zoned devices,
	 * resulting in fewer usable segments than expected in the zone;
	 * calculate the end segno of the zone range that can be garbage
	 * collected.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_seg_count(sbi, type, gc_type);
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, bool force, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below a
		 * given threshold, we can free them by writing a checkpoint.
		 * Then, we secure free segments which don't need FG_GC any
		 * more.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret)
		goto stop;

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
	if (gc_type == FG_GC &&
			seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC && seg_freed)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (!has_not_enough_free_secs(sbi, sec_freed, 0))
		goto stop;

	if (skipped_round <= MAX_SKIP_GC_COUNT || skipped_round * 2 < round) {

		/* Write checkpoint to reclaim prefree segments */
		if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
				prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		segno = NULL_SEGNO;
		goto gc_more;
	}
	if (first_skipped < last_skipped &&
			(last_skipped - first_skipped) >
					sbi->skipped_gc_rwsem) {
		f2fs_drop_inmem_pages_all(sbi, true);
		segno = NULL_SEGNO;
		goto gc_more;
	}
	if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		ret = f2fs_write_checkpoint(sbi, &cpc);
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}
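
/*
 * Usage note: f2fs_gc() expects gc_lock to be held for write on entry and
 * releases it before returning. In sync mode it performs exactly one FG_GC
 * pass and reports -EAGAIN if that pass freed no section; otherwise it keeps
 * looping via gc_more until enough free sections exist, unless most FG_GC
 * rounds were skipped (atomic files or contended i_gc_rwsem), in which case
 * in-memory pages are dropped and the loop restarts.
 */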

int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	if (!victim_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool gc_only)
{
	unsigned int segno, next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
		put_gc_inode(&gc_list);

		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
			err = -EAGAIN;
			goto out;
		}
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
	if (gc_only)
		goto out;

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * sbi->segs_per_sec;

	down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)segs * sbi->blocks_per_seg);
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	up_write(&sbi->sb_lock);
}

static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long blks = (long long)segs * sbi->blocks_per_seg;
	long long user_block_count =
			le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
					(int)(blks >> sbi->log_blocks_per_blkz);
#endif
	}
}
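
/*
 * Both helpers above take a signed 'secs' so the same code handles growth
 * and shrink: a shrink by N sections is applied as secs = -N, which scales
 * to -N * segs_per_sec segments and -N * segs_per_sec * blocks_per_seg
 * blocks. The on-disk superblock (update_sb_metadata) and the in-memory and
 * checkpoint counters (update_fs_metadata) must change in matched pairs,
 * which is why the resize path below rolls one back when committing the
 * other fails.
 */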

int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + last_segs * sbi->blocks_per_seg <=
								old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	freeze_super(sbi->sb);

	if (f2fs_readonly(sbi->sb)) {
		thaw_super(sbi->sb);
		return -EROFS;
	}

	down_write(&sbi->gc_lock);
	mutex_lock(&sbi->cp_mutex);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	mutex_unlock(&sbi->cp_mutex);
	up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	return err;
}