Lines matching nm_i (identifier search: defs:nm_i) in fs/f2fs/node.c
23 #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
47 struct f2fs_nm_info *nm_i = NM_I(sbi);
54 if (!nm_i)
66 mem_size = (nm_i->nid_cnt[FREE_NID] *
68 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
70 mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
72 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
79 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
87 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
97 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
101 res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
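The checks above (f2fs_available_free_memory, lines 47-101) share one shape: a cached-object count times the per-object size is converted to pages and compared against a slice of available RAM scaled by ram_thresh. A minimal stand-alone model of that arithmetic, with illustrative names and a hard-coded 4K page size in place of the kernel definitions:

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified ram_thresh test: nr_objs cached objects of obj_size bytes,
     * avail_ram_pages of available RAM in pages, threshold in percent, and a
     * shift that narrows the budget (>> 2 for nid/NAT caches, >> 1 elsewhere). */
    static bool within_mem_budget(unsigned long nr_objs, size_t obj_size,
                                  unsigned long avail_ram_pages,
                                  unsigned int ram_thresh, unsigned int shift)
    {
        unsigned long mem_pages = (nr_objs * obj_size) >> 12;  /* assume 4K pages */
        unsigned long budget = (avail_ram_pages * ram_thresh / 100) >> shift;

        return mem_pages < budget;
    }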
145 struct f2fs_nm_info *nm_i = NM_I(sbi);
162 set_to_next_nat(nm_i, nid);
187 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
191 f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
192 else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
198 spin_lock(&nm_i->nat_list_lock);
199 list_add_tail(&ne->list, &nm_i->nat_entries);
200 spin_unlock(&nm_i->nat_list_lock);
202 nm_i->nat_cnt[TOTAL_NAT]++;
203 nm_i->nat_cnt[RECLAIMABLE_NAT]++;
207 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
211 ne = radix_tree_lookup(&nm_i->nat_root, n);
215 spin_lock(&nm_i->nat_list_lock);
217 list_move_tail(&ne->list, &nm_i->nat_entries);
218 spin_unlock(&nm_i->nat_list_lock);
224 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
227 return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
230 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
232 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
233 nm_i->nat_cnt[TOTAL_NAT]--;
234 nm_i->nat_cnt[RECLAIMABLE_NAT]--;
238 static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
244 head = radix_tree_lookup(&nm_i->nat_set_root, set);
253 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
258 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
265 head = __grab_nat_entry_set(nm_i, ne);
281 nm_i->nat_cnt[DIRTY_NAT]++;
282 nm_i->nat_cnt[RECLAIMABLE_NAT]--;
285 spin_lock(&nm_i->nat_list_lock);
290 spin_unlock(&nm_i->nat_list_lock);
293 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
296 spin_lock(&nm_i->nat_list_lock);
297 list_move_tail(&ne->list, &nm_i->nat_entries);
298 spin_unlock(&nm_i->nat_list_lock);
302 nm_i->nat_cnt[DIRTY_NAT]--;
303 nm_i->nat_cnt[RECLAIMABLE_NAT]++;
306 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
309 return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
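Taken together, the __*_nat_* helpers above (lines 187-309) maintain a dual structure: NAT entries are indexed by nid in the nat_root radix tree and simultaneously linked on the nat_entries LRU list under nat_list_lock, so lookups refresh recency and f2fs_try_to_free_nats() can evict from the cold end. A compact user-space sketch of that pattern, with a fixed slot array standing in for the radix tree and every name purely illustrative:

    #include <stddef.h>

    struct ent {
        unsigned int nid;
        struct ent *prev, *next;          /* LRU list, most recently used at the tail */
    };

    struct nat_cache_model {
        struct ent *slot[1024];           /* stand-in for the nid-keyed radix tree */
        struct ent lru;                   /* list sentinel */
        unsigned int total;
    };

    static void cache_init(struct nat_cache_model *c)
    {
        c->lru.prev = c->lru.next = &c->lru;
    }

    static void lru_move_tail(struct nat_cache_model *c, struct ent *e)
    {
        e->prev->next = e->next;          /* unlink (no-op for a self-linked node) */
        e->next->prev = e->prev;
        e->prev = c->lru.prev;            /* relink at the tail */
        e->next = &c->lru;
        c->lru.prev->next = e;
        c->lru.prev = e;
    }

    /* insert: index by nid and append to the LRU tail, like __init_nat_entry() */
    static void cache_insert(struct nat_cache_model *c, struct ent *e)
    {
        c->slot[e->nid % 1024] = e;
        e->prev = e->next = e;
        lru_move_tail(c, e);
        c->total++;
    }

    /* lookup: a hit is moved to the tail, so cold entries drift toward the head */
    static struct ent *cache_lookup(struct nat_cache_model *c, unsigned int nid)
    {
        struct ent *e = c->slot[nid % 1024];

        if (!e || e->nid != nid)
            return NULL;
        lru_move_tail(c, e);
        return e;
    }

    /* shrink: victims come from the head, the least recently used end */
    static struct ent *cache_evict_oldest(struct nat_cache_model *c)
    {
        struct ent *e = c->lru.next;

        if (e == &c->lru)
            return NULL;
        e->prev->next = e->next;
        e->next->prev = e->prev;
        c->slot[e->nid % 1024] = NULL;
        c->total--;
        return e;
    }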
382 struct f2fs_nm_info *nm_i = NM_I(sbi);
386 f2fs_down_read(&nm_i->nat_tree_lock);
387 e = __lookup_nat_cache(nm_i, nid);
393 f2fs_up_read(&nm_i->nat_tree_lock);
399 struct f2fs_nm_info *nm_i = NM_I(sbi);
403 f2fs_down_read(&nm_i->nat_tree_lock);
404 e = __lookup_nat_cache(nm_i, nid);
407 f2fs_up_read(&nm_i->nat_tree_lock);
413 struct f2fs_nm_info *nm_i = NM_I(sbi);
417 f2fs_down_read(&nm_i->nat_tree_lock);
418 e = __lookup_nat_cache(nm_i, ino);
423 f2fs_up_read(&nm_i->nat_tree_lock);
431 struct f2fs_nm_info *nm_i = NM_I(sbi);
442 f2fs_down_write(&nm_i->nat_tree_lock);
443 e = __lookup_nat_cache(nm_i, nid);
445 e = __init_nat_entry(nm_i, new, ne, false);
451 f2fs_up_write(&nm_i->nat_tree_lock);
459 struct f2fs_nm_info *nm_i = NM_I(sbi);
463 f2fs_down_write(&nm_i->nat_tree_lock);
464 e = __lookup_nat_cache(nm_i, ni->nid);
466 e = __init_nat_entry(nm_i, new, NULL, true);
502 __set_nat_cache_dirty(nm_i, e);
506 e = __lookup_nat_cache(nm_i, ni->ino);
512 f2fs_up_write(&nm_i->nat_tree_lock);
517 struct f2fs_nm_info *nm_i = NM_I(sbi);
520 if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
523 spin_lock(&nm_i->nat_list_lock);
527 if (list_empty(&nm_i->nat_entries))
530 ne = list_first_entry(&nm_i->nat_entries,
533 spin_unlock(&nm_i->nat_list_lock);
535 __del_from_nat_cache(nm_i, ne);
538 spin_lock(&nm_i->nat_list_lock);
540 spin_unlock(&nm_i->nat_list_lock);
542 f2fs_up_write(&nm_i->nat_tree_lock);
549 struct f2fs_nm_info *nm_i = NM_I(sbi);
564 f2fs_down_read(&nm_i->nat_tree_lock);
565 e = __lookup_nat_cache(nm_i, nid);
570 f2fs_up_read(&nm_i->nat_tree_lock);
582 } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
584 f2fs_up_read(&nm_i->nat_tree_lock);
595 f2fs_up_read(&nm_i->nat_tree_lock);
601 f2fs_up_read(&nm_i->nat_tree_lock);
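The block ending at line 601 is f2fs_get_node_info()'s read path: probe the NAT cache under the nat_tree_lock read side, and on a miss fall back to the current-segment journal and the on-disk NAT block, dropping the lock and retrying when it is contended (line 582). A minimal pthread model of that probe-then-fallback shape; the helpers are stubs and every name is illustrative:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Stubs standing in for the cache probe and the on-disk NAT read. */
    static bool cache_probe(unsigned int nid, unsigned int *blkaddr)
    {
        (void)nid; (void)blkaddr;
        return false;                     /* pretend the cache misses */
    }

    static bool read_nat_block(unsigned int nid, unsigned int *blkaddr)
    {
        (void)nid;
        *blkaddr = 0;
        return true;
    }

    static bool get_node_info_model(unsigned int nid, unsigned int *blkaddr)
    {
        bool hit;

        pthread_rwlock_rdlock(&tree_lock);
        hit = cache_probe(nid, blkaddr);  /* fast path: cached nid -> blkaddr */
        pthread_rwlock_unlock(&tree_lock);
        if (hit)
            return true;

        /* slow path: consult persistent state with the read lock released */
        return read_nat_block(nid, blkaddr);
    }

In the kernel the read lock is actually held across the journal lookup and the freshly read entry is inserted into the cache afterwards; the model above only illustrates the fast-path/slow-path split.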
2192 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2195 return radix_tree_lookup(&nm_i->free_nid_root, n);
2201 struct f2fs_nm_info *nm_i = NM_I(sbi);
2202 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2207 nm_i->nid_cnt[FREE_NID]++;
2208 list_add_tail(&i->list, &nm_i->free_nid_list);
2215 struct f2fs_nm_info *nm_i = NM_I(sbi);
2218 nm_i->nid_cnt[state]--;
2221 radix_tree_delete(&nm_i->free_nid_root, i->nid);
2227 struct f2fs_nm_info *nm_i = NM_I(sbi);
2231 nm_i->nid_cnt[org_state]--;
2232 nm_i->nid_cnt[dst_state]++;
2239 list_add_tail(&i->list, &nm_i->free_nid_list);
2248 struct f2fs_nm_info *nm_i = NM_I(sbi);
2252 f2fs_down_read(&nm_i->nat_tree_lock);
2253 for (i = 0; i < nm_i->nat_blocks; i++) {
2254 if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
2259 f2fs_up_read(&nm_i->nat_tree_lock);
2267 struct f2fs_nm_info *nm_i = NM_I(sbi);
2271 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2275 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2277 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2278 nm_i->free_nid_count[nat_ofs]++;
2280 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2282 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2284 nm_i->free_nid_count[nat_ofs]--;
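update_free_nid_bitmap() (lines 2267-2284) keeps one bit per nid inside each NAT block plus a per-block free_nid_count, so later scans can skip blocks with nothing free. A stand-alone model of that bookkeeping; the 455-nids-per-block figure matches NAT_ENTRY_PER_BLOCK for 4K blocks, and the remaining names are illustrative:

    #include <stdbool.h>

    #define NIDS_PER_BLOCK 455
    #define BITMAP_BYTES   ((NIDS_PER_BLOCK + 7) / 8)

    struct nat_block_state {
        unsigned char free_bitmap[BITMAP_BYTES];
        unsigned short free_count;
    };

    /* Only adjust the counter when the bit actually changes state, as the
     * branches above do (the kernel additionally gates the decrement on a
     * "build" flag not modeled here). */
    static void mark_nid(struct nat_block_state *b, unsigned int nid_ofs, bool set)
    {
        unsigned char mask = 1u << (nid_ofs & 7);
        unsigned char *byte = &b->free_bitmap[nid_ofs >> 3];

        if (set && !(*byte & mask)) {         /* nid became free */
            *byte |= mask;
            b->free_count++;
        } else if (!set && (*byte & mask)) {  /* nid became allocated */
            *byte &= ~mask;
            b->free_count--;
        }
    }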
2292 struct f2fs_nm_info *nm_i = NM_I(sbi);
2311 spin_lock(&nm_i->nid_list_lock);
2335 ne = __lookup_nat_cache(nm_i, nid);
2340 e = __lookup_free_nid_list(nm_i, nid);
2353 nm_i->available_nids++;
2355 spin_unlock(&nm_i->nid_list_lock);
2365 struct f2fs_nm_info *nm_i = NM_I(sbi);
2369 spin_lock(&nm_i->nid_list_lock);
2370 i = __lookup_free_nid_list(nm_i, nid);
2375 spin_unlock(&nm_i->nid_list_lock);
2384 struct f2fs_nm_info *nm_i = NM_I(sbi);
2390 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2395 if (unlikely(start_nid >= nm_i->max_nid))
2438 struct f2fs_nm_info *nm_i = NM_I(sbi);
2442 f2fs_down_read(&nm_i->nat_tree_lock);
2444 for (i = 0; i < nm_i->nat_blocks; i++) {
2445 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2447 if (!nm_i->free_nid_count[i])
2450 idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2458 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2465 f2fs_up_read(&nm_i->nat_tree_lock);
2471 struct f2fs_nm_info *nm_i = NM_I(sbi);
2473 nid_t nid = nm_i->next_scan_nid;
2475 if (unlikely(nid >= nm_i->max_nid))
2482 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2492 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2500 f2fs_down_read(&nm_i->nat_tree_lock);
2504 nm_i->nat_block_bitmap)) {
2515 f2fs_up_read(&nm_i->nat_tree_lock);
2529 if (unlikely(nid >= nm_i->max_nid))
2537 nm_i->next_scan_nid = nid;
2542 f2fs_up_read(&nm_i->nat_tree_lock);
2544 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2545 nm_i->ra_nid_pages, META_NAT, false);
2568 struct f2fs_nm_info *nm_i = NM_I(sbi);
2574 spin_lock(&nm_i->nid_list_lock);
2576 if (unlikely(nm_i->available_nids == 0)) {
2577 spin_unlock(&nm_i->nid_list_lock);
2582 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2583 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2584 i = list_first_entry(&nm_i->free_nid_list,
2589 nm_i->available_nids--;
2593 spin_unlock(&nm_i->nid_list_lock);
2596 spin_unlock(&nm_i->nid_list_lock);
2609 struct f2fs_nm_info *nm_i = NM_I(sbi);
2612 spin_lock(&nm_i->nid_list_lock);
2613 i = __lookup_free_nid_list(nm_i, nid);
2616 spin_unlock(&nm_i->nid_list_lock);
2626 struct f2fs_nm_info *nm_i = NM_I(sbi);
2633 spin_lock(&nm_i->nid_list_lock);
2634 i = __lookup_free_nid_list(nm_i, nid);
2644 nm_i->available_nids++;
2648 spin_unlock(&nm_i->nid_list_lock);
2656 struct f2fs_nm_info *nm_i = NM_I(sbi);
2659 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2662 if (!mutex_trylock(&nm_i->build_lock))
2665 while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2669 spin_lock(&nm_i->nid_list_lock);
2670 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2672 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2679 spin_unlock(&nm_i->nid_list_lock);
2682 mutex_unlock(&nm_i->build_lock);
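The allocator paths above (f2fs_alloc_nid, f2fs_alloc_nid_done and f2fs_alloc_nid_failed, lines 2568-2648) move a nid between the FREE_NID and PREALLOC_NID counts under nid_list_lock: allocation takes the head of free_nid_list and reclassifies it, completion drops the entry, and failure returns it to the free pool and makes the nid available again. A counter-only sketch of that lifecycle (illustrative; the list, radix tree and locking are omitted):

    #include <stdbool.h>

    enum nid_state { FREE_NID, PREALLOC_NID, MAX_NID_STATE };

    struct nid_counts {
        unsigned int cnt[MAX_NID_STATE];
        unsigned int available_nids;
    };

    /* alloc: FREE_NID -> PREALLOC_NID, one fewer nid available overall */
    static bool nid_alloc(struct nid_counts *m)
    {
        if (!m->available_nids || !m->cnt[FREE_NID])
            return false;                 /* caller would build more free nids and retry */
        m->cnt[FREE_NID]--;
        m->cnt[PREALLOC_NID]++;
        m->available_nids--;
        return true;
    }

    /* done: the node was created, drop the preallocated entry */
    static void nid_alloc_done(struct nid_counts *m)
    {
        m->cnt[PREALLOC_NID]--;
    }

    /* failed: put the nid back and make it allocatable again */
    static void nid_alloc_failed(struct nid_counts *m)
    {
        m->cnt[PREALLOC_NID]--;
        m->cnt[FREE_NID]++;
        m->available_nids++;
    }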
2884 struct f2fs_nm_info *nm_i = NM_I(sbi);
2900 ne = __lookup_nat_cache(nm_i, nid);
2903 __init_nat_entry(nm_i, ne, &raw_ne, true);
2913 spin_lock(&nm_i->nid_list_lock);
2914 nm_i->available_nids--;
2915 spin_unlock(&nm_i->nid_list_lock);
2918 __set_nat_cache_dirty(nm_i, ne);
2942 static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
2946 __set_bit_le(nat_ofs, nm_i->empty_nat_bits);
2947 __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2951 __clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
2953 __set_bit_le(nat_ofs, nm_i->full_nat_bits);
2955 __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2961 struct f2fs_nm_info *nm_i = NM_I(sbi);
2979 __update_nat_bits(nm_i, nat_index, valid);
2984 struct f2fs_nm_info *nm_i = NM_I(sbi);
2987 f2fs_down_read(&nm_i->nat_tree_lock);
2989 for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
3000 nm_i->free_nid_bitmap[nat_ofs]))
3004 __update_nat_bits(nm_i, nat_ofs, valid);
3007 f2fs_up_read(&nm_i->nat_tree_lock);
3090 struct f2fs_nm_info *nm_i = NM_I(sbi);
3105 f2fs_down_write(&nm_i->nat_tree_lock);
3107 f2fs_up_write(&nm_i->nat_tree_lock);
3110 if (!nm_i->nat_cnt[DIRTY_NAT])
3113 f2fs_down_write(&nm_i->nat_tree_lock);
3122 nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3125 while ((found = __gang_lookup_nat_set(nm_i,
3142 f2fs_up_write(&nm_i->nat_tree_lock);
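__update_nat_bits() (lines 2942-2955) reclassifies a NAT block once its entries have been flushed: zero valid entries sets the block's empty bit, a completely valid block sets the full bit, and anything in between clears both. A condensed model of that decision, operating on plain byte arrays instead of the kernel's little-endian bitmaps (names illustrative):

    /* valid = number of in-use nids in this NAT block, cap = nids per block */
    static void update_bits_model(unsigned char *empty_bits, unsigned char *full_bits,
                                  unsigned int blk, unsigned int valid, unsigned int cap)
    {
        unsigned char mask = 1u << (blk & 7);

        if (valid == 0) {
            empty_bits[blk >> 3] |= mask;          /* wholly free block */
            full_bits[blk >> 3]  &= ~mask;
            return;
        }

        empty_bits[blk >> 3] &= ~mask;
        if (valid == cap)
            full_bits[blk >> 3] |= mask;           /* no free nid left in the block */
        else
            full_bits[blk >> 3] &= ~mask;          /* partially used */
    }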
3151 struct f2fs_nm_info *nm_i = NM_I(sbi);
3152 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3157 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3158 nm_i->nat_bits = f2fs_kvzalloc(sbi,
3159 nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
3160 if (!nm_i->nat_bits)
3163 nm_i->full_nat_bits = nm_i->nat_bits + 8;
3164 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
3170 nm_i->nat_bits_blocks;
3171 for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3178 memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
3184 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3187 cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
3197 struct f2fs_nm_info *nm_i = NM_I(sbi);
3204 for (i = 0; i < nm_i->nat_blocks; i++) {
3205 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3206 if (i >= nm_i->nat_blocks)
3209 __set_bit_le(i, nm_i->nat_block_bitmap);
3220 for (i = 0; i < nm_i->nat_blocks; i++) {
3221 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3222 if (i >= nm_i->nat_blocks)
3225 __set_bit_le(i, nm_i->nat_block_bitmap);
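load_free_nid_bitmap() (lines 3197-3225) then consumes the bit pair loaded with the checkpoint (lines 3151-3187): blocks flagged in empty_nat_bits have every nid recorded as free and the block marked as scanned without reading it, blocks flagged in full_nat_bits are marked as scanned with nothing to add, and only the remaining blocks need a real NAT read later. A condensed classification model (illustrative):

    enum nat_block_kind { NAT_BLK_EMPTY, NAT_BLK_FULL, NAT_BLK_PARTIAL };

    static int test_bit_in(const unsigned char *map, unsigned int i)
    {
        return map[i >> 3] & (1u << (i & 7));
    }

    /* Classify one NAT block from the checkpoint-provided empty/full bit pair. */
    static enum nat_block_kind classify_nat_block(const unsigned char *empty_bits,
                                                  const unsigned char *full_bits,
                                                  unsigned int blk)
    {
        if (test_bit_in(empty_bits, blk))
            return NAT_BLK_EMPTY;     /* every nid free: populate the bitmap, skip the read */
        if (test_bit_in(full_bits, blk))
            return NAT_BLK_FULL;      /* nothing free: just mark the block scanned */
        return NAT_BLK_PARTIAL;       /* must read the NAT block to find free nids */
    }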
3232 struct f2fs_nm_info *nm_i = NM_I(sbi);
3237 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3241 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3242 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3245 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3247 nm_i->nid_cnt[FREE_NID] = 0;
3248 nm_i->nid_cnt[PREALLOC_NID] = 0;
3249 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3250 nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3251 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3252 nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;
3254 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3255 INIT_LIST_HEAD(&nm_i->free_nid_list);
3256 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3257 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3258 INIT_LIST_HEAD(&nm_i->nat_entries);
3259 spin_lock_init(&nm_i->nat_list_lock);
3261 mutex_init(&nm_i->build_lock);
3262 spin_lock_init(&nm_i->nid_list_lock);
3263 init_f2fs_rwsem(&nm_i->nat_tree_lock);
3265 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3266 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3268 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3270 if (!nm_i->nat_bitmap)
3278 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3280 if (!nm_i->nat_bitmap_mir)
3289 struct f2fs_nm_info *nm_i = NM_I(sbi);
3292 nm_i->free_nid_bitmap =
3294 nm_i->nat_blocks),
3296 if (!nm_i->free_nid_bitmap)
3299 for (i = 0; i < nm_i->nat_blocks; i++) {
3300 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3302 if (!nm_i->free_nid_bitmap[i])
3306 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3308 if (!nm_i->nat_block_bitmap)
3311 nm_i->free_nid_count =
3313 nm_i->nat_blocks),
3315 if (!nm_i->free_nid_count)
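f2fs_init_node_manager() (lines 3232-3280) derives the geometry used everywhere above: nat_blocks comes from the NAT segment count shifted by log_blocks_per_seg (line 3241), max_nid is NAT_ENTRY_PER_BLOCK times nat_blocks (line 3242), and available_nids is max_nid minus the nodes already in use and a small reserved count (line 3245). A tiny arithmetic sketch of those derivations (illustrative field and parameter names):

    /* Geometry derived at mount; inputs correspond to superblock/checkpoint fields. */
    struct nm_geometry {
        unsigned int nat_segs;          /* segments holding one NAT copy */
        unsigned int blocks_per_seg;    /* 1 << log_blocks_per_seg */
        unsigned int nids_per_block;    /* NAT entries per block, 455 for 4K blocks */
    };

    static unsigned int nat_blocks(const struct nm_geometry *g)
    {
        return g->nat_segs * g->blocks_per_seg;
    }

    static unsigned int max_nid(const struct nm_geometry *g)
    {
        return g->nids_per_block * nat_blocks(g);
    }

    static unsigned int available_nids(const struct nm_geometry *g,
                                       unsigned int valid_node_count,
                                       unsigned int reserved_nids)
    {
        return max_nid(g) - valid_node_count - reserved_nids;
    }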
3345 struct f2fs_nm_info *nm_i = NM_I(sbi);
3353 if (!nm_i)
3357 spin_lock(&nm_i->nid_list_lock);
3358 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3360 spin_unlock(&nm_i->nid_list_lock);
3362 spin_lock(&nm_i->nid_list_lock);
3364 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3365 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3366 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3367 spin_unlock(&nm_i->nid_list_lock);
3370 f2fs_down_write(&nm_i->nat_tree_lock);
3371 while ((found = __gang_lookup_nat_cache(nm_i,
3377 spin_lock(&nm_i->nat_list_lock);
3379 spin_unlock(&nm_i->nat_list_lock);
3381 __del_from_nat_cache(nm_i, natvec[idx]);
3384 f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3389 while ((found = __gang_lookup_nat_set(nm_i,
3397 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3401 f2fs_up_write(&nm_i->nat_tree_lock);
3403 kvfree(nm_i->nat_block_bitmap);
3404 if (nm_i->free_nid_bitmap) {
3407 for (i = 0; i < nm_i->nat_blocks; i++)
3408 kvfree(nm_i->free_nid_bitmap[i]);
3409 kvfree(nm_i->free_nid_bitmap);
3411 kvfree(nm_i->free_nid_count);
3413 kvfree(nm_i->nat_bitmap);
3414 kvfree(nm_i->nat_bits);
3416 kvfree(nm_i->nat_bitmap_mir);
3419 kfree(nm_i);