Lines matching refs:nm_i (uses of struct f2fs_nm_info, the F2FS node manager state). The number at the start of each entry is the line number in the source file.

23 #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
46 struct f2fs_nm_info *nm_i = NM_I(sbi);
61 mem_size = (nm_i->nid_cnt[FREE_NID] *
63 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
65 mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
67 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
74 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
82 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
88 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
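The checks above all share one shape: the estimated footprint of an in-memory cache (entry count times entry size, rounded up to pages) must stay under a slice of available RAM scaled by nm_i->ram_thresh, with a further right shift choosing how large that slice is. A minimal userspace sketch of that test follows; the function name, parameters and the 4 KiB page size are illustrative assumptions, not the kernel's.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Sketch of the threshold test above:
 *   mem_size < ((avail_ram * ram_thresh / 100) >> shift)
 * where mem_size is the cache footprint in pages.  All names are
 * illustrative stand-ins. */
static bool within_mem_budget(size_t nr_entries, size_t entry_size,
                              size_t avail_ram_pages, unsigned int ram_thresh,
                              unsigned int shift)
{
    const size_t page_size = 4096;      /* assumed page size */
    size_t mem_pages = (nr_entries * entry_size + page_size - 1) / page_size;

    return mem_pages < ((avail_ram_pages * ram_thresh / 100) >> shift);
}

int main(void)
{
    /* e.g. 100000 cached entries of 64 bytes against 64 Ki pages of RAM,
     * a 10% threshold and a quarter share (shift of 2) */
    printf("fits: %d\n", within_mem_budget(100000, 64, 65536, 10, 2));
    return 0;
}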
122 struct f2fs_nm_info *nm_i = NM_I(sbi);
139 set_to_next_nat(nm_i, nid);
165 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
169 f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
170 else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
176 spin_lock(&nm_i->nat_list_lock);
177 list_add_tail(&ne->list, &nm_i->nat_entries);
178 spin_unlock(&nm_i->nat_list_lock);
180 nm_i->nat_cnt[TOTAL_NAT]++;
181 nm_i->nat_cnt[RECLAIMABLE_NAT]++;
185 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
189 ne = radix_tree_lookup(&nm_i->nat_root, n);
193 spin_lock(&nm_i->nat_list_lock);
195 list_move_tail(&ne->list, &nm_i->nat_entries);
196 spin_unlock(&nm_i->nat_list_lock);
202 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
205 return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
208 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
210 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
211 nm_i->nat_cnt[TOTAL_NAT]--;
212 nm_i->nat_cnt[RECLAIMABLE_NAT]--;
216 static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
222 head = radix_tree_lookup(&nm_i->nat_set_root, set);
230 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
235 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
242 head = __grab_nat_entry_set(nm_i, ne);
258 nm_i->nat_cnt[DIRTY_NAT]++;
259 nm_i->nat_cnt[RECLAIMABLE_NAT]--;
262 spin_lock(&nm_i->nat_list_lock);
267 spin_unlock(&nm_i->nat_list_lock);
270 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
273 spin_lock(&nm_i->nat_list_lock);
274 list_move_tail(&ne->list, &nm_i->nat_entries);
275 spin_unlock(&nm_i->nat_list_lock);
279 nm_i->nat_cnt[DIRTY_NAT]--;
280 nm_i->nat_cnt[RECLAIMABLE_NAT]++;
283 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
286 return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
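Taken together, the helpers listed above follow a common caching pattern: a radix tree gives keyed lookup, a list ordered by recent use (guarded by its own nat_list_lock) gives the reclaim order, and per-state counters (TOTAL_NAT, RECLAIMABLE_NAT, DIRTY_NAT) track how much is cached. The userspace sketch below models the same index-plus-LRU idea with a small hash table standing in for the radix tree; every name in it is illustrative.

#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 64

/* Sketch of the index + LRU pattern above.  A hash table stands in for
 * the radix tree; the list keeps the most recently used entry at the tail
 * so reclaim can start from the head.  Illustrative only. */
struct entry {
    unsigned int nid;
    struct entry *chain;            /* hash bucket chain */
    struct entry *prev, *next;      /* LRU list links */
};

struct cache {
    struct entry *bucket[NBUCKETS];
    struct entry lru;               /* list sentinel */
    unsigned int total;
};

static void cache_init(struct cache *c)
{
    c->lru.prev = c->lru.next = &c->lru;
    c->total = 0;
    for (int i = 0; i < NBUCKETS; i++)
        c->bucket[i] = NULL;
}

static void lru_move_tail(struct cache *c, struct entry *e)
{
    if (e->prev) {                  /* unlink if already on the list */
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }
    e->prev = c->lru.prev;
    e->next = &c->lru;
    c->lru.prev->next = e;
    c->lru.prev = e;
}

static struct entry *cache_lookup(struct cache *c, unsigned int nid)
{
    for (struct entry *e = c->bucket[nid % NBUCKETS]; e; e = e->chain)
        if (e->nid == nid) {
            lru_move_tail(c, e);    /* like __lookup_nat_cache() */
            return e;
        }
    return NULL;
}

static struct entry *cache_insert(struct cache *c, unsigned int nid)
{
    struct entry *e = calloc(1, sizeof(*e));

    if (!e)
        return NULL;
    e->nid = nid;
    e->chain = c->bucket[nid % NBUCKETS];
    c->bucket[nid % NBUCKETS] = e;
    lru_move_tail(c, e);            /* like __init_nat_entry() */
    c->total++;
    return e;
}

int main(void)
{
    struct cache c;

    cache_init(&c);
    cache_insert(&c, 3);
    cache_insert(&c, 7);
    printf("lookup 3: %s\n", cache_lookup(&c, 3) ? "hit" : "miss");
    printf("total cached: %u\n", c.total);
    return 0;
}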
358 struct f2fs_nm_info *nm_i = NM_I(sbi);
362 down_read(&nm_i->nat_tree_lock);
363 e = __lookup_nat_cache(nm_i, nid);
369 up_read(&nm_i->nat_tree_lock);
375 struct f2fs_nm_info *nm_i = NM_I(sbi);
379 down_read(&nm_i->nat_tree_lock);
380 e = __lookup_nat_cache(nm_i, nid);
383 up_read(&nm_i->nat_tree_lock);
389 struct f2fs_nm_info *nm_i = NM_I(sbi);
393 down_read(&nm_i->nat_tree_lock);
394 e = __lookup_nat_cache(nm_i, ino);
399 up_read(&nm_i->nat_tree_lock);
407 struct f2fs_nm_info *nm_i = NM_I(sbi);
414 down_write(&nm_i->nat_tree_lock);
415 e = __lookup_nat_cache(nm_i, nid);
417 e = __init_nat_entry(nm_i, new, ne, false);
423 up_write(&nm_i->nat_tree_lock);
431 struct f2fs_nm_info *nm_i = NM_I(sbi);
435 down_write(&nm_i->nat_tree_lock);
436 e = __lookup_nat_cache(nm_i, ni->nid);
438 e = __init_nat_entry(nm_i, new, NULL, true);
473 __set_nat_cache_dirty(nm_i, e);
477 e = __lookup_nat_cache(nm_i, ni->ino);
483 up_write(&nm_i->nat_tree_lock);
488 struct f2fs_nm_info *nm_i = NM_I(sbi);
491 if (!down_write_trylock(&nm_i->nat_tree_lock))
494 spin_lock(&nm_i->nat_list_lock);
498 if (list_empty(&nm_i->nat_entries))
501 ne = list_first_entry(&nm_i->nat_entries,
504 spin_unlock(&nm_i->nat_list_lock);
506 __del_from_nat_cache(nm_i, ne);
509 spin_lock(&nm_i->nat_list_lock);
511 spin_unlock(&nm_i->nat_list_lock);
513 up_write(&nm_i->nat_tree_lock);
520 struct f2fs_nm_info *nm_i = NM_I(sbi);
535 down_read(&nm_i->nat_tree_lock);
536 e = __lookup_nat_cache(nm_i, nid);
541 up_read(&nm_i->nat_tree_lock);
556 up_read(&nm_i->nat_tree_lock);
562 up_read(&nm_i->nat_tree_lock);
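The lookups and updates listed above show the lock nesting around those helpers: plain queries take nm_i->nat_tree_lock for read, updates take it for write (or only try to, as in the cache shrinker), and nm_i->nat_list_lock is a separate, narrower spinlock taken only around LRU list manipulation. A self-contained pthread sketch of that nesting follows; the data and helper names are invented for the sketch, with a fixed array standing in for the cached entries.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_NID 16

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER; /* ~ nat_tree_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;   /* ~ nat_list_lock */

static unsigned int blkaddr[MAX_NID];   /* stand-in for the cached entries */
static unsigned int last_used[MAX_NID]; /* stand-in for the LRU ordering */
static unsigned int clock_tick;

/* read side: query an entry and refresh its recency under the list lock */
static bool get_block_addr(unsigned int nid, unsigned int *out)
{
    bool found = false;

    pthread_rwlock_rdlock(&tree_lock);
    if (nid < MAX_NID && blkaddr[nid]) {
        pthread_mutex_lock(&list_lock);
        last_used[nid] = ++clock_tick;
        pthread_mutex_unlock(&list_lock);
        *out = blkaddr[nid];
        found = true;
    }
    pthread_rwlock_unlock(&tree_lock);
    return found;
}

/* write side: install or update an entry */
static void set_block_addr(unsigned int nid, unsigned int addr)
{
    pthread_rwlock_wrlock(&tree_lock);
    if (nid < MAX_NID)
        blkaddr[nid] = addr;
    pthread_rwlock_unlock(&tree_lock);
}

int main(void)
{
    unsigned int addr;

    set_block_addr(5, 0x1234);
    if (get_block_addr(5, &addr))
        printf("nid 5 -> block %#x\n", addr);
    return 0;
}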
2123 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2126 return radix_tree_lookup(&nm_i->free_nid_root, n);
2132 struct f2fs_nm_info *nm_i = NM_I(sbi);
2134 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2138 nm_i->nid_cnt[FREE_NID]++;
2139 list_add_tail(&i->list, &nm_i->free_nid_list);
2146 struct f2fs_nm_info *nm_i = NM_I(sbi);
2149 nm_i->nid_cnt[state]--;
2152 radix_tree_delete(&nm_i->free_nid_root, i->nid);
2158 struct f2fs_nm_info *nm_i = NM_I(sbi);
2162 nm_i->nid_cnt[org_state]--;
2163 nm_i->nid_cnt[dst_state]++;
2170 list_add_tail(&i->list, &nm_i->free_nid_list);
2180 struct f2fs_nm_info *nm_i = NM_I(sbi);
2184 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2188 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2190 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2191 nm_i->free_nid_count[nat_ofs]++;
2193 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2195 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2197 nm_i->free_nid_count[nat_ofs]--;
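The bitmap update listed just above keeps, per NAT block, one bit for each nid that is known to be free plus a free_nid_count for that block, and it only touches the counter when the bit actually changes state. A small userspace sketch of that invariant follows; the structure, helper names and the 455-entries-per-block constant are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NIDS_PER_BLOCK 455      /* assumed NAT entries per block */

/* Sketch of the per-NAT-block free-nid bitmap update above: the bit is
 * toggled and the per-block counter adjusted only if the bit really
 * changed.  Plain unsigned-long bitmap, illustrative names. */
struct nat_block_state {
    unsigned long bitmap[(NIDS_PER_BLOCK + 63) / 64];
    unsigned int free_count;
};

static bool bit_is_set(const unsigned long *map, unsigned int nr)
{
    return (map[nr / 64] >> (nr % 64)) & 1UL;
}

static void update_free_bit(struct nat_block_state *blk, unsigned int nid_ofs,
                            bool set)
{
    if (set) {
        if (bit_is_set(blk->bitmap, nid_ofs))
            return;                 /* already marked free */
        blk->bitmap[nid_ofs / 64] |= 1UL << (nid_ofs % 64);
        blk->free_count++;
    } else {
        if (!bit_is_set(blk->bitmap, nid_ofs))
            return;                 /* already marked in use */
        blk->bitmap[nid_ofs / 64] &= ~(1UL << (nid_ofs % 64));
        blk->free_count--;
    }
}

int main(void)
{
    struct nat_block_state blk = { 0 };

    update_free_bit(&blk, 7, true);     /* nid becomes free */
    update_free_bit(&blk, 7, false);    /* nid gets allocated */
    printf("free in block: %u\n", blk.free_count);
    return 0;
}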
2205 struct f2fs_nm_info *nm_i = NM_I(sbi);
2224 spin_lock(&nm_i->nid_list_lock);
2248 ne = __lookup_nat_cache(nm_i, nid);
2253 e = __lookup_free_nid_list(nm_i, nid);
2266 nm_i->available_nids++;
2268 spin_unlock(&nm_i->nid_list_lock);
2278 struct f2fs_nm_info *nm_i = NM_I(sbi);
2282 spin_lock(&nm_i->nid_list_lock);
2283 i = __lookup_free_nid_list(nm_i, nid);
2288 spin_unlock(&nm_i->nid_list_lock);
2297 struct f2fs_nm_info *nm_i = NM_I(sbi);
2303 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2308 if (unlikely(start_nid >= nm_i->max_nid))
2351 struct f2fs_nm_info *nm_i = NM_I(sbi);
2355 down_read(&nm_i->nat_tree_lock);
2357 for (i = 0; i < nm_i->nat_blocks; i++) {
2358 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2360 if (!nm_i->free_nid_count[i])
2363 idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2371 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2378 up_read(&nm_i->nat_tree_lock);
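The scan listed just above walks the per-block state built earlier: NAT blocks that were never loaded (nat_block_bitmap clear) or that have no free nids (free_nid_count of zero) are skipped, otherwise the block's free_nid_bitmap is walked bit by bit and candidate nids are collected until the FREE_NID cache is full. A compact userspace sketch of that loop follows; the struct layout, byte-addressed bitmap and all names are illustrative assumptions.

#include <stdio.h>

#define NIDS_PER_BLOCK 455      /* assumed NAT entries per block */
#define MAX_FREE 16             /* stand-in for MAX_FREE_NIDS */

/* Sketch of the bitmap scan above: skip NAT blocks that were never loaded
 * or have no free nids, otherwise walk the per-block bitmap and collect
 * candidate nids until enough are cached.  Illustrative names. */
struct blk {
    int loaded;                                     /* ~ nat_block_bitmap */
    unsigned int free_count;                        /* ~ free_nid_count[] */
    unsigned char freemap[(NIDS_PER_BLOCK + 7) / 8];/* ~ free_nid_bitmap[] */
};

static unsigned int scan_free_nids(struct blk *blks, unsigned int nblocks,
                                   unsigned int *out, unsigned int max)
{
    unsigned int found = 0;

    for (unsigned int b = 0; b < nblocks && found < max; b++) {
        if (!blks[b].loaded || !blks[b].free_count)
            continue;
        for (unsigned int i = 0; i < NIDS_PER_BLOCK && found < max; i++)
            if (blks[b].freemap[i / 8] & (1u << (i % 8)))
                out[found++] = b * NIDS_PER_BLOCK + i;
    }
    return found;
}

int main(void)
{
    struct blk blks[2] = { { .loaded = 1, .free_count = 1 } };
    unsigned int nids[MAX_FREE];

    blks[0].freemap[0] = 0x10;          /* nid offset 4 is free */
    printf("found %u free nid(s)\n", scan_free_nids(blks, 2, nids, MAX_FREE));
    return 0;
}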
2384 struct f2fs_nm_info *nm_i = NM_I(sbi);
2386 nid_t nid = nm_i->next_scan_nid;
2388 if (unlikely(nid >= nm_i->max_nid))
2395 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2405 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2413 down_read(&nm_i->nat_tree_lock);
2417 nm_i->nat_block_bitmap)) {
2428 up_read(&nm_i->nat_tree_lock);
2435 if (unlikely(nid >= nm_i->max_nid))
2443 nm_i->next_scan_nid = nid;
2448 up_read(&nm_i->nat_tree_lock);
2450 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2451 nm_i->ra_nid_pages, META_NAT, false);
2474 struct f2fs_nm_info *nm_i = NM_I(sbi);
2482 spin_lock(&nm_i->nid_list_lock);
2484 if (unlikely(nm_i->available_nids == 0)) {
2485 spin_unlock(&nm_i->nid_list_lock);
2490 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2491 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2492 i = list_first_entry(&nm_i->free_nid_list,
2497 nm_i->available_nids--;
2501 spin_unlock(&nm_i->nid_list_lock);
2504 spin_unlock(&nm_i->nid_list_lock);
2517 struct f2fs_nm_info *nm_i = NM_I(sbi);
2520 spin_lock(&nm_i->nid_list_lock);
2521 i = __lookup_free_nid_list(nm_i, nid);
2524 spin_unlock(&nm_i->nid_list_lock);
2534 struct f2fs_nm_info *nm_i = NM_I(sbi);
2541 spin_lock(&nm_i->nid_list_lock);
2542 i = __lookup_free_nid_list(nm_i, nid);
2552 nm_i->available_nids++;
2556 spin_unlock(&nm_i->nid_list_lock);
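The three routines listed just above walk a nid through a small state machine: allocation takes the first entry off free_nid_list, charges available_nids and moves the entry to the PREALLOC state; completing the allocation drops the tracking entry; failing it puts the nid back on the free list and refunds available_nids. A compact sketch of those transitions follows, with invented names and plain counters standing in for the radix tree, list and spinlock.

#include <assert.h>
#include <stdio.h>

/* Sketch of the free-nid state transitions above.  Counters stand in for
 * the free list and radix tree; names are illustrative. */
enum nid_state { NID_FREE, NID_PREALLOC, NID_GONE };

struct nid_pool {
    unsigned int free_cnt;      /* ~ nid_cnt[FREE_NID] */
    unsigned int prealloc_cnt;  /* ~ nid_cnt[PREALLOC_NID] */
    unsigned int available;     /* ~ available_nids */
};

static int alloc_nid(struct nid_pool *p, enum nid_state *st)
{
    if (!p->available || !p->free_cnt)
        return -1;                      /* nothing to hand out */
    p->free_cnt--;
    p->prealloc_cnt++;
    p->available--;
    *st = NID_PREALLOC;
    return 0;
}

static void alloc_nid_done(struct nid_pool *p, enum nid_state *st)
{
    assert(*st == NID_PREALLOC);
    p->prealloc_cnt--;                  /* tracking entry is dropped */
    *st = NID_GONE;
}

static void alloc_nid_failed(struct nid_pool *p, enum nid_state *st)
{
    assert(*st == NID_PREALLOC);
    p->prealloc_cnt--;
    p->free_cnt++;                      /* nid goes back on the free list */
    p->available++;
    *st = NID_FREE;
}

int main(void)
{
    struct nid_pool pool = { .free_cnt = 1, .available = 1 };
    enum nid_state st = NID_FREE;

    if (!alloc_nid(&pool, &st))
        alloc_nid_failed(&pool, &st);   /* pretend the caller had to back out */
    printf("free=%u prealloc=%u available=%u\n",
           pool.free_cnt, pool.prealloc_cnt, pool.available);
    return 0;
}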
2564 struct f2fs_nm_info *nm_i = NM_I(sbi);
2567 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2570 if (!mutex_trylock(&nm_i->build_lock))
2573 while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2577 spin_lock(&nm_i->nid_list_lock);
2578 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2580 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2587 spin_unlock(&nm_i->nid_list_lock);
2590 mutex_unlock(&nm_i->build_lock);
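The shrinker listed above is deliberately opportunistic: it bails out early if the free-nid cache is already at or under MAX_FREE_NIDS, only proceeds when it can take build_lock without blocking, and stops as soon as either the shrink budget or the excess runs out. A userspace sketch of that shape follows; the lock, counter and function names are illustrative.

#include <pthread.h>
#include <stdio.h>

#define MAX_FREE 8              /* stand-in for MAX_FREE_NIDS */

/* Sketch of the opportunistic shrink above: skip the work entirely unless
 * the cache is over its cap and the build lock is free right now, then
 * trim bounded by both the budget and the cap.  Illustrative names. */
static pthread_mutex_t build_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int free_cnt = 20;      /* pretend 20 free nids are cached */

static unsigned int try_shrink_free_nids(unsigned int nr_shrink)
{
    unsigned int shrunk = 0;

    if (free_cnt <= MAX_FREE)
        return 0;                       /* nothing over the cap */
    if (pthread_mutex_trylock(&build_lock))
        return 0;                       /* someone is building; don't block */

    while (nr_shrink && free_cnt > MAX_FREE) {
        free_cnt--;                     /* drop one cached free nid */
        nr_shrink--;
        shrunk++;
    }
    pthread_mutex_unlock(&build_lock);
    return shrunk;
}

int main(void)
{
    printf("shrunk %u, %u left\n", try_shrink_free_nids(100), free_cnt);
    return 0;
}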
2784 struct f2fs_nm_info *nm_i = NM_I(sbi);
2800 ne = __lookup_nat_cache(nm_i, nid);
2803 __init_nat_entry(nm_i, ne, &raw_ne, true);
2813 spin_lock(&nm_i->nid_list_lock);
2814 nm_i->available_nids--;
2815 spin_unlock(&nm_i->nid_list_lock);
2818 __set_nat_cache_dirty(nm_i, ne);
2845 struct f2fs_nm_info *nm_i = NM_I(sbi);
2863 __set_bit_le(nat_index, nm_i->empty_nat_bits);
2864 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2868 __clear_bit_le(nat_index, nm_i->empty_nat_bits);
2870 __set_bit_le(nat_index, nm_i->full_nat_bits);
2872 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2955 struct f2fs_nm_info *nm_i = NM_I(sbi);
2970 down_write(&nm_i->nat_tree_lock);
2972 up_write(&nm_i->nat_tree_lock);
2975 if (!nm_i->nat_cnt[DIRTY_NAT])
2978 down_write(&nm_i->nat_tree_lock);
2987 nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
2990 while ((found = __gang_lookup_nat_set(nm_i,
3006 up_write(&nm_i->nat_tree_lock);
3015 struct f2fs_nm_info *nm_i = NM_I(sbi);
3016 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3024 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3025 nm_i->nat_bits = f2fs_kvzalloc(sbi,
3026 nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
3027 if (!nm_i->nat_bits)
3031 nm_i->nat_bits_blocks;
3032 for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3039 memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
3045 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3050 nm_i->full_nat_bits = nm_i->nat_bits + 8;
3051 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
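The nat_bits bookkeeping listed above describes a single flat buffer: 8 bytes of checkpoint version, then one bit per NAT block in full_nat_bits, then one bit per NAT block in empty_nat_bits, with at most one of the two bits set per block depending on how many valid entries it holds. The userspace sketch below lays out the same buffer and classifies one block; the block count, entries-per-block constant and helper names are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NAT_BLOCKS      64      /* assumed number of NAT blocks */
#define NIDS_PER_BLOCK  455     /* assumed NAT entries per block */
#define NAT_BITS_BYTES  (NAT_BLOCKS / 8)

/* Sketch of the nat_bits layout above: 8 bytes of checkpoint version,
 * then a full-block bitmap, then an empty-block bitmap, each one bit per
 * NAT block.  Helper names are invented for the sketch. */
static uint8_t nat_bits[8 + 2 * NAT_BITS_BYTES];
static uint8_t *full_bits  = nat_bits + 8;
static uint8_t *empty_bits = nat_bits + 8 + NAT_BITS_BYTES;

static void set_bit8(uint8_t *map, unsigned int nr)   { map[nr / 8] |=  (1u << (nr % 8)); }
static void clear_bit8(uint8_t *map, unsigned int nr) { map[nr / 8] &= ~(1u << (nr % 8)); }

/* classify one NAT block after it has been written back */
static void update_nat_bits(unsigned int nat_index, unsigned int valid)
{
    if (valid == 0) {
        set_bit8(empty_bits, nat_index);
        clear_bit8(full_bits, nat_index);
        return;
    }
    clear_bit8(empty_bits, nat_index);
    if (valid == NIDS_PER_BLOCK)
        set_bit8(full_bits, nat_index);
    else
        clear_bit8(full_bits, nat_index);
}

int main(void)
{
    uint64_t cp_ver = 42;               /* pretend checkpoint version */

    memcpy(nat_bits, &cp_ver, sizeof(cp_ver));
    update_nat_bits(3, 0);              /* block 3 is empty */
    update_nat_bits(5, NIDS_PER_BLOCK); /* block 5 is full */
    printf("empty map byte 0: %#x, full map byte 0: %#x\n",
           empty_bits[0], full_bits[0]);
    return 0;
}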
3059 struct f2fs_nm_info *nm_i = NM_I(sbi);
3066 for (i = 0; i < nm_i->nat_blocks; i++) {
3067 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3068 if (i >= nm_i->nat_blocks)
3071 __set_bit_le(i, nm_i->nat_block_bitmap);
3082 for (i = 0; i < nm_i->nat_blocks; i++) {
3083 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3084 if (i >= nm_i->nat_blocks)
3087 __set_bit_le(i, nm_i->nat_block_bitmap);
3094 struct f2fs_nm_info *nm_i = NM_I(sbi);
3099 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3103 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3104 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3107 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3109 nm_i->nid_cnt[FREE_NID] = 0;
3110 nm_i->nid_cnt[PREALLOC_NID] = 0;
3111 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3112 nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3113 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3115 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3116 INIT_LIST_HEAD(&nm_i->free_nid_list);
3117 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3118 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3119 INIT_LIST_HEAD(&nm_i->nat_entries);
3120 spin_lock_init(&nm_i->nat_list_lock);
3122 mutex_init(&nm_i->build_lock);
3123 spin_lock_init(&nm_i->nid_list_lock);
3124 init_rwsem(&nm_i->nat_tree_lock);
3126 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3127 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3129 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3131 if (!nm_i->nat_bitmap)
3139 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3141 if (!nm_i->nat_bitmap_mir)
3150 struct f2fs_nm_info *nm_i = NM_I(sbi);
3153 nm_i->free_nid_bitmap =
3155 nm_i->nat_blocks),
3157 if (!nm_i->free_nid_bitmap)
3160 for (i = 0; i < nm_i->nat_blocks; i++) {
3161 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3163 if (!nm_i->free_nid_bitmap[i])
3167 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3169 if (!nm_i->nat_block_bitmap)
3172 nm_i->free_nid_count =
3174 nm_i->nat_blocks),
3176 if (!nm_i->free_nid_count)
3206 struct f2fs_nm_info *nm_i = NM_I(sbi);
3213 if (!nm_i)
3217 spin_lock(&nm_i->nid_list_lock);
3218 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3220 spin_unlock(&nm_i->nid_list_lock);
3222 spin_lock(&nm_i->nid_list_lock);
3224 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3225 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3226 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3227 spin_unlock(&nm_i->nid_list_lock);
3230 down_write(&nm_i->nat_tree_lock);
3231 while ((found = __gang_lookup_nat_cache(nm_i,
3237 spin_lock(&nm_i->nat_list_lock);
3239 spin_unlock(&nm_i->nat_list_lock);
3241 __del_from_nat_cache(nm_i, natvec[idx]);
3244 f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3248 while ((found = __gang_lookup_nat_set(nm_i,
3256 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3260 up_write(&nm_i->nat_tree_lock);
3262 kvfree(nm_i->nat_block_bitmap);
3263 if (nm_i->free_nid_bitmap) {
3266 for (i = 0; i < nm_i->nat_blocks; i++)
3267 kvfree(nm_i->free_nid_bitmap[i]);
3268 kvfree(nm_i->free_nid_bitmap);
3270 kvfree(nm_i->free_nid_count);
3272 kvfree(nm_i->nat_bitmap);
3273 kvfree(nm_i->nat_bits);
3275 kvfree(nm_i->nat_bitmap_mir);
3278 kfree(nm_i);