Lines matching refs:et (references to the struct extent_tree pointer et)
236 struct extent_tree *et, struct extent_info *ei,
248 en->et = et;
251 rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
252 atomic_inc(&et->node_cnt);
258 struct extent_tree *et, struct extent_node *en)
260 rb_erase_cached(&en->rb_node, &et->root);
261 atomic_dec(&et->node_cnt);
264 if (et->cached_en == en)
265 et->cached_en = NULL;
276 struct extent_tree *et, struct extent_node *en)
283 __detach_extent_node(sbi, et, en);
289 struct extent_tree *et;
293 et = radix_tree_lookup(&sbi->extent_tree_root, ino);
294 if (!et) {
295 et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
296 f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
297 memset(et, 0, sizeof(struct extent_tree));
298 et->ino = ino;
299 et->root = RB_ROOT_CACHED;
300 et->cached_en = NULL;
301 rwlock_init(&et->lock);
302 INIT_LIST_HEAD(&et->list);
303 atomic_set(&et->node_cnt, 0);
307 list_del_init(&et->list);
312 F2FS_I(inode)->extent_tree = et;
314 return et;
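Lines 289-314 are __grab_extent_tree(): look the per-inode tree up in the sbi->extent_tree_root radix tree by ino and, if it is missing, allocate one from extent_tree_slab, zero it, initialise the cached root, hint, lock, list head and node count, and insert it before attaching it to F2FS_I(inode)->extent_tree. A sketch of that lookup-or-create pattern, assuming a hypothetical hash table in place of the radix tree:

/* Lookup-or-create sketch mirroring __grab_extent_tree(); the hash
 * table is a stand-in for the sbi->extent_tree_root radix tree and
 * every name here is hypothetical. */
#include <stdlib.h>

#define TBL_SIZE 64

struct itree {
        unsigned long ino;
        unsigned node_cnt;
        struct itree *hash_next;
};

static struct itree *tbl[TBL_SIZE];

static struct itree *grab_itree(unsigned long ino)
{
        unsigned h = ino % TBL_SIZE;
        struct itree *t;

        for (t = tbl[h]; t; t = t->hash_next)
                if (t->ino == ino)
                        return t;               /* already present */

        /* not found: allocate and initialise, as lines 295-303 do */
        t = calloc(1, sizeof(*t));              /* memset(et, 0, ...) */
        if (!t)
                return NULL;
        t->ino = ino;
        t->hash_next = tbl[h];
        tbl[h] = t;                             /* radix-tree insert analogue */
        return t;
}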
318 struct extent_tree *et, struct extent_info *ei)
320 struct rb_node **p = &et->root.rb_root.rb_node;
323 en = __attach_extent_node(sbi, et, ei, NULL, p, true);
327 et->largest = en->ei;
328 et->cached_en = en;
333 struct extent_tree *et)
337 unsigned int count = atomic_read(&et->node_cnt);
339 node = rb_first_cached(&et->root);
343 __release_extent_node(sbi, et, en);
347 return count - atomic_read(&et->node_cnt);
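Lines 333-347 are __free_extent_tree(): walk the tree from rb_first_cached(), release every node, and report how many were freed by comparing et->node_cnt before and after. A simplified model of that drain-and-count loop (a list stands in for the rb-tree; names are hypothetical):

#include <stdlib.h>

struct fnode { struct fnode *next; };
struct ftree { struct fnode *first; unsigned node_cnt; };

static unsigned free_all_nodes(struct ftree *t)
{
        unsigned before = t->node_cnt;          /* line 337 */

        while (t->first) {                      /* rb_first_cached() loop */
                struct fnode *n = t->first;

                t->first = n->next;
                free(n);
                t->node_cnt--;                  /* __release_extent_node() */
        }
        return before - t->node_cnt;            /* line 347 */
}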
350 static void __drop_largest_extent(struct extent_tree *et,
353 if (fofs < et->largest.fofs + et->largest.len &&
354 fofs + len > et->largest.fofs) {
355 et->largest.len = 0;
356 et->largest_updated = true;
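__drop_largest_extent() at lines 350-356 only invalidates the cached largest extent when the updated range [fofs, fofs + len) actually overlaps it; the condition at lines 353-354 is the standard half-open interval-overlap test. A standalone illustration of that test:

/* Two half-open ranges [a, a+alen) and [b, b+blen) intersect iff each
 * one starts before the other one ends. */
#include <assert.h>
#include <stdbool.h>

static bool ranges_overlap(unsigned a, unsigned alen,
                           unsigned b, unsigned blen)
{
        return a < b + blen && b < a + alen;
}

int main(void)
{
        assert(ranges_overlap(10, 5, 12, 1));   /* [10,15) vs [12,13) */
        assert(!ranges_overlap(10, 5, 15, 3));  /* touching, no overlap */
        return 0;
}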
365 struct extent_tree *et;
380 et = __grab_extent_tree(inode);
387 write_lock(&et->lock);
388 if (atomic_read(&et->node_cnt))
391 en = __init_extent_tree(sbi, et, &ei);
398 write_unlock(&et->lock);
413 struct extent_tree *et = F2FS_I(inode)->extent_tree;
417 if (!et)
422 read_lock(&et->lock);
424 if (et->largest.fofs <= pgofs &&
425 et->largest.fofs + et->largest.len > pgofs) {
426 *ei = et->largest;
432 en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
433 (struct rb_entry *)et->cached_en, pgofs);
437 if (en == et->cached_en)
446 et->cached_en = en;
452 read_unlock(&et->lock);
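The lookup path at lines 413-452 runs under the read lock and tries the cheapest sources first: the cached largest extent, then the rb-tree search seeded with the et->cached_en hint (f2fs_lookup_rb_tree distinguishes a hint hit from a full tree hit for the stats), and the node that answers becomes the new cached_en. A simplified sketch of that ordering, assuming a plain sorted array instead of the rb-tree and folding the hint check into the caller (all names illustrative):

#include <stdbool.h>
#include <stddef.h>

struct xext { unsigned fofs, len, blk; };

struct xcache {
        struct xext largest;            /* like et->largest   */
        const struct xext *cached;      /* like et->cached_en */
        const struct xext *ext;         /* sorted by fofs     */
        size_t nr;
};

static bool covers(const struct xext *e, unsigned pgofs)
{
        return e && e->len && e->fofs <= pgofs && pgofs < e->fofs + e->len;
}

static bool xlookup(struct xcache *c, unsigned pgofs, struct xext *out)
{
        size_t i;

        if (covers(&c->largest, pgofs)) {       /* lines 424-426 */
                *out = c->largest;
                return true;
        }
        if (covers(c->cached, pgofs)) {         /* hint hit */
                *out = *c->cached;
                return true;
        }
        for (i = 0; i < c->nr; i++) {           /* stands in for rb search */
                if (covers(&c->ext[i], pgofs)) {
                        c->cached = &c->ext[i]; /* update hint, line 446 */
                        *out = c->ext[i];
                        return true;
                }
        }
        return false;
}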
459 struct extent_tree *et, struct extent_info *ei,
476 __release_extent_node(sbi, et, prev_ex);
484 __try_update_largest_extent(et, en);
489 et->cached_en = en;
496 struct extent_tree *et, struct extent_info *ei,
513 p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
516 en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
520 __try_update_largest_extent(et, en);
525 et->cached_en = en;
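Lines 459-525 are the two update helpers: __try_merge_extent_node() widens an adjacent previous or next node in place (releasing whichever node becomes redundant), while __insert_extent_tree() attaches a fresh node at the slot found by f2fs_lookup_rb_tree_for_insert(); both refresh et->largest through __try_update_largest_extent() and leave et->cached_en pointing at the touched node. A small sketch of the merge-or-insert decision, assuming extents merge only when both the file offsets and the block addresses are contiguous (hypothetical types, not the kernel structures):

#include <stdbool.h>

struct xext { unsigned fofs, len, blk; };

/* true if 'prev' immediately precedes 'next' in both spaces */
static bool back_to_back(const struct xext *prev, const struct xext *next)
{
        return prev->fofs + prev->len == next->fofs &&
               prev->blk  + prev->len == next->blk;
}

/* Try to widen 'prev' to cover 'new'; return false to tell the caller
 * to insert a fresh node instead (the __insert_extent_tree() path). */
static bool try_merge_prev(struct xext *prev, const struct xext *new)
{
        if (!back_to_back(prev, new))
                return false;
        prev->len += new->len;
        return true;
}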
534 struct extent_tree *et = F2FS_I(inode)->extent_tree;
544 if (!et)
549 write_lock(&et->lock);
552 write_unlock(&et->lock);
556 prev = et->largest;
563 __drop_largest_extent(et, fofs, len);
566 en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
567 (struct rb_entry *)et->cached_en, fofs,
597 en1 = __insert_extent_tree(sbi, et, &ei,
617 __try_update_largest_extent(et, en);
619 __release_extent_node(sbi, et, en);
637 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
638 __insert_extent_tree(sbi, et, &ei,
644 et->largest.len < F2FS_MIN_EXTENT_LEN) {
645 et->largest.len = 0;
646 et->largest_updated = true;
652 __free_extent_tree(sbi, et);
654 if (et->largest_updated) {
655 et->largest_updated = false;
659 write_unlock(&et->lock);
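f2fs_update_extent_tree_range() (lines 534-659) does the heavy lifting under the write lock: drop the cached largest extent if the range overlaps it, walk every overlapping node and trim, split or release it, then merge the new mapping into a neighbour or insert it, and finally clear a largest extent that fell below F2FS_MIN_EXTENT_LEN before unlocking. A condensed, lock-free model of that punch-then-reinsert flow over a fixed array (illustrative names only, not the kernel code):

#include <assert.h>

#define MAX_EXT 16

struct xext { unsigned fofs, len, blk; };
struct xset { struct xext e[MAX_EXT]; int nr; };

static void punch_range(struct xset *s, unsigned fofs, unsigned len)
{
        unsigned pend = fofs + len;
        int i = 0;

        while (i < s->nr) {
                struct xext *e = &s->e[i];
                unsigned end = e->fofs + e->len;

                if (end <= fofs || e->fofs >= pend) {
                        i++;                    /* no overlap, keep it */
                } else if (e->fofs < fofs && end > pend) {
                        /* hole in the middle: keep the head, append the
                         * tail as a new extent (like the en1 insert at
                         * line 597) */
                        assert(s->nr < MAX_EXT);
                        s->e[s->nr].fofs = pend;
                        s->e[s->nr].len  = end - pend;
                        s->e[s->nr].blk  = e->blk + (pend - e->fofs);
                        s->nr++;
                        e->len = fofs - e->fofs;
                        i++;
                } else if (e->fofs < fofs) {
                        e->len = fofs - e->fofs;        /* trim the tail */
                        i++;
                } else if (end > pend) {
                        unsigned cut = pend - e->fofs;  /* trim the head */
                        e->fofs += cut;
                        e->blk  += cut;
                        e->len  -= cut;
                        i++;
                } else {
                        /* fully covered: drop it, like the
                         * __release_extent_node() call at line 619 */
                        s->e[i] = s->e[--s->nr];
                }
        }
}

static void update_range(struct xset *s, unsigned fofs, unsigned len,
                         unsigned blk)
{
        punch_range(s, fofs, len);
        if (blk) {                      /* non-zero block: cache new extent */
                assert(s->nr < MAX_EXT);
                s->e[s->nr].fofs = fofs;
                s->e[s->nr].len  = len;
                s->e[s->nr].blk  = blk;
                s->nr++;
        }
}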
667 struct extent_tree *et, *next;
682 list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
683 if (atomic_read(&et->node_cnt)) {
684 write_lock(&et->lock);
685 node_cnt += __free_extent_tree(sbi, et);
686 write_unlock(&et->lock);
688 f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
689 list_del_init(&et->list);
690 radix_tree_delete(&sbi->extent_tree_root, et->ino);
691 kmem_cache_free(extent_tree_slab, et);
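The shrinker path at lines 667-691 drains sbi->zombie_list: for each retired tree it frees the remaining nodes under that tree's write lock, asserts the tree is now empty (f2fs_bug_on), unlinks it from the list and the radix tree, and returns it to the slab. The list_for_each_entry_safe() form matters because the current entry is freed mid-walk; a plain-C sketch of that safe-iteration drain (hypothetical types):

#include <stdlib.h>

struct ztree {
        unsigned node_cnt;
        struct ztree *next;
};

static unsigned drain_zombies(struct ztree **head)
{
        struct ztree *t = *head, *next;
        unsigned freed = 0;

        while (t) {
                next = t->next;         /* save: t is freed below */
                freed += t->node_cnt;   /* __free_extent_tree() analogue */
                t->node_cnt = 0;
                free(t);                /* kmem_cache_free() analogue */
                t = next;
        }
        *head = NULL;
        return freed;
}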
715 et = en->et;
716 if (!write_trylock(&et->lock)) {
725 __detach_extent_node(sbi, et, en);
727 write_unlock(&et->lock);
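Lines 715-727 show the node-shrinking side: the shrinker takes the owning tree's lock with write_trylock() and simply skips the node if the lock is contended, so reclaim never blocks behind a writer. A short sketch of that skip-if-busy pattern, using a POSIX rwlock as a stand-in for the kernel rwlock_t:

#include <pthread.h>
#include <stdbool.h>

static bool try_reclaim(pthread_rwlock_t *lock, void (*reclaim)(void *),
                        void *obj)
{
        if (pthread_rwlock_trywrlock(lock) != 0)
                return false;           /* busy: leave it for the next pass */
        reclaim(obj);                   /* __detach_extent_node() analogue */
        pthread_rwlock_unlock(lock);
        return true;
}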
744 struct extent_tree *et = F2FS_I(inode)->extent_tree;
747 if (!et || !atomic_read(&et->node_cnt))
750 write_lock(&et->lock);
751 node_cnt = __free_extent_tree(sbi, et);
752 write_unlock(&et->lock);
760 struct extent_tree *et = F2FS_I(inode)->extent_tree;
766 write_lock(&et->lock);
768 __free_extent_tree(sbi, et);
769 if (et->largest.len) {
770 et->largest.len = 0;
773 write_unlock(&et->lock);
781 struct extent_tree *et = F2FS_I(inode)->extent_tree;
784 if (!et)
788 atomic_read(&et->node_cnt)) {
790 list_add_tail(&et->list, &sbi->zombie_list);
801 f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
803 kmem_cache_free(extent_tree_slab, et);
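Finally, lines 781-803 are the eviction-time teardown: if the tree still holds nodes (the check at line 788 has further conditions elided from this listing), it is parked on sbi->zombie_list for the shrinker; otherwise node_cnt must already be zero and the tree goes straight back to extent_tree_slab. A sketch of that defer-or-free decision (hypothetical types, no locking):

#include <stdlib.h>

struct ztree2 { unsigned node_cnt; struct ztree2 *zombie_next; };

static void destroy_tree(struct ztree2 *t, struct ztree2 **zombie_head)
{
        if (t->node_cnt) {
                /* still holds nodes: park it for the shrinker, like the
                 * list_add_tail() onto sbi->zombie_list at line 790 */
                t->zombie_next = *zombie_head;
                *zombie_head = t;
                return;
        }
        /* empty tree: safe to free right away (f2fs_bug_on() at line
         * 801 asserts exactly this in the kernel) */
        free(t);
}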