Lines matching references to "em" in fs/btrfs/extent_map.c (btrfs extent map code). Elided context in the excerpts below is marked with /* ... */.
/* alloc_extent_map(): allocate a new extent map, returned with a
 * reference count of one. */
struct extent_map *alloc_extent_map(void)
{
        struct extent_map *em;

        em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
        if (!em)
                return NULL;
        RB_CLEAR_NODE(&em->rb_node);
        em->flags = 0;
        em->compress_type = BTRFS_COMPRESS_NONE;
        em->generation = 0;
        refcount_set(&em->refs, 1);
        INIT_LIST_HEAD(&em->list);
        return em;
}
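A typical caller pairs the allocation with free_extent_map() once it is done with its reference. A minimal sketch of such a caller, assuming kernel context; the offsets and the 4 KiB length are illustrative only:

        struct extent_map *em;

        em = alloc_extent_map();
        if (!em)
                return -ENOMEM;
        em->start = 0;                          /* logical file offset */
        em->len = 4096;                         /* hypothetical 4 KiB extent */
        em->block_start = EXTENT_MAP_HOLE;      /* example: a hole, no disk blocks */
        /* ... insert into a tree, or simply drop the reference: */
        free_extent_map(em);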
/*
 * free_extent_map - drop the reference count of an extent_map
 * @em: extent map being released
 *
 * Drops the reference on @em by one and frees the structure if the
 * reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
        if (!em)
                return;
        WARN_ON(refcount_read(&em->refs) == 0);
        if (refcount_dec_and_test(&em->refs)) {
                WARN_ON(extent_map_in_tree(em));
                WARN_ON(!list_empty(&em->list));
                if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
                        kfree(em->map_lookup);
                kmem_cache_free(extent_map_cache, em);
        }
}
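Because the structure is shared by reference count, each holder drops only its own reference, and the slab object goes back to extent_map_cache when the last one is released. A sketch of the pattern (kernel context assumed; purely illustrative):

        refcount_inc(&em->refs);        /* hand a second reference to another holder */
        /* ... both holders use em ... */
        free_extent_map(em);            /* refs 2 -> 1, structure survives */
        free_extent_map(em);            /* refs 1 -> 0, structure is freed */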
static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
        /* ... */
        u64 end = range_end(em->start, em->len);

        /* Walk down the rbtree, which is keyed by start offset. */
        if (em->start < entry->start) {
                /* ... descend left ... */
        } else if (em->start >= extent_map_end(entry)) {
                /* ... descend right ... */
        }
        /* Scan the neighbours in both directions; ranges are half-open
         * [start, start + len), and any overlap refuses the insert. */
        while (parent && em->start >= extent_map_end(entry)) { /* ... */ }
        if (end > entry->start && em->start < extent_map_end(entry))
                return -EEXIST;
        while (parent && em->start < entry->start) { /* ... */ }
        if (end > entry->start && em->start < extent_map_end(entry))
                return -EEXIST;
        rb_link_node(&em->rb_node, orig_parent, p);
        rb_insert_color_cached(&em->rb_node, root, leftmost);
        return 0;
}
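The overlap tests above all use the same half-open predicate: [start, start + len) collides with an entry iff each range starts before the other one ends. A small stand-alone model of just that predicate (user-space C; the function name is hypothetical):

#include <assert.h>
#include <stdbool.h>

/* Model of "end > entry->start && em->start < extent_map_end(entry)". */
static bool ranges_overlap(unsigned long long a0, unsigned long long a1,
                           unsigned long long b0, unsigned long long b1)
{
        return a1 > b0 && a0 < b1;
}

int main(void)
{
        assert(ranges_overlap(0, 4096, 4095, 8192));    /* share one byte */
        assert(!ranges_overlap(0, 4096, 4096, 8192));   /* merely adjacent */
        return 0;
}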
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
        /* ... */
        /* More than 2 refs (one for the tree, one for us) means another
         * task is using the map; leave it alone. */
        if (refcount_read(&em->refs) > 2)
                return;

        /* Merge backwards with an adjacent, compatible previous map. */
        if (em->start != 0) {
                rb = rb_prev(&em->rb_node);
                /* ... */
                if (rb && mergable_maps(merge, em)) {
                        em->start = merge->start;
                        em->orig_start = merge->orig_start;
                        em->len += merge->len;
                        em->block_len += merge->block_len;
                        em->block_start = merge->block_start;
                        em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
                        em->mod_start = merge->mod_start;
                        em->generation = max(em->generation, merge->generation);
                        /* ... erase @merge from the tree and free it ... */
                }
        }

        /* Merge forwards with the next map; em->start stays put. */
        rb = rb_next(&em->rb_node);
        /* ... */
        if (rb && mergable_maps(em, merge)) {
                em->len += merge->len;
                em->block_len += merge->block_len;
                /* ... */
                em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
                em->generation = max(em->generation, merge->generation);
                /* ... erase @merge and free it ... */
        }
}
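The mod_start/mod_len arithmetic keeps the end of the modified range fixed while the start absorbs the neighbour. A worked example of the backward-merge line with illustrative numbers (user-space C):

#include <assert.h>

int main(void)
{
        /* Backward merge: merge covers [0, 4096), em covers [4096, 8192). */
        unsigned long long em_mod_start = 4096, em_mod_len = 4096;
        unsigned long long merge_mod_start = 0;

        /* em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start; */
        em_mod_len = (em_mod_len + em_mod_start) - merge_mod_start;
        em_mod_start = merge_mod_start;

        assert(em_mod_start == 0 && em_mod_len == 8192);    /* now [0, 8192) */
        return 0;
}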
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen)
{
        struct extent_map *em;
        bool prealloc = false;
        /* ... */
        em = lookup_extent_mapping(tree, start, len);
        WARN_ON(!em || em->start != start);
        if (!em)
                goto out;

        em->generation = gen;
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);
        em->mod_start = em->start;
        em->mod_len = em->len;
        if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
                prealloc = true;
                clear_bit(EXTENT_FLAG_FILLING, &em->flags);
        }

        try_merge_map(tree, em);
        if (prealloc) {
                em->mod_start = em->start;
                em->mod_len = em->len;
        }

        free_extent_map(em);    /* drop the reference taken by the lookup */
        /* ... */
}
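EXTENT_FLAG_PINNED holds the map stable while its data is still being written out; once the write completes, unpin_extent_cache() records the transaction generation and lets the map merge. A minimal sketch of a call site, assuming kernel context; inode, trans, start and len are stand-in variables here:

        /* After the ordered write for [start, start + len) has completed: */
        unpin_extent_cache(&BTRFS_I(inode)->extent_tree, start, len,
                           trans->transid);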
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
        clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
        if (extent_map_in_tree(em))
                try_merge_map(tree, em);
}
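EXTENT_FLag_LOGGING plays a similar role for fsync: while an extent sits on a log list it must not merge. A hedged sketch of the unwinding side, assuming the fsync code pinned the map with an extra reference while logging it:

        write_lock(&tree->lock);
        clear_em_logging(tree, em);     /* the map may merge again now */
        free_extent_map(em);            /* drop the reference taken for logging */
        write_unlock(&tree->lock);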
static void setup_extent_mapping(struct extent_map_tree *tree,
                                 struct extent_map *em,
                                 int modified)
{
        refcount_inc(&em->refs);        /* the tree takes its own reference */
        em->mod_start = em->start;
        em->mod_len = em->len;
        if (modified)
                list_move(&em->list, &tree->modified_extents);
        else
                try_merge_map(tree, em);
}
static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
        struct map_lookup *map = em->map_lookup;
        u64 stripe_size = em->orig_block_len;
        /* ... mark stripe_size bytes at each stripe's physical offset in the
         * owning device's alloc_state tree ... */
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
        struct map_lookup *map = em->map_lookup;
        u64 stripe_size = em->orig_block_len;
        /* ... clear the same per-device ranges ... */
}
/* add_extent_mapping - insert @em (the map to insert) into @tree or perform
 * a simple forward/backward merge with existing mappings; the caller must
 * hold the tree lock for writing. */
int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em, int modified)
{
        int ret = tree_insert(&tree->map, em);

        if (ret)
                return ret;
        setup_extent_mapping(tree, em, modified);
        if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
                extent_map_device_set_bits(em, CHUNK_ALLOCATED);
                extent_map_device_clear_bits(em, CHUNK_TRIMMED);
        }
        return 0;
}
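Callers hold the tree lock for writing across the insert; passing modified = 1 queues the map on tree->modified_extents for fsync logging instead of merging it straight away. A minimal sketch, assuming kernel context, with error handling trimmed:

        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em, 0);
        write_unlock(&em_tree->lock);
        if (ret == -EEXIST) {
                /* an overlapping mapping is already in the tree */
        }
        free_extent_map(em);    /* drop our reference; the tree keeps its own */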
static struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
                                                  u64 start, u64 len, int strict)
{
        struct extent_map *em;
        /* ... find the rb_node nearest to [start, start + len) ... */
        em = rb_entry(rb_node, struct extent_map, rb_node);
        /* In strict mode the entry must actually overlap the range. */
        if (strict && !(end > em->start && start < extent_map_end(em)))
                return NULL;
        refcount_inc(&em->refs);        /* caller must pair with free_extent_map() */
        return em;
}
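lookup_extent_mapping() and search_extent_mapping() are the strict and non-strict wrappers over this helper. Since a hit comes back with its refcount raised, every successful lookup is paired with a free_extent_map(). Sketch (kernel context assumed):

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        read_unlock(&em_tree->lock);
        if (em) {
                /* ... inspect em->start, em->len, em->block_start ... */
                free_extent_map(em);    /* drop the lookup reference */
        }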
/*
 * remove_extent_mapping - remove an extent_map from the extent tree
 * @em: extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is still in use.
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
        rb_erase_cached(&em->rb_node, &tree->map);
        if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
                list_del_init(&em->list);
        if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
                extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
        RB_CLEAR_NODE(&em->rb_node);
}
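Because no references are dropped here, a teardown that looks a map up and removes it releases two references afterwards: the one the lookup took and the one the tree held. Sketch (kernel context), mirroring the "once for us, once for the tree" pattern used elsewhere in btrfs:

        write_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);        /* +1 for us */
        if (em) {
                remove_extent_mapping(em_tree, em);     /* drops nothing itself */
                free_extent_map(em);                    /* once for us */
                free_extent_map(em);                    /* once for the tree */
        }
        write_unlock(&em_tree->lock);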
static struct extent_map *next_extent_map(struct extent_map *em)
{
        struct rb_node *next = rb_next(&em->rb_node);   /* ... */
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
        struct rb_node *prev = rb_prev(&em->rb_node);   /* ... */
}
static int merge_extent_mapping(struct extent_map_tree *em_tree,
                                struct extent_map *existing,
                                struct extent_map *em,
                                u64 map_start)
{
        /* ... */
        BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
        /* ... pick the neighbours of @existing around map_start ... */

        /* Clamp @em to the hole between the previous and next mappings. */
        start = prev ? extent_map_end(prev) : em->start;
        start = max_t(u64, start, em->start);
        end = next ? next->start : extent_map_end(em);
        end = min_t(u64, end, extent_map_end(em));
        start_diff = start - em->start;
        em->start = start;
        em->len = end - start;
        /* Real, uncompressed extents keep block_start in sync with the
         * trimmed start; compressed extents map as a whole, so skip them. */
        if (em->block_start < EXTENT_MAP_LAST_BYTE &&
            !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
                em->block_start += start_diff;
                em->block_len = em->len;
        }
        return add_extent_mapping(em_tree, em, 0);
}
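The clamping means the inserted map only ever fills the gap next to its neighbours. A worked example with illustrative numbers (user-space C):

#include <assert.h>

int main(void)
{
        /* em wants [0, 16384), but the previous mapping ends at 8192 and
         * the next one starts at 12288, so em is trimmed to the hole. */
        unsigned long long em_start = 0, em_len = 16384;
        unsigned long long prev_end = 8192, next_start = 12288;

        unsigned long long start = prev_end > em_start ? prev_end : em_start;
        unsigned long long end = next_start < em_start + em_len ?
                                 next_start : em_start + em_len;
        unsigned long long start_diff = start - em_start;

        em_start = start;
        em_len = end - start;
        assert(em_start == 8192 && em_len == 4096 && start_diff == 8192);
        return 0;
}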
/* From btrfs_add_extent_mapping(): insert @em, or on -EEXIST reuse or
 * merge with an existing em. */
        struct extent_map *em = *em_in;

        ret = add_extent_mapping(em_tree, em, 0);
        if (ret == -EEXIST) {
                /* ... look up the overlapping map as @existing ... */
                trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
                if (start >= existing->start && start < extent_map_end(existing)) {
                        free_extent_map(em);    /* reuse @existing instead */
                        *em_in = existing;
                } else {
                        u64 orig_start = em->start, orig_len = em->len;
                        /* trim @em to the hole beside @existing and insert */
                        ret = merge_extent_mapping(em_tree, existing, em, start);
                        if (ret) {
                                free_extent_map(em);
                                WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
                                          ret, existing->start, existing->len,
                                          orig_start, orig_len);
                        }
                }
        }