Lines Matching defs:bitmap (each match below is prefixed with its line number in the btrfs free-space-cache source)

67 		if (!info->bitmap) {
578 void *bitmap)
588 entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
607 static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
623 copy_page(io_ctl->cur, bitmap);
683 copy_page(entry->bitmap, io_ctl->cur);
844 e->bitmap = kmem_cache_zalloc(
846 if (!e->bitmap) {
874 * the bitmap entries are added to the cache.
906 if (!info->bitmap) {
1102 e->bitmap);
1106 if (e->bitmap) {
1255 ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
1446 * or freeing the bitmap.
1616 * we could have a bitmap entry and an extent entry
1621 * extent is faster than allocating from a bitmap. So
1622 * if we're inserting a bitmap and we find an entry at
1625 * found a bitmap, we want to go left, or before
1628 if (new_entry->bitmap) {
1629 if (info->bitmap) {
1635 if (!info->bitmap) {
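The matches around 1616-1635 describe the insertion rule: a bitmap entry and an extent entry may share a start offset, and the extent entry is kept to the left because allocating from it is cheaper than searching a bitmap. A minimal sketch of that tie-break as a plain comparator, assuming a simplified entry struct; the names fs_entry and entry_cmp are illustrative, not from the source.

#include <stdbool.h>
#include <stdint.h>

/* Conceptual stand-in for a free space entry: either a plain extent
 * (bitmap == false) or a bitmap entry (bitmap == true). */
struct fs_entry {
	uint64_t offset;
	bool bitmap;
};

/*
 * Ordering sketch: sort by offset first; when two entries share an
 * offset, the extent entry sorts before (left of) the bitmap entry.
 * Returns <0, 0, >0 like a qsort/rb-tree comparator.
 */
static int entry_cmp(const struct fs_entry *a, const struct fs_entry *b)
{
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	if (a->bitmap == b->bitmap)
		return 0;		/* duplicate entry of the same kind */
	return a->bitmap ? 1 : -1;	/* bitmap entry goes after the extent entry */
}

With this ordering, a lookup that finds both kinds at one offset can prefer the cheap extent entry simply by taking the leftmost match.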
1652 * searched through the bitmap and figured out the largest ->max_extent_size,
1658 * less than the required bytes. So if we didn't search down this bitmap, we
1664 * that first bitmap entry had ->max_extent_size set, but the second one did
1670 * allocator comes in it'll fully search our second bitmap, and this time it'll
1676 if (entry->bitmap && entry->max_extent_size)
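Line 1676 checks a cached ->max_extent_size so that a bitmap which already failed to satisfy a request can be skipped without rescanning it on the next allocation attempt. A hedged sketch of that accessor pattern; the struct and helper names here are illustrative.

#include <stdbool.h>
#include <stdint.h>

struct free_space_entry {
	uint64_t bytes;           /* total free bytes tracked by the entry */
	uint64_t max_extent_size; /* largest contiguous run found so far, 0 if unknown */
	bool bitmap;
};

/*
 * An extent entry is itself one contiguous run, so ->bytes is the
 * answer.  For a bitmap entry, prefer the cached ->max_extent_size
 * (filled in by a previous full scan) so callers can skip bitmaps
 * that provably cannot satisfy the request.
 */
static uint64_t max_contiguous(const struct free_space_entry *e)
{
	if (e->bitmap && e->max_extent_size)
		return e->max_extent_size;
	return e->bytes;
}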
1728 if (entry->bitmap)
1732 * bitmap entry and extent entry may share same offset,
1733 * in that case, bitmap entry comes after extent entry.
1742 WARN_ON(!entry->bitmap);
1745 if (entry->bitmap) {
1748 * we should return it instead of the bitmap entry
1754 if (!prev->bitmap &&
1781 if (entry->bitmap) {
1786 if (!prev->bitmap &&
1803 if (entry->bitmap) {
1825 if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
1841 ASSERT(info->bytes || info->bitmap);
1848 if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
1861 ASSERT(info->bitmap);
1888 bitmap_clear(info->bitmap, start, count);
1896 if (start && test_bit(start - 1, info->bitmap))
1899 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
1924 bitmap_set(info->bitmap, start, count);
1936 if (start && test_bit(start - 1, info->bitmap))
1939 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
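Lines 1888-1939 clear or set a run of bits and then test the bit on either side of that run. The point of the neighbour tests is to keep a per-bitmap count of distinct free extents up to date. A small standalone sketch of the accounting, assuming a simple byte-array bitmap; the helper names are illustrative.

#include <stdbool.h>
#include <stddef.h>

static bool test_bit_at(const unsigned char *map, size_t i)
{
	return map[i / 8] & (1u << (i % 8));
}

/*
 * Accounting sketch for clearing bits [start, start + count): the
 * cleared run removes one free extent, but a set bit still touching
 * either end means a run survives on that side, so each set neighbour
 * adds one back.  Setting a run is the mirror image: +1, minus one per
 * neighbouring run it merges with.
 */
static int extents_delta_after_clear(const unsigned char *map, size_t nbits,
				     size_t start, size_t count)
{
	size_t end = start + count;
	int delta = -1;

	if (start && test_bit_at(map, start - 1))
		delta++;
	if (end < nbits && test_bit_at(map, end))
		delta++;
	return delta;
}

Clearing the middle of one long set run gives -1 + 1 + 1 = +1: the single run became two.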
1964 * Skip searching the bitmap if we don't have a contiguous section that
1978 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
1983 next_zero = find_next_zero_bit(bitmap_info->bitmap,
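Lines 1964-1983 scan a bitmap for a contiguous run of set bits at least as large as the request, which is also where the cached ->max_extent_size comes from. A userspace sketch of that scan, assuming a plain byte-array bitmap; names are illustrative.

#include <stdbool.h>
#include <stddef.h>

/* Find the first run of at least `want` consecutive set bits. */
static bool find_free_run(const unsigned char *map, size_t nbits,
			  size_t want, size_t *run_start, size_t *largest)
{
	size_t i = 0;

	*largest = 0;
	while (i < nbits) {
		size_t start, len;

		/* skip to the next set (free) bit */
		while (i < nbits && !(map[i / 8] & (1u << (i % 8))))
			i++;
		if (i >= nbits)
			break;
		start = i;
		/* measure the run up to the next zero bit */
		while (i < nbits && (map[i / 8] & (1u << (i % 8))))
			i++;
		len = i - start;
		if (len > *largest)
			*largest = len;	/* candidate for a cached max run size */
		if (len >= want) {
			*run_start = start;
			return true;	/* *largest only covers the bits scanned so far */
		}
	}
	return false;	/* on failure, *largest is the true maximum run length */
}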
2081 if (entry->bitmap) {
2097 * The bitmap may have gotten re-arranged in the space
2131 * Normally when this is called, the bitmap is completely empty. However,
2143 kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
2161 * We need to search for bits in this bitmap. We could only cover some
2162 * of the extent in this bitmap thanks to how we add space, so we need
2177 /* Cannot clear past the end of the bitmap */
2190 * no entry after this bitmap, but we still have bytes to
2200 * if the next entry isn't a bitmap we need to return to let the
2203 if (!bitmap_info->bitmap)
2207 * Ok the next item is a bitmap, but it may not actually hold
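Lines 2161-2207 handle a removal larger than what the current bitmap covers: the clear is clamped to the end of the bitmap, and the remainder is handed to the next entry, which must itself be a bitmap for the removal to continue. A sketch of the clamping step, with illustrative names.

#include <stdint.h>

struct window {
	uint64_t start;	/* first byte this bitmap entry covers */
	uint64_t len;	/* total bytes one bitmap entry can cover */
};

/*
 * Clamp a removal to the current bitmap: report how many bytes this
 * entry can clear and how many are left over for the next entry.
 */
static uint64_t clamp_to_bitmap(const struct window *w, uint64_t offset,
				uint64_t bytes, uint64_t *clear_bytes)
{
	uint64_t window_end = w->start + w->len;
	uint64_t end = offset + bytes;

	if (offset >= window_end) {	/* nothing of this range lies in the window */
		*clear_bytes = 0;
		return bytes;
	}
	if (end > window_end)
		end = window_end;	/* cannot clear past the end of the bitmap */
	*clear_bytes = end - offset;
	return (offset + bytes) - end;	/* remainder handled by the next entry */
}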
2234 * This is a tradeoff to make bitmap trim state minimal. We mark the
2235 * whole bitmap untrimmed if at any point we add untrimmed regions.
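Lines 2234-2235 spell out the trim-state tradeoff: the state is tracked per entry rather than per bit, so merging any untrimmed region downgrades the whole bitmap. A minimal sketch of that rule; the enum and field names are illustrative.

#include <stdbool.h>

enum trim_state { TRIMMED, UNTRIMMED };

struct bitmap_entry {
	enum trim_state trim_state;
	/* ... bitmap storage elided ... */
};

/*
 * Keeping trim state per entry stays cheap, at the cost that adding
 * any untrimmed region marks the whole entry untrimmed, even if most
 * of it was already trimmed, so it may be re-discarded later.
 */
static void add_region(struct bitmap_entry *e, bool region_trimmed)
{
	if (!region_trimmed)
		e->trim_state = UNTRIMMED;
	/* the bits for the region would be set here */
}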
2274 * extent, and don't have to deal with the bitmap
2282 * the overhead of a bitmap if we don't have to.
2294 * megabytes, so don't bother with a bitmap for those entries. However
2295 * some block groups can be smaller than what a bitmap would cover but
2297 * so allow those block groups to still be allowed to have a bitmap
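Lines 2274-2297 explain when free space goes into a bitmap at all: extent entries are preferred until a block group becomes fragmented enough, and block groups much smaller than what one bitmap covers never bother. A hedged sketch of that decision; the threshold values and names here are illustrative, not the kernel's exact logic.

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the "extent entry or bitmap?" heuristic: keep using cheap
 * extent entries while their count is under a threshold, and never use
 * a bitmap for a block group smaller than roughly half of what one
 * bitmap would cover.
 */
static bool should_use_bitmap(uint32_t free_extents, uint32_t extents_thresh,
			      uint64_t block_group_len, uint64_t bytes_per_bitmap)
{
	if (free_extents < extents_thresh)
		return false;		/* plenty of extent-entry slots left */
	if (block_group_len < bytes_per_bitmap / 2)
		return false;		/* too small to be worth a bitmap */
	return true;
}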
2332 * have a cluster here, and if so and it has our bitmap we need to add
2333 * the free space to that bitmap.
2351 if (!entry->bitmap) {
2390 if (info && info->bitmap) {
2409 /* allocate the bitmap */
2410 info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
2414 if (!info->bitmap) {
2423 if (info->bitmap)
2425 info->bitmap);
2474 if (right_info && !right_info->bitmap &&
2483 if (left_info && !left_info->bitmap &&
2500 struct btrfs_free_space *bitmap;
2507 bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2508 if (!bitmap)
2511 i = offset_to_bit(bitmap->offset, ctl->unit, end);
2512 j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
2519 if (!btrfs_free_space_trimmed(bitmap))
2522 bitmap_clear_bits(ctl, bitmap, end, bytes, update_stat);
2524 if (!bitmap->bytes)
2525 free_bitmap(ctl, bitmap);
2534 struct btrfs_free_space *bitmap;
2542 /* If we're on a boundary, try the previous logical bitmap. */
2549 bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2550 if (!bitmap)
2553 i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
2556 for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
2573 if (!btrfs_free_space_trimmed(bitmap))
2576 bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat);
2578 if (!bitmap->bytes)
2579 free_bitmap(ctl, bitmap);
2587 * entry, try to see if there's adjacent free space in bitmap entries, and if
2592 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2603 ASSERT(!info->bitmap);
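Lines 2500-2603 "steal" free space that sits in a bitmap immediately before or after a new extent entry, so one contiguous region ends up in a single extent entry instead of being split across entry types. A simplified sketch of the forward-stealing direction, assuming a byte-array bitmap; the names are illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* One bit per `unit` bytes, as in the free space cache bitmaps. */
struct bmap {
	uint64_t start;		/* byte offset covered by bit 0 */
	uint32_t unit;		/* bytes represented by each bit */
	size_t nbits;
	unsigned char *bits;
};

static bool bit_set(const struct bmap *b, size_t i)
{
	return b->bits[i / 8] & (1u << (i % 8));
}

/*
 * Forward steal: an extent entry ends at *end; if this bitmap covers
 * that offset, consume the set bits starting there, clear them, and
 * grow the extent entry by the stolen bytes.
 */
static uint64_t steal_forward(struct bmap *b, uint64_t *end)
{
	uint64_t stolen = 0;
	size_t i;

	if (*end < b->start)
		return 0;	/* bitmap does not cover this offset */
	i = (size_t)((*end - b->start) / b->unit);
	while (i < b->nbits && bit_set(b, i)) {
		b->bits[i / 8] &= ~(1u << (i % 8));
		stolen += b->unit;
		i++;
	}
	*end += stolen;
	return stolen;
}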
2650 * before we do that see if we need to drop this into a bitmap
2662 * going to add the new free space to existing bitmap entries - because
2832 * to remove, look for a bitmap instead
2839 * bitmap but then couldn't find the other part this may
2848 if (!info->bitmap) {
2932 btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
2934 (info->bitmap) ? "yes" : "no");
3000 if (!entry->bitmap) {
3103 if (entry->bitmap) {
3246 (!entry->bitmap && entry->offset < min_start)) {
3255 if (entry->bitmap) {
3290 if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
3297 if (entry->bitmap) {
3299 entry->bitmap);
3339 * Don't bother looking for a cluster in this bitmap if it's heavily
3347 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
3348 next_zero = find_next_zero_bit(entry->bitmap,
3387 * manipulate the bitmap so that we know we need to remove and re-insert
3389 * bitmap manipulation helpers know not to mess with the space_index
3390 * until this bitmap entry is added back into the normal cache.
3432 while (entry->bitmap || entry->bytes < min_bytes) {
3433 if (entry->bitmap && list_empty(&entry->list))
3450 if (entry->bitmap) {
3481 if (entry->bitmap || entry->bytes < min_bytes)
3515 * The bitmap that covers offset won't be in the list unless offset
3734 while (entry->bitmap ||
3825 * If we break out of trimming a bitmap prematurely, we should reset the
3829 * start = start of bitmap
3830 * end = near end of bitmap
3902 * discarding of the same bitmap (the reason why we are bound
3917 * Async discard bitmap trimming begins at by setting the start
3919 * start of the bitmap. This lets us know we are fully
3920 * scanning the bitmap rather than only some portion of it.
3929 * We lossily consider a bitmap trimmed if we only skip
4039 /* If we ended in the middle of a bitmap, reset the trimming flag */
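Lines 3825-4039 track trim progress across a bitmap: only a pass that started at the bitmap's start and ran to its end may treat the bitmap as trimmed, and stopping in the middle resets the in-progress flag so the next pass starts over. A rough sketch of that bookkeeping with illustrative names; this is a guess at the shape of the logic, not the kernel code.

#include <stdbool.h>
#include <stdint.h>

struct trim_ctx {
	uint64_t bitmap_start;	/* first byte the bitmap covers */
	uint64_t bitmap_end;	/* one past the last byte it covers */
	bool scanning_whole;	/* this trim pass began at bitmap_start */
	bool trimming;		/* entry is flagged as being trimmed */
};

/*
 * Only a pass that covered the whole bitmap may report success;
 * ending mid-bitmap clears the flag so partial work is not trusted.
 */
static bool finish_pass(struct trim_ctx *c, uint64_t stopped_at)
{
	if (c->scanning_whole && stopped_at >= c->bitmap_end)
		return true;	/* whole bitmap covered: may mark it trimmed */
	c->trimming = false;	/* ended mid-bitmap: rescan from the start later */
	return false;
}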
4180 * Use this if you need to make a bitmap or extent entry specifically, it
4186 u64 offset, u64 bytes, bool bitmap)
4202 if (!bitmap) {
4226 info->bitmap = map;
4272 if (info->bitmap) {
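Lines 4180-4272 belong to a helper that builds either a bitmap entry or an extent entry on demand, bypassing the normal decision logic, which is handy in tests. A hedged userspace sketch of such a constructor; the struct, size constant, and function name are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define BITMAP_BYTES 4096	/* illustrative: one page of bitmap storage */

struct free_space_info {
	uint64_t offset;
	uint64_t bytes;
	unsigned char *bitmap;	/* NULL for a plain extent entry */
};

/*
 * Test-style constructor: force a specific entry type instead of
 * letting the normal insert path decide, useful for exercising
 * merge/steal corner cases deterministically.
 */
static struct free_space_info *make_entry(uint64_t offset, uint64_t bytes, bool bitmap)
{
	struct free_space_info *info = calloc(1, sizeof(*info));

	if (!info)
		return NULL;
	info->offset = offset;
	info->bytes = bytes;
	if (bitmap) {
		info->bitmap = calloc(1, BITMAP_BYTES);
		if (!info->bitmap) {
			free(info);
			return NULL;
		}
		/* a real helper would also set the bits for [offset, offset + bytes) */
	}
	return info;
}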