Lines Matching defs:chunk

1419 * Try to find a chunk that intersects [start, start + len] range and when one
1824 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1846 * the device information is stored in the chunk root
2969 "Failed lookup while freeing chunk.");
2977 "Failed to delete chunk item.");
2987 struct btrfs_chunk *chunk;
3009 chunk = (struct btrfs_chunk *)(ptr + len);
3010 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
3049 "unable to find chunk map for logical %llu length %llu",
3056 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
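
The two messages above (3049, 3056) come from looking up the chunk map that covers a logical range and rejecting a map that does not fully contain it. The stand-alone C sketch below shows only that containment check; the structure and function names are illustrative stand-ins, not btrfs definitions.

/*
 * Minimal user-space sketch (not kernel code) of the range check implied by
 * the two error messages above: a looked-up chunk map must fully cover the
 * requested logical range.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_chunk_map {
        uint64_t start;   /* first logical byte covered by the map */
        uint64_t length;  /* number of logical bytes covered */
};

static bool map_covers_range(const struct fake_chunk_map *map,
                             uint64_t logical, uint64_t length)
{
        /* "found a bad chunk map" corresponds to either check failing */
        return map->start <= logical &&
               logical + length <= map->start + map->length;
}

int main(void)
{
        struct fake_chunk_map map = { .start = 1 << 20, .length = 8 << 20 };

        printf("covered: %d\n", map_covers_range(&map, 2 << 20, 1 << 20)); /* 1 */
        printf("covered: %d\n", map_covers_range(&map, 8 << 20, 4 << 20)); /* 0 */
        return 0;
}
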
3072 * Removing chunk items and updating the device items in the chunks btree
3117 * may result in allocating a new metadata chunk, which would attempt to
3146 * 1) Just like with the first phase of the chunk allocation, we must
3147 * reserve system space, do all chunk btree updates and deletions, and
3148 * update the system chunk array in the superblock while holding this
3159 * the device item, which does not exist on the chunk btree.
3182 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3225 * We are done with chunk btree updates and deletions, so release the
3262 * After we relocate and before we remove the chunk with offset
3267 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3269 * we release the path used to search the chunk/dev tree and before
3274 /* step one, relocate all the extents inside this chunk */
3319 * chunk tree entries
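
The comments around 3146-3182 describe reserving system space for the chunk btree updates and, on -ENOSPC, allocating a new system chunk and retrying once. The stand-alone sketch below illustrates only that retry-once pattern; every helper in it is a hypothetical stub, not a btrfs function.

/* Generic sketch of the "retry exactly once after -ENOSPC" pattern. */
#include <errno.h>
#include <stdio.h>

static int attempts;

/* Hypothetical operation that needs room in a metadata tree. */
static int remove_chunk_item_stub(void)
{
        return ++attempts == 1 ? -ENOSPC : 0;   /* first try fails */
}

/* Hypothetical stand-in for "allocate a new system chunk". */
static int grow_system_space_stub(void)
{
        return 0;
}

int main(void)
{
        int ret = remove_chunk_item_stub();

        if (ret == -ENOSPC) {
                ret = grow_system_space_stub();
                if (ret == 0)
                        ret = remove_chunk_item_stub();  /* retry once */
        }

        printf("final ret=%d after %d attempt(s)\n", ret, attempts);
        return ret ? 1 : 0;
}
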
3331 struct btrfs_chunk *chunk;
3369 chunk = btrfs_item_ptr(leaf, path->slots[0],
3371 chunk_type = btrfs_chunk_type(leaf, chunk);
3401 * return 1 : allocated a data chunk successfully,
3402 * return <0: error while allocating a data chunk,
3403 * return 0 : no need to allocate a data chunk.
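
A hedged, stand-alone sketch of the tri-state convention documented at 3401-3403; the helper here is a stub, not the btrfs function.

#include <stdio.h>

/* Stub standing in for the data-chunk pre-allocation helper. */
static int prealloc_data_chunk_stub(int outcome)
{
        return outcome; /* 1: allocated, 0: nothing to do, <0: error */
}

int main(void)
{
        int ret = prealloc_data_chunk_stub(1);

        if (ret < 0)
                fprintf(stderr, "allocation error: %d\n", ret);
        else if (ret == 1)
                printf("a new data chunk was allocated\n");
        else
                printf("no data chunk needed\n");
        return ret < 0 ? 1 : 0;
}
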
3542 * Turn on soft mode for chunk types that were being converted.
3599 * Balance filters. Return 1 if chunk should be filtered out
3670 struct btrfs_chunk *chunk,
3674 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3678 stripe = btrfs_stripe_nr(chunk, i);
3697 struct btrfs_chunk *chunk,
3701 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3711 type = btrfs_chunk_type(leaf, chunk);
3715 stripe = btrfs_stripe_nr(chunk, i);
3720 stripe_length = btrfs_chunk_length(leaf, chunk);
3733 struct btrfs_chunk *chunk,
3738 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3739 /* at least part of the chunk is inside this vrange */
3746 struct btrfs_chunk *chunk,
3749 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3774 struct btrfs_chunk *chunk, u64 chunk_offset)
3779 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3811 chunk_devid_filter(leaf, chunk, bargs)) {
3817 chunk_drange_filter(leaf, chunk, bargs)) {
3823 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3829 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3867 struct btrfs_chunk *chunk;
3947 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3948 chunk_type = btrfs_chunk_type(leaf, chunk);
3956 ret = should_balance_chunk(leaf, chunk, found_key.offset);
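
Lines 3599-3956 list the balance filters, each returning 1 when the chunk should be filtered out. The stand-alone sketch below models only the devid filter's semantics (keep a chunk if at least one stripe lives on the wanted device); the types are simplified stand-ins, not btrfs structures.

#include <stdint.h>
#include <stdio.h>

struct fake_stripe { uint64_t devid; };

struct fake_chunk {
        int num_stripes;
        struct fake_stripe stripes[4];
};

/* Return 1 to filter the chunk out, 0 to keep it. */
static int devid_filter(const struct fake_chunk *chunk, uint64_t wanted_devid)
{
        for (int i = 0; i < chunk->num_stripes; i++) {
                if (chunk->stripes[i].devid == wanted_devid)
                        return 0;
        }
        return 1;
}

int main(void)
{
        struct fake_chunk chunk = {
                .num_stripes = 2,
                .stripes = { { .devid = 1 }, { .devid = 3 } },
        };

        printf("filter out for devid 3: %d\n", devid_filter(&chunk, 3)); /* 0 */
        printf("filter out for devid 2: %d\n", devid_filter(&chunk, 2)); /* 1 */
        return 0;
}
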
3996 * We may be relocating the only data chunk we have,
4820 * The chunk relocation code actually frees the device extent
4931 * We may be relocating the only data chunk we have,
5012 struct btrfs_chunk *chunk, int item_size)
5030 memcpy(ptr, chunk, item_size);
5118 /* We don't want a chunk larger than 10% of writable space */
5149 /* We don't want a chunk larger than 10% of writable space */
5283 * Use the number of data stripes to figure out how big this chunk is
5285 * that answer with the max chunk size. If it's higher, we try to
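
Lines 5118-5285 describe capping a chunk at roughly 10% of writable space and sizing it from its data stripes. The stand-alone sketch below walks through that arithmetic with example numbers; the names and the rounding are illustrative, not the exact kernel computation.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b)
{
        return a < b ? a : b;
}

int main(void)
{
        const uint64_t SZ_1G = 1ULL << 30;
        uint64_t writable_bytes = 20ULL * SZ_1G;   /* example device pool */
        uint64_t profile_max_chunk = 10ULL * SZ_1G; /* example profile cap */
        uint64_t stripe_size = SZ_1G;
        uint64_t data_stripes = 4;                  /* e.g. striped over 4 devs */

        /* No chunk larger than ~10% of writable space. */
        uint64_t max_chunk_size = min_u64(profile_max_chunk, writable_bytes / 10);

        /* The logical chunk size is defined by the data stripes only. */
        uint64_t chunk_size = stripe_size * data_stripes;

        if (chunk_size > max_chunk_size) {
                /* Shrink the per-device stripe so the chunk fits the cap. */
                stripe_size = max_chunk_size / data_stripes;
                chunk_size = stripe_size * data_stripes;
        }

        printf("stripe_size=%" PRIu64 " chunk_size=%" PRIu64 "\n",
               stripe_size, chunk_size);
        return 0;
}
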
5493 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5528 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
5531 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5540 struct btrfs_chunk *chunk;
5551 * 1) Updates and insertions in the chunk btree must be done while holding
5552 * the chunk_mutex, as well as updating the system chunk array in the
5563 * not exist, or persisting a stripe of the chunk item with such an ID.
5582 chunk = kzalloc(item_size, GFP_NOFS);
5583 if (!chunk) {
5597 stripe = &chunk->stripe;
5608 btrfs_set_stack_chunk_length(chunk, bg->length);
5609 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
5610 btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
5611 btrfs_set_stack_chunk_type(chunk, map->type);
5612 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5613 btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN);
5614 btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN);
5615 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5616 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5622 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5629 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5635 kfree(chunk);
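
Lines 5582-5635 allocate and fill a variable-sized chunk item in which further stripes follow the first embedded stripe. The stand-alone sketch below models that layout and the resulting item-size computation with simplified stand-in structures; these are not the real on-disk definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_stripe {
        uint64_t devid;
        uint64_t offset;
        uint8_t  dev_uuid[16];
} __attribute__((packed));

struct fake_chunk {
        uint64_t length;
        uint64_t owner;
        uint64_t stripe_len;
        uint64_t type;
        uint32_t io_align;
        uint32_t io_width;
        uint32_t sector_size;
        uint16_t num_stripes;
        uint16_t sub_stripes;
        struct fake_stripe stripe;      /* first stripe, others follow */
} __attribute__((packed));

/* Item size grows with the stripe count: one stripe is already embedded. */
static size_t fake_chunk_item_size(int num_stripes)
{
        return sizeof(struct fake_chunk) +
               (num_stripes - 1) * sizeof(struct fake_stripe);
}

int main(void)
{
        int num_stripes = 2;
        size_t item_size = fake_chunk_item_size(num_stripes);
        struct fake_chunk *chunk = calloc(1, item_size);

        if (!chunk)
                return 1;

        chunk->num_stripes = num_stripes;
        chunk->stripe_len = 64 * 1024;  /* illustrative stripe length */

        /* The second stripe lives right after the base structure. */
        struct fake_stripe *second =
                (struct fake_stripe *)((char *)chunk + sizeof(struct fake_chunk));
        second->devid = 2;

        printf("item_size=%zu second stripe devid=%llu\n",
               item_size, (unsigned long long)second->devid);
        free(chunk);
        return 0;
}
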
5649 * so we must first allocate a metadata and a system chunk. But before
5650 * adding the block group items to the extent, device and chunk btrees,
5658 * of a new block group requires updating the device item in the chunk
5663 * update existing device item in the chunk btree, add a new block group
5664 * item to the extent btree, add a new chunk item to the chunk btree and
5716 * not write the data into that chunk successfully.
6685 * metadata chunk beyond that limit.
6701 * This is to give early warning for any metadata chunk reaching
6741 struct btrfs_chunk *chunk)
6759 length = btrfs_chunk_length(leaf, chunk);
6760 type = btrfs_chunk_type(leaf, chunk);
6762 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6772 * Only need to verify chunk item if we're reading from sys chunk array,
6773 * as chunk item in tree block is already verified by tree-checker.
6776 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6811 map->io_width = btrfs_chunk_io_width(leaf, chunk);
6812 map->io_align = btrfs_chunk_io_align(leaf, chunk);
6827 btrfs_stripe_offset_nr(leaf, chunk, i);
6828 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6831 btrfs_stripe_dev_uuid_nr(chunk, i),
6854 "failed to add chunk map, start=%llu len=%llu: %d",
7055 struct btrfs_chunk *chunk;
7071 * that's fine, we will not go beyond system chunk array anyway.
7105 chunk = (struct btrfs_chunk *)sb_array_offset;
7114 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7123 type = btrfs_chunk_type(sb, chunk);
7126 "invalid chunk type %llu in sys_array at offset %u",
7136 ret = read_one_chunk(&key, sb, chunk);
7162 * Return false if any chunk doesn't meet the minimal RW mount requirements.
7175 /* No chunk at all? Return false anyway */
7203 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
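
The message at 7203 reflects the rule that a chunk stays writable only while the number of missing devices among its stripes does not exceed what its RAID profile tolerates (0 for single/RAID0, 1 for RAID1/RAID10/RAID5, 2 for RAID6). The stand-alone sketch below illustrates that rule with simplified profile names; nothing here is the btrfs API.

#include <stdbool.h>
#include <stdio.h>

enum fake_profile { P_SINGLE, P_RAID0, P_RAID1, P_RAID10, P_RAID5, P_RAID6 };

static int max_tolerated_failures(enum fake_profile p)
{
        switch (p) {
        case P_RAID6:
                return 2;
        case P_RAID1:
        case P_RAID10:
        case P_RAID5:
                return 1;
        default:
                return 0;       /* single, RAID0: no redundancy */
        }
}

static bool chunk_is_writable(enum fake_profile p, int num_stripes,
                              const bool *stripe_missing)
{
        int missing = 0;

        for (int i = 0; i < num_stripes; i++)
                missing += stripe_missing[i] ? 1 : 0;

        return missing <= max_tolerated_failures(p);
}

int main(void)
{
        bool missing_one[2] = { true, false };

        printf("RAID1, 1 of 2 missing -> writable: %d\n",
               chunk_is_writable(P_RAID1, 2, missing_one));
        printf("RAID0, 1 of 2 missing -> writable: %d\n",
               chunk_is_writable(P_RAID0, 2, missing_one));
        return 0;
}
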
7266 * chunk tree if we call read_one_dev() while holding a lock on an
7267 * extent buffer of the chunk tree. Since we are mounting the filesystem
7269 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7275 * Read all device items, and then all the chunk items. All
7276 * device items are found before any chunk item (their object id
7277 * is smaller than the lowest possible object id for a chunk
7304 struct btrfs_chunk *chunk;
7310 * acquiring any locks on the chunk tree. This is a
7311 * requirement for chunk allocation, see the comment on
7314 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7315 ret = read_one_chunk(&found_key, leaf, chunk);
7327 * After loading chunk tree, we've got all device information,
7723 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7733 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7756 "too many dev extents for chunk %llu found",
7767 "dev extent physical offset %llu devid %llu has no corresponding chunk",
7820 "chunk %llu has missing dev extent, have %d expect %d",
7833 * Ensure that all dev extents are mapped to correct chunk, otherwise
7834 * later chunk allocation/free would cause unexpected behavior.
7837 * the same size level as the chunk tree. This slightly increases mount time.
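
Lines 7723-7837 cross-check dev extents against chunk maps. The stand-alone sketch below models the per-extent check (matching devid, physical offset and per-device stripe length against a map's stripes); all structures and names are illustrative stand-ins, not btrfs definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_map_stripe {
        uint64_t devid;
        uint64_t physical;      /* physical start of this stripe */
};

struct fake_chunk_map {
        int num_stripes;
        uint64_t stripe_length; /* per-device length of each stripe */
        struct fake_map_stripe stripes[4];
};

static bool dev_extent_matches(const struct fake_chunk_map *map,
                               uint64_t devid, uint64_t physical, uint64_t len)
{
        if (len != map->stripe_length)
                return false;   /* "length doesn't match chunk" */

        for (int i = 0; i < map->num_stripes; i++) {
                if (map->stripes[i].devid == devid &&
                    map->stripes[i].physical == physical)
                        return true;
        }
        return false;           /* "has no corresponding chunk" */
}

int main(void)
{
        struct fake_chunk_map map = {
                .num_stripes = 2,
                .stripe_length = 1ULL << 30,
                .stripes = {
                        { .devid = 1, .physical = 1ULL << 20 },
                        { .devid = 2, .physical = 1ULL << 20 },
                },
        };

        printf("ok: %d\n", dev_extent_matches(&map, 2, 1ULL << 20, 1ULL << 30)); /* 1 */
        printf("ok: %d\n", dev_extent_matches(&map, 3, 1ULL << 20, 1ULL << 30)); /* 0 */
        return 0;
}
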