Lines matching defs:range (fs/btrfs/file.c)
278 struct btrfs_ioctl_defrag_range_args range;
298 memset(&range, 0, sizeof(range));
299 range.len = (u64)-1;
300 range.start = defrag->last_offset;
303 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
312 defrag->last_offset = range.start;
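Source lines 278-312 together form the autodefrag pattern: zero the args struct, request a run to EOF, resume from the last stopping point, and save where btrfs_defrag_file() left off. A minimal sketch of that sequence; the wrapper name below is made up, while the fields and helpers are the ones visible in the listing (BTRFS_DEFRAG_BATCH is the batch-size constant file.c passes along):

/* Sketch of the autodefrag range handling the fragments above come from. */
static void defrag_one_range_sketch(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;			/* defragment up to EOF */
	range.start = defrag->last_offset;	/* resume the previous run */

	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);

	/* btrfs_defrag_file() advances range.start past what it processed;
	 * a full batch means there may be more work, so remember where to
	 * resume next time. */
	if (num_defrag == BTRFS_DEFRAG_BATCH)
		defrag->last_offset = range.start;
}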
514 * this drops all the extents in the cache that intersect the range
668 * in the range start - end. hint_block is filled in with a block number
671 * If an extent intersects the range but is not entirely inside the range
672 * it is either truncated or split. Anything entirely inside the range
805 * | - range to drop - |
861 * | ---- range to drop ----- |
886 * | ---- range to drop ----- |
909 * | ---- range to drop ----- |
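The four fragments above (source lines 805-909) are the "range to drop" halves of two-line ASCII diagrams in the extent-dropping code; the matching "| -------- extent -------- |" halves did not contain the search term. Reconstructed, the dispatch between an existing extent [key_offset, extent_end) and the drop range [start, end) looks roughly like this (a sketch of the case analysis only, not the item-rewriting bodies):

/* Sketch: the overlap cases, with the diagrams completed. */
static void drop_range_cases(u64 start, u64 end,
			     u64 key_offset, u64 extent_end)
{
	if (start > key_offset && end < extent_end) {
		/*        | - range to drop - |
		 *  | ------------ extent ------------ |
		 * Strictly inside: split the extent in two. */
	} else if (start <= key_offset && end < extent_end) {
		/*  | ---- range to drop ----- |
		 *        | -------- extent -------- |
		 * Covers the front: chop off the extent's beginning. */
	} else if (start > key_offset && end >= extent_end) {
		/*           | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 * Covers the tail: truncate the extent's end. */
	} else {
		/*  | ------- range to drop ------- |
		 *      | ------ extent ------ |
		 * Extent entirely inside the range: delete the whole item. */
	}
}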
1063 * Mark extent in the range start - end as written.
1456 * We have the pages locked and the extent range locked, so there's
1457 * no way someone can start IO on any dirty pages in this range.
1531 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1535 * range
1537 * This function will flush ordered extents in the range to ensure proper
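Source lines 1531-1537 document the nocow probe used by the write path: the candidate range is widened to sector boundaries, ordered extents in it are flushed, and only then is the extent tree asked whether every block can be overwritten in place. The helper names have shifted across kernel versions, so this sketch pins down only the rounding and the flush-before-check ordering:

/* Sketch: shape of the "can we nocow [pos, pos + write_bytes)?" check.
 * round_down()/round_up() are the stock kernel macros; the flush and
 * the extent walk are left as comments because their helper names
 * differ across kernel versions. */
static int can_nocow_range_sketch(loff_t pos, size_t *write_bytes,
				  u32 sectorsize)
{
	u64 lockstart = round_down(pos, sectorsize);
	u64 lockend = round_up(pos + *write_bytes, sectorsize) - 1;
	u64 num_bytes = lockend + 1 - lockstart;

	/* 1) Flush and wait for ordered extents in [lockstart, lockend],
	 *    then lock that extent range. */
	/* 2) Walk the file extent items: shared, compressed or missing
	 *    blocks force a fallback to COW (return 0). The walk may
	 *    shrink num_bytes to the nocow-able prefix. */
	/* 3) On success, report how much of the request can be written
	 *    in place. */
	*write_bytes = min_t(size_t, *write_bytes,
			     num_bytes - (pos - lockstart));
	return 1;	/* >0: nocow OK; 0: must COW; <0: error */
}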
1738 * If we have not locked the extent range, because the range's
1740 * cached extent state, acquired while marking the extent range
1906 * There are holes in the range or parts of the range that must
2111 * Always set the range to a full range, otherwise we can get into
2115 * extents outside the range, to missing checksums due to ordered extents
2123 * We write the dirty pages in the range and wait until they complete
2153 * pages in the target range. We need to make sure that writeback for
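Source lines 2111-2153 come from the fsync path: whatever subrange userspace asked to sync, btrfs widens it to the whole file before starting writeback, since syncing a partial range can leave holes without file extent items and ordered extents whose checksums were never waited for. In the kernels this listing matches, the step those comments sit above looks like this excerpt-style sketch (start_ordered_ops() is the static file.c helper wrapping btrfs_fdatawrite_range()):

/* Widen any fsync request to a full-file range: */
start = 0;
end = LLONG_MAX;
len = (u64)LLONG_MAX + 1;

/* Kick off writeback for the dirty pages in the range now, so the
 * wait for completion can happen outside the inode lock. */
ret = start_ordered_ops(inode, start, end);
if (ret)
	goto out;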
2492 * We need to make sure we have no ordered extents in this range
2493 * and nobody raced in and read a page in this range, if we did
2598 * The respective range must have been previously locked, as well as the inode.
2599 * The end offset is inclusive (last byte of the range).
2601 * the file range with an extent.
2636 * 1 - removing the extents in the range
2638 * replacing the range with a new extent
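Source lines 2598-2638 describe the helper that replaces a file range with a new extent (used by clone/dedupe and hole punching): the caller must already hold the inode lock and the extent range lock, the end offset is inclusive, and each loop iteration reserves transaction units for the distinct metadata changes. A sketch of the reservation arithmetic those comments enumerate (the unit list is paraphrased; the real count depends on the NO_HOLES feature and on whether a replacement extent is inserted):

/* Per-iteration transaction reservation for "drop this file range and
 * maybe replace it with a new extent":
 *   1 unit - updating the inode item
 *   1 unit - removing the extents in the range
 *   1 unit - the hole extent item, or the extent replacing the range
 */
unsigned int rsv_count = 3;
struct btrfs_trans_handle *trans;

trans = btrfs_start_transaction(root, rsv_count);
if (IS_ERR(trans))
	return PTR_ERR(trans);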
2790 * Don't insert file hole extent item if it's for a range beyond eof
2791 * (because it's useless) or if it represents a 0 bytes range (when
3007 /* Helper structure to record which range is already reserved */
3015 * Helper function to add falloc range
3017 * Caller should have locked the larger range of extent containing
3023 struct falloc_range *range = NULL;
3030 * the last range.
3038 range = kmalloc(sizeof(*range), GFP_KERNEL);
3039 if (!range)
3041 range->start = start;
3042 range->len = len;
3043 list_add_tail(&range->list, head);
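Source lines 3007-3043 expose nearly all of the falloc range bookkeeping, so it can be reconstructed almost verbatim; only the struct layout is inferred here, from the accessors the listing shows (a list node plus start/len). The merge shortcut works because fallocate scans the file in ascending offset order, so only the last queued range can ever be contiguous with a new one:

#include <linux/list.h>
#include <linux/slab.h>

/* Records one chunk of the requested range that still needs allocation. */
struct falloc_range {
	struct list_head list;
	u64 start;
	u64 len;
};

/*
 * Queue [start, start + len) for later allocation. Since the caller
 * iterates in byte order, merging needs a single comparison against
 * the last range on the list.
 */
static int add_falloc_range(struct list_head *head, u64 start, u64 len)
{
	struct falloc_range *range;

	if (!list_empty(head)) {
		struct falloc_range *prev;

		prev = list_last_entry(head, struct falloc_range, list);
		if (prev->start + prev->len == start) {
			/* Contiguous with the last range: just grow it. */
			prev->len += len;
			return 0;
		}
	}

	range = kmalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;
	range->start = start;
	range->len = len;
	list_add_tail(&range->list, head);
	return 0;
}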
3130 * range contains one or more prealloc extents together with regular
3140 * The whole range is already a prealloc extent,
3150 * Part of the range is already a prealloc extent, so operate
3151 * only on the remaining part of the range.
3197 * they might map to a hole, in which case we need our allocation range
3285 struct falloc_range *range;
3393 * we can't wait on the range with the transaction
3439 * range, free reserved data space first, otherwise
3454 list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3457 range->start,
3458 range->len, i_blocksize(inode),
3462 data_reserved, range->start,
3463 range->len);
3464 list_del(&range->list);
3465 kfree(range);
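Source lines 3285-3465 show the other half of the bookkeeping: after the ranges needing allocation have been collected and their data space reserved, one loop drains the list, either preallocating each range or, once an error has occurred, handing back its reservation. A sketch of that loop (the trailing arguments of btrfs_prealloc_file_range() are not visible in the listing, and the first argument of btrfs_free_reserved_data_space() varies by kernel version):

/* Drain the list built by add_falloc_range(): allocate each range while
 * things are going well; once ret is nonzero, only return the per-range
 * data-space reservations. */
list_for_each_entry_safe(range, tmp, &reserve_list, list) {
	if (!ret)
		ret = btrfs_prealloc_file_range(inode, mode,
				range->start,
				range->len, i_blocksize(inode),
				offset + len, &alloc_hint);
	else
		btrfs_free_reserved_data_space(inode, data_reserved,
				range->start, range->len);
	list_del(&range->list);
	kfree(range);
}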
3644 * with the entire range locked but with nobody actually marked with