Lines Matching defs:end (each entry below is a source line number followed by the matching source line)

56 			char *end;
430 p->end = p->start;
434 p->end = p->start;
476 return p->end - p->start;
495 path_len = p->end - p->start;
520 p->end = p->buf + p->buf_len - 1;
521 p->start = p->end - path_len;
525 p->end = p->start + path_len;
536 new_len = p->end - p->start + name_len;
537 if (p->start != p->end)
544 if (p->start != p->end)
549 if (p->start != p->end)
550 *p->end++ = '/';
551 *prepared = p->end;
552 p->end += name_len;
553 *p->end = 0;
579 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
582 memcpy(prepared, p2->start, p2->end - p2->start);
622 len = p->end - p->start;
624 p->end = p->start + len;
743 p->end - p->start); \
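
The matches at lines 430-743 all touch the same pattern: a path lives in a buffer bracketed by start and end pointers, its length is always end - start, and appending a name means growing that range and writing a '/' separator only when the path is non-empty. Below is a minimal userspace sketch of that pattern, with a hypothetical path_buf type and fixed-size buffer; it is an illustration of the idiom, not the kernel's fs_path implementation.

#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified stand-in for the start/end pointer pattern. */
struct path_buf {
	char buf[256];
	char *start;
	char *end;	/* points at the terminating NUL */
};

static void path_reset(struct path_buf *p)
{
	p->start = p->buf;
	p->end = p->start;
	*p->end = 0;
}

static size_t path_len(const struct path_buf *p)
{
	return p->end - p->start;	/* length is always end - start */
}

/* Append one name, inserting a '/' only when the path is non-empty. */
static int path_add(struct path_buf *p, const char *name)
{
	size_t name_len = strlen(name);
	size_t new_len = path_len(p) + name_len + (path_len(p) ? 1 : 0);

	if (new_len + 1 > sizeof(p->buf))
		return -1;	/* the real code grows the buffer here instead */

	if (p->start != p->end)
		*p->end++ = '/';
	memcpy(p->end, name, name_len);
	p->end += name_len;
	*p->end = 0;
	return 0;
}

int main(void)
{
	struct path_buf p;

	path_reset(&p);
	path_add(&p, "dir");
	path_add(&p, "file.txt");
	printf("%s (len %zu)\n", p.start, path_len(&p));	/* dir/file.txt (len 12) */
	return 0;
}
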
1126 * Start with a small buffer (1 page). If later we end up needing more
1380 * we clone more optimally and end up doing less writes and getting
2365 dest->start, dest->end - dest->start);
3052 ref->name_len = ref->full_path->end - ref->name;
3995 * because we can end up with a circular dependency
5237 * it is implicitly to the end of the command.
5466 static int send_hole(struct send_ctx *sctx, u64 end)
5486 end = min_t(u64, end, sctx->cur_inode_size);
5489 return send_update_extent(sctx, offset, end - offset);
5497 while (offset < end) {
5498 u64 len = min(end - offset, read_size);
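
The send_hole() hits at lines 5466-5498 show two recurring idioms: the end offset is first clamped to the inode size, and the remaining range is then emitted in chunks of at most read_size bytes. A small standalone illustration of the clamp-and-chunk loop follows, in plain userspace C with a hypothetical emit() callback standing in for the actual send commands.

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Hypothetical stand-in for sending one command covering [offset, offset + len). */
static void emit(uint64_t offset, uint64_t len)
{
	printf("cmd: offset=%llu len=%llu\n",
	       (unsigned long long)offset, (unsigned long long)len);
}

/*
 * Walk [offset, end) in chunks no larger than read_size, after clamping
 * end to the inode size so nothing is emitted past i_size.
 */
static void send_range(uint64_t offset, uint64_t end,
		       uint64_t inode_size, uint64_t read_size)
{
	end = MIN(end, inode_size);	/* mirrors: end = min_t(u64, end, cur_inode_size) */
	while (offset < end) {
		uint64_t len = MIN(end - offset, read_size);

		emit(offset, len);
		offset += len;
	}
}

int main(void)
{
	/* A 10 KiB range against an 8 KiB file, emitted in 4 KiB chunks. */
	send_range(0, 10 * 1024, 8 * 1024, 4 * 1024);
	return 0;
}
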
5695 const u64 end = offset + len;
5775 if (sctx->clean_page_cache && PAGE_ALIGNED(end)) {
5783 * Always start from the end offset of the last range cleared.
5793 * parent and send snapshots, some or all of its pages may end
5795 * the page cache we always start from the end offset of the
5796 * previously processed extent up to the end of the current
5801 end - 1);
5802 sctx->page_cache_clear_start = end;
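
Lines 5695-5802 are about dropping the page cache after a range has been sent: the code remembers where the last clear stopped (page_cache_clear_start) and, when the current range ends on a page boundary, clears everything from that mark up to end - 1 before advancing the mark. The sketch below models only this bookkeeping, using a hypothetical clear_range() helper in userspace C; the kernel performs the actual eviction with its own page-cache APIs.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_ALIGNED(x)	(((x) & (PAGE_SIZE - 1)) == 0)

/* Hypothetical stand-in for evicting cached pages in [start, end_incl]. */
static void clear_range(uint64_t start, uint64_t end_incl)
{
	printf("clear pages [%llu, %llu]\n",
	       (unsigned long long)start, (unsigned long long)end_incl);
}

struct send_state {
	int clean_page_cache;			/* cache cleaning was requested */
	uint64_t page_cache_clear_start;	/* end of the last cleared range */
};

/* Called after a range ending at 'end' has been sent. */
static void maybe_clear_page_cache(struct send_state *s, uint64_t end)
{
	if (s->clean_page_cache && PAGE_ALIGNED(end)) {
		/* Always start from the end offset of the last range cleared. */
		clear_range(s->page_cache_clear_start, end - 1);
		s->page_cache_clear_start = end;
	}
}

int main(void)
{
	struct send_state s = { .clean_page_cache = 1, .page_cache_clear_start = 0 };

	maybe_clear_page_cache(&s, 6000);	/* not page aligned: skipped */
	maybe_clear_page_cache(&s, 8192);	/* clears [0, 8191], mark moves to 8192 */
	maybe_clear_page_cache(&s, 16384);	/* clears [8192, 16383] */
	return 0;
}
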
6142 u64 end;
6145 end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
6146 if (offset >= end)
6149 if (clone_root && IS_ALIGNED(end, bs)) {
6159 data_offset, offset, end - offset);
6161 ret = send_extent_data(sctx, path, offset, end - offset);
6163 sctx->cur_inode_next_write_offset = end;
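
The group at lines 6142-6163 decides, per extent, whether to clone or to send plain writes: the end is clamped to the inode size, offsets already at or past that end are skipped, cloning is attempted only when a clone source exists and the end is block-aligned, otherwise the data is written, and the next expected write offset is advanced to end. A compact decision sketch follows, with hypothetical types and helpers rather than the kernel functions.

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define IS_ALIGNED(x, a) (((x) % (a)) == 0)

struct ctx {
	uint64_t cur_inode_size;
	uint64_t cur_inode_next_write_offset;
	uint64_t block_size;
	int have_clone_source;	/* stands in for clone_root != NULL */
};

/* Hypothetical stand-ins for the clone and write paths. */
static void do_clone(uint64_t offset, uint64_t len)
{
	printf("clone offset=%llu len=%llu\n",
	       (unsigned long long)offset, (unsigned long long)len);
}

static void do_write(uint64_t offset, uint64_t len)
{
	printf("write offset=%llu len=%llu\n",
	       (unsigned long long)offset, (unsigned long long)len);
}

static void write_or_clone(struct ctx *c, uint64_t offset, uint64_t extent_end)
{
	uint64_t end = MIN(extent_end, c->cur_inode_size);

	if (offset >= end)
		return;	/* extent starts at or past i_size: nothing to send */

	if (c->have_clone_source && IS_ALIGNED(end, c->block_size))
		do_clone(offset, end - offset);
	else
		do_write(offset, end - offset);

	c->cur_inode_next_write_offset = end;
}

int main(void)
{
	struct ctx c = {
		.cur_inode_size = 10000,
		.block_size = 4096,
		.have_clone_source = 1,
	};

	write_or_clone(&c, 0, 8192);		/* block-aligned end: cloned */
	write_or_clone(&c, 8192, 12288);	/* clamped to 10000, unaligned: written */
	return 0;
}
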
6343 * We're now behind the left extent (treat as unchanged) or at the end
6389 const u64 end)
6410 while (search_start < end) {
6431 key.offset >= end)
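
The hits at lines 6389-6431 come from the hole check against the parent snapshot: the loop searches extents between search_start and end and stops as soon as an item starts at or beyond end. The same check can be modeled on a sorted list of (offset, length) extents, where the range is a hole unless every byte in [start, end) is covered. Below is a userspace model of that idea, using a hypothetical extent array instead of a btrfs tree search.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct extent {
	uint64_t offset;	/* file offset the extent starts at */
	uint64_t len;		/* number of bytes it covers */
};

/*
 * Return true if any byte in [start, end) is not covered by the sorted
 * extent list, i.e. the range contains a hole.
 */
static bool range_has_hole(const struct extent *extents, size_t n,
			   uint64_t start, uint64_t end)
{
	uint64_t search_start = start;

	for (size_t i = 0; i < n && search_start < end; i++) {
		const struct extent *e = &extents[i];

		if (e->offset + e->len <= search_start)
			continue;		/* entirely before the range covered so far */
		if (e->offset >= end)
			break;			/* past the range: stop searching */
		if (e->offset > search_start)
			return true;		/* gap before this extent */
		search_start = e->offset + e->len;
	}
	return search_start < end;		/* an uncovered tail counts as a hole */
}

int main(void)
{
	const struct extent extents[] = {
		{ 0, 4096 }, { 4096, 4096 }, { 12288, 4096 },
	};

	printf("[0, 8192) hole: %d\n", range_has_hole(extents, 3, 0, 8192));	/* 0 */
	printf("[0, 16384) hole: %d\n", range_has_hole(extents, 3, 0, 16384));	/* 1 */
	return 0;
}
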
7237 * have a leaf when we have reached the end of the send root but have
7238 * not yet reached the end of the parent root.
7245 * NULL. When doing an incremental send, we may have reached the end of
7510 * Even if we have reached the end of a tree, ret is -1, update the key
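
The last cluster, lines 7237-7510, concerns the incremental-send tree walk: the send root and the parent root are advanced in step, either side may run out of leaves before the other, and the remaining items still have to be reported. The idea can be illustrated with two sorted arrays, where an exhausted stream simply lets the other one drain; this is illustrative only, since the kernel walks btrfs leaves and reports changes through a callback rather than printing them.

#include <stddef.h>
#include <stdio.h>

/*
 * Compare two sorted key streams the way an incremental send walks the
 * send root against the parent root: keys present only on the left are
 * "new", keys only on the right are "deleted", shared keys are "unchanged".
 * When one stream reaches its end, the other is simply drained.
 */
static void compare_streams(const int *left, size_t nleft,
			    const int *right, size_t nright)
{
	size_t i = 0, j = 0;

	while (i < nleft || j < nright) {
		if (j >= nright) {
			/* right (parent) exhausted: everything left is new */
			printf("new:       %d\n", left[i++]);
		} else if (i >= nleft) {
			/* left (send) exhausted: everything remaining was deleted */
			printf("deleted:   %d\n", right[j++]);
		} else if (left[i] < right[j]) {
			printf("new:       %d\n", left[i++]);
		} else if (left[i] > right[j]) {
			printf("deleted:   %d\n", right[j++]);
		} else {
			printf("unchanged: %d\n", left[i]);
			i++;
			j++;
		}
	}
}

int main(void)
{
	const int send_keys[]   = { 1, 2, 4, 7, 9 };
	const int parent_keys[] = { 1, 3, 4, 7 };

	compare_streams(send_keys, 5, parent_keys, 4);
	return 0;
}
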