Lines Matching refs:run
34 static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
39 if (!run->count) {
45 max_idx = run->count - 1;
48 r = run->runs;
61 *index = run->count;
72 r = run->runs + mid_idx;
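
The run_lookup() fragments above trace a binary search over the vcn-sorted run array: handle the empty table and out-of-range cases first, then bisect (max_idx, mid_idx) until the run containing vcn is found. A minimal user-space sketch of that pattern follows; the simplified CLST, ntfs_run, and runs_tree definitions and the exact bound handling are assumptions, not the driver's code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t CLST;			/* assumption: cluster index type */

struct ntfs_run { CLST vcn, lcn, len; };
struct runs_tree { struct ntfs_run *runs; size_t count, allocated; };

/*
 * Find the run containing vcn. On a miss, *index is the position where
 * such a run would be inserted (run->count when vcn lies past the end).
 */
static bool run_lookup_sketch(const struct runs_tree *run, CLST vcn,
			      size_t *index)
{
	size_t lo = 0, hi = run->count;

	if (!run->count) {
		*index = 0;
		return false;
	}

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const struct ntfs_run *r = run->runs + mid;

		if (vcn < r->vcn)
			hi = mid;
		else if (vcn >= r->vcn + r->len)
			lo = mid + 1;
		else {
			*index = mid;	/* vcn falls inside this run */
			return true;
		}
	}

	*index = lo;	/* insertion point for a run covering vcn */
	return false;
}
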
93 static void run_consolidate(struct runs_tree *run, size_t index)
96 struct ntfs_run *r = run->runs + index;
98 while (index + 1 < run->count) {
100 * We should merge the current run with the next one
101 * if the start of the next run lies inside the one being tested.
141 * of the next run's lcn does not match
142 * the last volume block of the current run.
154 i = run->count - (index + 1);
158 run->count -= 1;
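
run_consolidate() walks forward from index, merging the current run with its successors until a real gap appears; an absorbed entry is deleted by shifting the tail down, which is what the memmove over run->count - (index + 1) entries and the run->count -= 1 above do. Here is a sketch of the simplest case, where the next run continues the current one both in vcn and in lcn space; sparse runs and partial overlaps, which the driver also handles, are left out.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef uint32_t CLST;

struct ntfs_run { CLST vcn, lcn, len; };
struct runs_tree { struct ntfs_run *runs; size_t count, allocated; };

/* Merge run[index] with following runs for as long as they continue it. */
static void run_consolidate_sketch(struct runs_tree *run, size_t index)
{
	struct ntfs_run *r = run->runs + index;

	while (index + 1 < run->count) {
		struct ntfs_run *n = r + 1;

		/*
		 * Stop if there is a hole, or if the lcn of the next run
		 * does not continue the current run on disk.
		 */
		if (n->vcn != r->vcn + r->len ||
		    n->lcn != r->lcn + r->len)
			break;

		r->len += n->len;

		/* Delete the absorbed entry by shifting the tail down. */
		memmove(n, n + 1, sizeof(*n) * (run->count - (index + 2)));
		run->count -= 1;
	}
}
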
167 bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
173 if (!run_lookup(run, svcn, &i))
176 end = run->runs + run->count;
177 r = run->runs + i;
192 bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
200 if (!run->runs)
203 if (!run_lookup(run, vcn, &idx))
206 r = run->runs + idx;
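
run_lookup_entry() adds the address arithmetic to the lookup: the lcn answering a vcn is the run's starting lcn plus the offset of vcn into the run, and the returned length is whatever remains of the run from that point. A small sketch of just that translation step, with SPARSE_LCN as an assumed marker value for unallocated runs:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t CLST;
#define SPARSE_LCN ((CLST)-1)	/* assumption: marker for sparse runs */

struct ntfs_run { CLST vcn, lcn, len; };

/*
 * Given a run expected to contain vcn, return the matching lcn and how
 * many clusters stay contiguous from that point on.
 */
static bool run_entry_translate(const struct ntfs_run *r, CLST vcn,
				CLST *lcn, CLST *len)
{
	CLST gap = vcn - r->vcn;

	if (vcn < r->vcn || gap >= r->len)
		return false;	/* vcn is not inside this run */

	*lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : r->lcn + gap;
	*len = r->len - gap;
	return true;
}
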
228 void run_truncate_head(struct runs_tree *run, CLST vcn)
233 if (run_lookup(run, vcn, &index)) {
234 r = run->runs + index;
248 r = run->runs;
249 memmove(r, r + index, sizeof(*r) * (run->count - index));
251 run->count -= index;
253 if (!run->count) {
254 kvfree(run->runs);
255 run->runs = NULL;
256 run->allocated = 0;
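
run_truncate_head() trims the run that straddles vcn (advancing its vcn and lcn and shrinking its len), drops everything before it with a single memmove, and frees the array outright when nothing survives. A self-contained sketch of that sequence; the linear scan stands in for the driver's binary search, and free() for kvfree():

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint32_t CLST;
#define SPARSE_LCN ((CLST)-1)	/* assumption: marker for sparse runs */

struct ntfs_run { CLST vcn, lcn, len; };
struct runs_tree { struct ntfs_run *runs; size_t count, allocated; };

/* Drop all runs wholly below vcn; trim the run that straddles it. */
static void run_truncate_head_sketch(struct runs_tree *run, CLST vcn)
{
	size_t index = 0;
	struct ntfs_run *r;

	if (!run->count)
		return;

	/* Linear stand-in for the driver's binary search. */
	while (index < run->count &&
	       run->runs[index].vcn + run->runs[index].len <= vcn)
		index += 1;

	if (index < run->count) {
		r = run->runs + index;
		if (vcn > r->vcn) {
			CLST dlen = vcn - r->vcn;

			r->vcn = vcn;
			r->len -= dlen;
			if (r->lcn != SPARSE_LCN)
				r->lcn += dlen;
		}
	}

	r = run->runs;
	memmove(r, r + index, sizeof(*r) * (run->count - index));
	run->count -= index;

	if (!run->count) {
		free(run->runs);	/* kvfree() in the kernel */
		run->runs = NULL;
		run->allocated = 0;
	}
}
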
263 void run_truncate(struct runs_tree *run, CLST vcn)
273 if (run_lookup(run, vcn, &index)) {
274 struct ntfs_run *r = run->runs + index;
287 run->count = index;
291 kvfree(run->runs);
292 run->runs = NULL;
293 run->allocated = 0;
300 void run_truncate_around(struct runs_tree *run, CLST vcn)
302 run_truncate_head(run, vcn);
304 if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
305 run_truncate(run, (run->runs + (run->count >> 1))->vcn);
316 bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
331 inrange = run_lookup(run, vcn, &index);
336 * continues the previous run.
341 struct ntfs_run *t = run->runs + index - 1;
363 used = run->count * sizeof(struct ntfs_run);
370 if (run->allocated < used + sizeof(struct ntfs_run)) {
378 if (is_power_of_2(run->allocated))
379 bytes = run->allocated << 1;
384 bytes = run->allocated + (16 * PAGE_SIZE);
395 memcpy(new_ptr, run->runs,
397 memcpy(r + 1, run->runs + index,
398 sizeof(struct ntfs_run) * (run->count - index));
400 kvfree(run->runs);
401 run->runs = new_ptr;
402 run->allocated = bytes;
405 size_t i = run->count - index;
407 r = run->runs + index;
417 run->count += 1;
419 r = run->runs + index;
470 run_consolidate(run, index);
471 run_consolidate(run, index + 1);
478 !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
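
The allocation path of run_add_entry() is fully visible in the fragments: when the table is full, the new size doubles while the current size is a power of two and afterwards grows linearly by 16 pages, and the copy into the new buffer leaves a one-entry gap at index for the run being inserted (otherwise the tail is shifted up in place). A sketch of just that grow-and-gap step; the 4 KiB PAGE_SIZE, the initial size, and malloc() in place of kvmalloc() are assumptions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u		/* assumption for a user-space sketch */

typedef uint32_t CLST;

struct ntfs_run { CLST vcn, lcn, len; };
struct runs_tree { struct ntfs_run *runs; size_t count, allocated; };

static bool is_power_of_2(size_t n)
{
	return n && !(n & (n - 1));
}

/* Open a one-entry gap at index, growing the buffer when needed. */
static bool run_make_gap(struct runs_tree *run, size_t index)
{
	size_t used = run->count * sizeof(struct ntfs_run);

	if (run->allocated < used + sizeof(struct ntfs_run)) {
		size_t bytes;
		struct ntfs_run *new_ptr;

		if (!run->allocated)
			bytes = PAGE_SIZE;	/* assumed initial size */
		else if (is_power_of_2(run->allocated))
			bytes = run->allocated << 1;
		else
			bytes = run->allocated + 16 * PAGE_SIZE;

		new_ptr = malloc(bytes);	/* kvmalloc() in the kernel */
		if (!new_ptr)
			return false;

		if (run->runs) {
			/* Copy in two pieces, leaving the gap at index. */
			memcpy(new_ptr, run->runs,
			       index * sizeof(struct ntfs_run));
			memcpy(new_ptr + index + 1, run->runs + index,
			       (run->count - index) * sizeof(struct ntfs_run));
			free(run->runs);	/* kvfree() in the kernel */
		}
		run->runs = new_ptr;
		run->allocated = bytes;
	} else {
		/* Enough room: shift the tail up by one entry in place. */
		memmove(run->runs + index + 1, run->runs + index,
			(run->count - index) * sizeof(struct ntfs_run));
	}

	run->count += 1;
	return true;
}

Doubling keeps early growth cheap, while switching to a fixed linear step bounds the over-allocation once the table is already large.
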
489 bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
495 if (WARN_ON(!run_lookup(run, vcn, &index)))
498 e = run->runs + run->count;
499 r = run->runs + index;
504 /* Collapse the tail of the run. */
507 /* Collapse a middle part of a sparse run. */
510 /* Collapse a middle part of a normal run: split it. */
511 if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
513 return run_collapse_range(run, vcn, len);
531 /* Eat this run. */
545 run->count -= eat;
555 bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
560 if (WARN_ON(!run_lookup(run, vcn, &index)))
563 e = run->runs + run->count;
564 r = run->runs + index;
572 r = run->runs + index;
582 if (!run_add_entry(run, vcn + len, lcn2, len2, false))
586 if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
595 bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
600 if (index >= run->count)
603 r = run->runs + index;
817 int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
835 if (!run_lookup(run, svcn, &i))
838 r_end = run->runs + run->count;
839 r = run->runs + i;
848 r = run->runs + i;
877 /* Can we store this entire run? */
882 /* Pack run header. */
886 /* Pack the length of the run. */
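
run_pack() serializes runs into the on-disk NTFS mapping-pairs format: each pair starts with a header byte whose low nibble gives the byte count of the length field and whose high nibble gives the byte count of the signed lcn delta (zero for a sparse run), followed by those fields little-endian; a zero header byte terminates the stream. Below is a standalone sketch of encoding a single pair; the field-sizing helpers and bounds checks are my own, only the wire layout follows the format.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Bytes needed to store v unsigned little-endian (at least 1). */
static size_t unsigned_size(uint64_t v)
{
	size_t n = 0;

	do {
		n++;
		v >>= 8;
	} while (v);
	return n;
}

/* Smallest byte count whose sign-extension reproduces d. */
static size_t signed_size(int64_t d)
{
	size_t n = 1;

	while (n < 8) {
		int64_t lim = (int64_t)1 << (8 * n - 1);

		if (d >= -lim && d < lim)
			break;
		n++;
	}
	return n;
}

/* Encode one mapping pair. Returns bytes written, 0 if buf is full. */
static size_t pack_one_run(uint8_t *buf, size_t avail, uint64_t len,
			   int64_t lcn_delta, bool sparse)
{
	size_t lb = unsigned_size(len);
	size_t db = sparse ? 0 : signed_size(lcn_delta);
	uint64_t d = (uint64_t)lcn_delta;
	size_t i;

	if (1 + lb + db > avail)
		return 0;

	*buf++ = (uint8_t)((db << 4) | lb);
	for (i = 0; i < lb; i++, len >>= 8)
		*buf++ = (uint8_t)len;
	for (i = 0; i < db; i++, d >>= 8)
		*buf++ = (uint8_t)d;	/* two's-complement bytes, LE */
	return 1 + lb + db;
}
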
920 int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
1000 "Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
1011 if (!run)
1012 ; /* Called from check_attr() (fslog.c) to check the run. */
1013 else if (run == RUN_DEALLOCATE) {
1016 * without storing them in the run.
1021 if (!run_add_entry(run, vcn64, lcn, len, is_mft))
1026 if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
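
run_unpack() is the inverse walk: read the header nibbles, pull out the length and the sign-extended lcn delta, accumulate the lcn across pairs, and reject out-of-range values (the "64-bit run" error above fires when a value does not fit the volume). A sketch of decoding a single pair, again with home-grown bounds checks rather than the driver's:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Decode one mapping pair. Returns bytes consumed, or 0 at the
 * terminating zero header or on malformed/truncated input.
 */
static size_t unpack_one_run(const uint8_t *buf, size_t avail,
			     uint64_t *len, int64_t *lcn_delta,
			     bool *sparse)
{
	size_t lb, db, i;
	uint64_t v = 0;

	if (!avail || !buf[0])
		return 0;

	lb = buf[0] & 0xf;
	db = buf[0] >> 4;
	if (!lb || lb > 8 || db > 8 || 1 + lb + db > avail)
		return 0;

	for (i = 0; i < lb; i++)
		v |= (uint64_t)buf[1 + i] << (8 * i);
	*len = v;

	*sparse = !db;
	*lcn_delta = 0;
	if (db) {
		v = 0;
		for (i = 0; i < db; i++)
			v |= (uint64_t)buf[1 + lb + i] << (8 * i);
		/* Sign-extend from db bytes to 64 bits. */
		if (db < 8 && (v & ((uint64_t)1 << (8 * db - 1))))
			v |= ~(uint64_t)0 << (8 * db);
		*lcn_delta = (int64_t)v;
	}
	return 1 + lb + db;
}
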
1050 int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
1060 ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
1064 if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
1073 for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
1075 ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
1149 * Make a copy of the run.
1151 int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
1153 size_t bytes = run->count * sizeof(struct ntfs_run);
1166 memcpy(new_run->runs, run->runs, bytes);
1167 new_run->count = run->count;
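
run_clone() is a straight deep copy: grow the destination buffer if it is smaller than the source's used bytes, then copy the entries and the count. A user-space sketch with malloc()/free() standing in for kvmalloc()/kvfree():

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint32_t CLST;

struct ntfs_run { CLST vcn, lcn, len; };
struct runs_tree { struct ntfs_run *runs; size_t count, allocated; };

/* Replace the contents of new_run with a copy of run. */
static int run_clone_sketch(const struct runs_tree *run,
			    struct runs_tree *new_run)
{
	size_t bytes = run->count * sizeof(struct ntfs_run);

	if (new_run->allocated < bytes) {
		struct ntfs_run *new_ptr = malloc(bytes);

		if (!new_ptr)
			return -ENOMEM;

		free(new_run->runs);
		new_run->runs = new_ptr;
		new_run->allocated = bytes;
	}

	if (bytes)
		memcpy(new_run->runs, run->runs, bytes);
	new_run->count = run->count;
	return 0;
}
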