Lines Matching defs:lprops

13 * garbage collection. In general, lprops category heaps and lists are used
37 * @lprops: LEB properties
42 static int valuable(struct ubifs_info *c, const struct ubifs_lprops *lprops)
44 int n, cat = lprops->flags & LPROPS_CAT_MASK;
54 if (lprops->free + lprops->dirty >= c->dark_wm)
74 * @lprops: LEB properties to scan
84 const struct ubifs_lprops *lprops, int in_tree,
90 if (lprops->flags & LPROPS_TAKEN)
93 if (!in_tree && valuable(c, lprops))
96 if (lprops->free + lprops->dirty < data->min_space)
99 if (data->exclude_index && lprops->flags & LPROPS_INDEX)
102 if (lprops->free + lprops->dirty == c->leb_size) {
106 } else if (lprops->dirty < c->dead_wm)
109 data->lnum = lprops->lnum;
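
These fragments appear to come from the UBIFS LEB-search code (fs/ubifs/find.c in the Linux kernel, judging by the function and field names). Lines 90-109 are the per-LEB filter applied while scanning for a dirty LEB: skip LEBs that are already taken, require at least min_space of free plus dirty space, optionally reject index LEBs, let a completely empty LEB through (the branch body at lines 103-105 is not in this listing), and otherwise insist on dirty space at or above the dead watermark; valuable() at lines 42-54 separately decides whether a scanned LEB is worth keeping in memory. A minimal standalone model of the filter follows; the struct, the macros and the pick_free gate on the empty-LEB case are simplified assumptions, not the kernel definitions.

    #include <stdbool.h>

    #define LPROPS_TAKEN  0x01    /* simplified stand-ins for the kernel flag bits */
    #define LPROPS_INDEX  0x02

    struct lp { int lnum, free, dirty, flags; };  /* models struct ubifs_lprops */

    /*
     * Model of the dirty-LEB filter at lines 90-109.  leb_size and dead_wm
     * stand in for c->leb_size and c->dead_wm; the gate on the empty-LEB
     * case is assumed to be a pick_free request (lines 103-105 are not in
     * the listing).
     */
    static bool dirty_candidate(const struct lp *p, int leb_size, int dead_wm,
                                int min_space, bool exclude_index, bool pick_free)
    {
            if (p->flags & LPROPS_TAKEN)
                    return false;                 /* LEB is already in use */
            if (p->free + p->dirty < min_space)
                    return false;                 /* too little reclaimable space */
            if (exclude_index && (p->flags & LPROPS_INDEX))
                    return false;                 /* caller excluded index LEBs */
            if (p->free + p->dirty == leb_size)
                    return pick_free;             /* empty or freeable LEB (assumed gate) */
            return p->dirty >= dead_wm;           /* enough dirty space to be worth GC */
    }
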
128 const struct ubifs_lprops *lprops;
136 lprops = heap->arr[i];
137 if (lprops->free + lprops->dirty < min_space)
139 if (lprops->dirty < c->dead_wm)
141 return lprops;
150 list_for_each_entry(lprops, &c->uncat_list, list) {
151 if (lprops->flags & LPROPS_TAKEN)
153 if (lprops->free + lprops->dirty < min_space)
155 if (exclude_index && (lprops->flags & LPROPS_INDEX))
157 if (lprops->dirty < c->dead_wm)
159 return lprops;
176 lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
177 if (IS_ERR(lprops))
178 return lprops;
179 ubifs_assert(c, lprops->lnum == data.lnum);
180 ubifs_assert(c, lprops->free + lprops->dirty >= min_space);
181 ubifs_assert(c, lprops->dirty >= c->dead_wm ||
183 lprops->free + lprops->dirty == c->leb_size));
184 ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
185 ubifs_assert(c, !exclude_index || !(lprops->flags & LPROPS_INDEX));
186 return lprops;
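
Lines 136-159 are the in-memory part of this search: one pass over a category heap (heap->arr[i]) that needs only the space and dead-watermark checks, then one over c->uncat_list that additionally skips taken and, optionally, index LEBs; lines 176-186 show the result of the flash-side LPT scan being re-read with ubifs_lpt_lookup_dirty() and checked against the same conditions. A compact sketch of that ordering, with plain arrays standing in for the kernel heap and list:

    #include <stddef.h>
    #include <stdbool.h>

    #define LPROPS_TAKEN  0x01
    #define LPROPS_INDEX  0x02

    struct lp { int lnum, free, dirty, flags; };  /* models struct ubifs_lprops */

    /* Sketch only: heap[] and uncat[] are plain arrays standing in for the
     * LPT category heap and c->uncat_list walked at lines 136-159. */
    static const struct lp *pick_dirty_in_memory(const struct lp *heap, size_t nheap,
                                                 const struct lp *uncat, size_t nuncat,
                                                 int min_space, int dead_wm,
                                                 bool exclude_index)
    {
            size_t i;

            for (i = 0; i < nheap; i++) {         /* heap pass, lines 136-141 */
                    if (heap[i].free + heap[i].dirty < min_space)
                            continue;
                    if (heap[i].dirty < dead_wm)
                            continue;
                    return &heap[i];
            }
            for (i = 0; i < nuncat; i++) {        /* uncategorized list, lines 150-159 */
                    if (uncat[i].flags & LPROPS_TAKEN)
                            continue;
                    if (uncat[i].free + uncat[i].dirty < min_space)
                            continue;
                    if (exclude_index && (uncat[i].flags & LPROPS_INDEX))
                            continue;
                    if (uncat[i].dirty < dead_wm)
                            continue;
                    return &uncat[i];
            }
            return NULL;  /* nothing in memory: the real code then scans the LPT on flash */
    }
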
341 * @lprops: LEB properties to scan
351 const struct ubifs_lprops *lprops, int in_tree,
357 if (lprops->flags & LPROPS_TAKEN)
360 if (!in_tree && valuable(c, lprops))
363 if (lprops->flags & LPROPS_INDEX)
366 if (lprops->free < data->min_space)
369 if (!data->pick_free && lprops->free == c->leb_size)
377 if (lprops->free + lprops->dirty == c->leb_size && lprops->dirty > 0)
380 data->lnum = lprops->lnum;
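
Lines 357-380 are the corresponding filter for the free-space search: taken and index LEBs are skipped, the LEB must have at least min_space of free space, a completely empty LEB is only accepted when the caller asked for one (pick_free), and a freeable LEB (free plus dirty equal to the LEB size with some dirty space) is rejected. As a standalone predicate over simplified types:

    #include <stdbool.h>

    #define LPROPS_TAKEN  0x01
    #define LPROPS_INDEX  0x02

    struct lp { int free, dirty, flags; };        /* models struct ubifs_lprops */

    /* Model of the free-space filter at lines 357-377; leb_size stands in
     * for c->leb_size. */
    static bool free_candidate(const struct lp *p, int leb_size, int min_space,
                               bool pick_free)
    {
            if (p->flags & LPROPS_TAKEN)
                    return false;                 /* already in use */
            if (p->flags & LPROPS_INDEX)
                    return false;                 /* index LEBs are not handed out here */
            if (p->free < min_space)
                    return false;                 /* not enough free space */
            if (!pick_free && p->free == leb_size)
                    return false;                 /* keep empty LEBs unless one was asked for */
            if (p->free + p->dirty == leb_size && p->dirty > 0)
                    return false;                 /* freeable LEB: skip it */
            return true;
    }
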
399 const struct ubifs_lprops *lprops;
405 lprops = ubifs_fast_find_free(c);
406 if (lprops && lprops->free >= min_space)
407 return lprops;
410 lprops = ubifs_fast_find_empty(c);
411 if (lprops)
412 return lprops;
415 lprops = ubifs_fast_find_free(c);
416 if (lprops && lprops->free >= min_space)
417 return lprops;
422 lprops = heap->arr[i];
423 if (lprops->free >= min_space)
424 return lprops;
433 list_for_each_entry(lprops, &c->uncat_list, list) {
434 if (lprops->flags & LPROPS_TAKEN)
436 if (lprops->flags & LPROPS_INDEX)
438 if (lprops->free >= min_space)
439 return lprops;
455 lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
456 if (IS_ERR(lprops))
457 return lprops;
458 ubifs_assert(c, lprops->lnum == data.lnum);
459 ubifs_assert(c, lprops->free >= min_space);
460 ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
461 ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));
462 return lprops;
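
Lines 405-439 give the order in which free space is looked for in memory before the flash is touched: two ubifs_fast_find_free() attempts bracketing a ubifs_fast_find_empty() attempt, then a heap pass that needs only free >= min_space, then the uncategorized list with the taken/index checks; lines 455-462 show the LPT-scan fall-back result being looked up and asserted. The guards around the fast-find calls are not part of the listing; the sketch below assumes they are the squeeze/pick_free conditions, and its helper functions are illustrative stubs, not kernel APIs.

    #include <stddef.h>

    struct lp { int lnum, free, dirty, flags; };  /* models struct ubifs_lprops */

    /* Illustrative stubs standing in for ubifs_fast_find_free(),
     * ubifs_fast_find_empty() and the heap/uncat_list passes. */
    static const struct lp *fast_find_free(void)           { return NULL; }
    static const struct lp *fast_find_empty(void)          { return NULL; }
    static const struct lp *heap_with_free(int min_space)  { (void)min_space; return NULL; }
    static const struct lp *uncat_with_free(int min_space) { (void)min_space; return NULL; }

    /* Preference order implied by lines 405-439 (guards assumed). */
    static const struct lp *find_free_in_memory(int min_space, int pick_free, int squeeze)
    {
            const struct lp *p;

            if (squeeze) {
                    p = fast_find_free();
                    if (p && p->free >= min_space)
                            return p;
            }
            if (pick_free) {
                    p = fast_find_empty();
                    if (p)
                            return p;
            }
            if (!squeeze) {
                    p = fast_find_free();
                    if (p && p->free >= min_space)
                            return p;
            }
            p = heap_with_free(min_space);        /* heap pass, lines 422-424 */
            if (p)
                    return p;
            return uncat_with_free(min_space);    /* lines 433-439; NULL means: scan the LPT */
    }
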
484 const struct ubifs_lprops *lprops;
509 * result of this is that lprops also decreases
518 * budgeting and lprops subsystems. We could make the
528 lprops = do_find_free_space(c, min_space, pick_free, squeeze);
529 if (IS_ERR(lprops)) {
530 err = PTR_ERR(lprops);
534 lnum = lprops->lnum;
535 flags = lprops->flags | LPROPS_TAKEN;
537 lprops = ubifs_change_lp(c, lprops, LPROPS_NC, LPROPS_NC, flags, 0);
538 if (IS_ERR(lprops)) {
539 err = PTR_ERR(lprops);
549 *offs = c->leb_size - lprops->free;
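
Lines 528-549 show what happens once a candidate is found: its flags gain LPROPS_TAKEN through ubifs_change_lp(), and the write offset returned to the caller is derived from the free space as *offs = c->leb_size - lprops->free. A small worked example of that arithmetic, with made-up numbers:

    #include <assert.h>

    int main(void)
    {
            /* Hypothetical numbers: a 128 KiB LEB with 20 KiB still free. */
            int leb_size  = 131072;
            int free_sz   = 20480;
            int min_space = 8192;

            /* Offset of the free area, as in *offs = c->leb_size - lprops->free. */
            int offs = leb_size - free_sz;

            assert(offs == 110592);               /* writing would start 110592 bytes in */
            assert(leb_size - offs >= min_space); /* the candidate had at least min_space free */
            return 0;
    }
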
581 * @lprops: LEB properties to scan
591 const struct ubifs_lprops *lprops, int in_tree,
597 if (lprops->flags & LPROPS_TAKEN)
600 if (!in_tree && valuable(c, lprops))
603 if (lprops->flags & LPROPS_INDEX)
606 if (lprops->free + lprops->dirty != c->leb_size)
613 data->lnum = lprops->lnum;
623 const struct ubifs_lprops *lprops;
635 lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
636 if (IS_ERR(lprops))
637 return lprops;
638 ubifs_assert(c, lprops->lnum == data.lnum);
639 ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size);
640 ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
641 ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));
642 return lprops;
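
Lines 597-613 are the filter used when an LEB is being sought for the index: taken LEBs and LEBs that already hold index are skipped, and only LEBs whose free plus dirty space equals the whole LEB size (empty or freeable LEBs) qualify; lines 639-641 assert exactly those properties on the scan result. As a predicate over simplified types:

    #include <stdbool.h>

    #define LPROPS_TAKEN  0x01
    #define LPROPS_INDEX  0x02

    struct lp { int free, dirty, flags; };        /* models struct ubifs_lprops */

    /* Model of the index-LEB filter at lines 597-606: only an empty or
     * freeable, non-index, non-taken LEB may be given to the index. */
    static bool idx_candidate(const struct lp *p, int leb_size)
    {
            if (p->flags & (LPROPS_TAKEN | LPROPS_INDEX))
                    return false;
            return p->free + p->dirty == leb_size;
    }
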
663 const struct ubifs_lprops *lprops;
668 lprops = ubifs_fast_find_empty(c);
669 if (!lprops) {
670 lprops = ubifs_fast_find_freeable(c);
671 if (!lprops) {
674 * LPT if there are uncategorized lprops, which means
682 lprops = scan_for_leb_for_idx(c);
683 if (IS_ERR(lprops)) {
684 err = PTR_ERR(lprops);
691 if (!lprops) {
696 lnum = lprops->lnum;
699 lnum, lprops->free, lprops->dirty, lprops->flags);
701 flags = lprops->flags | LPROPS_TAKEN | LPROPS_INDEX;
702 lprops = ubifs_change_lp(c, lprops, c->leb_size, 0, flags, 0);
703 if (IS_ERR(lprops)) {
704 err = PTR_ERR(lprops);
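
Lines 668-704 show the order in which an LEB is handed to the index: ubifs_fast_find_empty() first, then ubifs_fast_find_freeable(), then the scan above as a last resort, after which the choice is committed with ubifs_change_lp(c, lprops, c->leb_size, 0, flags, 0), where flags includes LPROPS_TAKEN and LPROPS_INDEX, i.e. (going by the ubifs_change_lp() arguments) the LEB is recorded as wholly free, clean, taken and belonging to the index. A sketch of that flow; the helper stubs and the error handling are simplified assumptions:

    #include <stddef.h>

    #define LPROPS_TAKEN  0x01
    #define LPROPS_INDEX  0x02

    struct lp { int lnum, free, dirty, flags; };  /* models struct ubifs_lprops */

    /* Illustrative stubs standing in for ubifs_fast_find_empty(),
     * ubifs_fast_find_freeable() and the scan fall-back. */
    static struct lp *find_empty(void)    { return NULL; }
    static struct lp *find_freeable(void) { return NULL; }
    static struct lp *scan_for_idx(void)  { return NULL; }

    /* Returns the chosen LEB number, or -1 if none is available. */
    static int find_free_leb_for_idx_sketch(int leb_size)
    {
            struct lp *p = find_empty();

            if (!p)
                    p = find_freeable();
            if (!p)
                    p = scan_for_idx();           /* may in turn scan the LPT on flash */
            if (!p)
                    return -1;

            /* Mirror of ubifs_change_lp(c, lprops, c->leb_size, 0, flags, 0)
             * at line 702: whole LEB free, nothing dirty, taken for the index. */
            p->free   = leb_size;
            p->dirty  = 0;
            p->flags |= LPROPS_TAKEN | LPROPS_INDEX;
            return p->lnum;
    }
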
764 /* Replace the lprops pointers with LEB numbers */
774 * @lprops: LEB properties to scan
784 const struct ubifs_lprops *lprops, int in_tree,
790 if (lprops->flags & LPROPS_TAKEN)
793 if (!in_tree && valuable(c, lprops))
796 if (!(lprops->flags & LPROPS_INDEX))
799 if (lprops->free + lprops->dirty < c->min_idx_node_sz)
802 data->lnum = lprops->lnum;
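
Lines 790-802 are the filter for finding a dirty index LEB: it must not be taken, it must be an index LEB, and it must have at least c->min_idx_node_sz of free plus dirty space. The corresponding predicate over simplified types:

    #include <stdbool.h>

    #define LPROPS_TAKEN  0x01
    #define LPROPS_INDEX  0x02

    struct lp { int free, dirty, flags; };        /* models struct ubifs_lprops */

    /* Model of the filter at lines 790-799; min_idx_node_sz stands in for
     * c->min_idx_node_sz. */
    static bool dirty_idx_candidate(const struct lp *p, int min_idx_node_sz)
    {
            if (p->flags & LPROPS_TAKEN)
                    return false;                 /* already in use */
            if (!(p->flags & LPROPS_INDEX))
                    return false;                 /* only index LEBs are wanted here */
            return p->free + p->dirty >= min_idx_node_sz;
    }
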
818 const struct ubifs_lprops *lprops;
827 lprops = heap->arr[i];
828 ret = scan_dirty_idx_cb(c, lprops, 1, &data);
832 list_for_each_entry(lprops, &c->frdi_idx_list, list) {
833 ret = scan_dirty_idx_cb(c, lprops, 1, &data);
837 list_for_each_entry(lprops, &c->uncat_list, list) {
838 ret = scan_dirty_idx_cb(c, lprops, 1, &data);
853 lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
854 if (IS_ERR(lprops))
855 return PTR_ERR(lprops);
856 ubifs_assert(c, lprops->lnum == data.lnum);
857 ubifs_assert(c, lprops->free + lprops->dirty >= c->min_idx_node_sz);
858 ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
859 ubifs_assert(c, (lprops->flags & LPROPS_INDEX));
862 lprops->lnum, lprops->free, lprops->dirty, lprops->flags);
864 lprops = ubifs_change_lp(c, lprops, LPROPS_NC, LPROPS_NC,
865 lprops->flags | LPROPS_TAKEN, 0);
866 if (IS_ERR(lprops))
867 return PTR_ERR(lprops);
869 return lprops->lnum;
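
Lines 827-869 show how that callback is used: it is first run (with in_tree set to 1) over an in-memory LEB-properties heap, the frdi_idx list and the uncategorized list; only if that fails is the LPT on flash scanned, after which the chosen LEB is re-read with ubifs_lpt_lookup_dirty(), verified, marked LPROPS_TAKEN through ubifs_change_lp(), and its LEB number returned. A sketch of the in-memory part, with arrays standing in for the three containers:

    #include <stddef.h>
    #include <stdbool.h>

    #define LPROPS_TAKEN  0x01
    #define LPROPS_INDEX  0x02

    struct lp { int lnum, free, dirty, flags; };  /* models struct ubifs_lprops */

    /* Same filter as the sketch above. */
    static bool dirty_idx_ok(const struct lp *p, int min_idx_node_sz)
    {
            return !(p->flags & LPROPS_TAKEN) &&
                   (p->flags & LPROPS_INDEX) &&
                   p->free + p->dirty >= min_idx_node_sz;
    }

    /* Sketch of the in-memory search at lines 827-838: three containers are
     * tried in order; heap[], frdi_idx[] and uncat[] are plain arrays
     * standing in for the LPT heap, c->frdi_idx_list and c->uncat_list. */
    static int pick_dirty_idx_leb(const struct lp *heap, size_t nheap,
                                  const struct lp *frdi_idx, size_t nfrdi,
                                  const struct lp *uncat, size_t nuncat,
                                  int min_idx_node_sz)
    {
            const struct lp *sets[3]   = { heap, frdi_idx, uncat };
            const size_t     counts[3] = { nheap, nfrdi, nuncat };
            size_t s, i;

            for (s = 0; s < 3; s++)
                    for (i = 0; i < counts[s]; i++)
                            if (dirty_idx_ok(&sets[s][i], min_idx_node_sz))
                                    /* The real code then marks the LEB LPROPS_TAKEN
                                     * via ubifs_change_lp() and returns its number. */
                                    return sets[s][i].lnum;

            return -1;  /* nothing in memory: the real code scans the LPT on flash */
    }
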
913 /* The lprops pointers were replaced by LEB numbers */