Lines matching refs: e1

145 struct ubi_wl_entry *e1;
148 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
150 if (e->ec < e1->ec)
152 else if (e->ec > e1->ec)
155 ubi_assert(e->pnum != e1->pnum);
156 if (e->pnum < e1->pnum)
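
Lines 145-157 come from wl_tree_add() in the UBI wear-leveling code (drivers/mtd/ubi/wl.c): an entry is inserted into a red-black tree ordered primarily by erase counter and, on ties, by physical eraseblock number, with an assertion that no two entries share a pnum. Below is a minimal user-space sketch of that ordering; struct wl_entry and wl_entry_cmp are illustrative stand-ins, not kernel API.

#include <assert.h>

/* Illustrative stand-in for struct ubi_wl_entry; only the two fields the
 * ordering actually uses are modelled here. */
struct wl_entry {
        int ec;         /* erase counter */
        int pnum;       /* physical eraseblock number */
};

/* The ordering wl_tree_add() applies while walking the rb-tree: lower
 * erase counter sorts first; equal counters fall back to pnum, which is
 * asserted to be unique (line 155). */
static int wl_entry_cmp(const struct wl_entry *a, const struct wl_entry *b)
{
        if (a->ec != b->ec)
                return a->ec < b->ec ? -1 : 1;
        assert(a->pnum != b->pnum);
        return a->pnum < b->pnum ? -1 : 1;
}
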
242 struct ubi_wl_entry *e1;
244 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
246 if (e->pnum == e1->pnum) {
247 ubi_assert(e == e1);
251 if (e->ec < e1->ec)
253 else if (e->ec > e1->ec)
256 ubi_assert(e->pnum != e1->pnum);
257 if (e->pnum < e1->pnum)
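
Lines 242-257 appear to be the matching membership walk (in_wl_tree(), used by the self-checks): the descent follows the same ec-then-pnum ordering and stops as soon as it meets an entry with the same pnum, which must be the very same object. A sketch with a plain binary-tree node standing in for the kernel's rb_node linkage; the names are mine.

#include <stddef.h>

struct wl_entry {
        int ec;         /* erase counter */
        int pnum;       /* physical eraseblock number */
};

/* Plain binary-tree node; the kernel embeds an rb_node instead. */
struct wl_node {
        struct wl_entry e;
        struct wl_node *left, *right;
};

/* Mirrors the descent on lines 244-257: identical pnum means "found",
 * otherwise the ec-then-pnum ordering decides which child to follow. */
static int wl_tree_contains(const struct wl_node *n, const struct wl_entry *e)
{
        while (n) {
                if (e->pnum == n->e.pnum)
                        return 1;
                if (e->ec < n->e.ec)
                        n = n->left;
                else if (e->ec > n->e.ec)
                        n = n->right;
                else
                        n = e->pnum < n->e.pnum ? n->left : n->right;
        }
        return 0;
}
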
330 struct ubi_wl_entry *e1;
332 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
333 if (e1->ec >= max)
337 e = e1;
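
Lines 330-337 are from find_wl_entry(), which descends the free tree keeping the right-most entry whose erase counter stays below max (the lowest counter in the tree plus an allowed spread), i.e. it picks the most-worn free PEB that is not excessively worn. A flat-array sketch of the same selection; the function name and array representation are mine, not the kernel's rb-tree walk.

#include <stddef.h>

struct wl_entry {
        int ec;
        int pnum;
};

/* Pick the most-worn entry whose erase counter is still below @max,
 * falling back to the least-worn entry when nothing qualifies, which is
 * what the rb-tree walk in find_wl_entry() effectively returns. */
static const struct wl_entry *pick_below_max(const struct wl_entry *v,
                                             size_t n, int max)
{
        const struct wl_entry *best;
        size_t i;

        if (!n)
                return NULL;

        best = &v[0];
        for (i = 1; i < n; i++)         /* start from the least-worn entry */
                if (v[i].ec < best->ec)
                        best = &v[i];

        for (i = 0; i < n; i++)         /* prefer well-worn but below @max */
                if (v[i].ec < max && v[i].ec > best->ec)
                        best = &v[i];

        return best;
}
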
652 struct ubi_wl_entry *e1, *e2;
695 e1 = find_anchor_wl_entry(&ubi->used);
696 if (e1 && ubi->fm_anchor &&
697 (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
710 if (!e1)
716 self_check_in_wl_tree(ubi, e1, &ubi->used);
717 rb_erase(&e1->u.rb, &ubi->used);
718 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
729 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
734 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
736 e1->ec, e2->ec);
743 self_check_in_wl_tree(ubi, e1, &ubi->used);
744 rb_erase(&e1->u.rb, &ubi->used);
746 e1->pnum, e1->ec, e2->pnum, e2->ec);
750 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
755 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
756 rb_erase(&e1->u.rb, &ubi->scrub);
757 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
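
Lines 652-757 are the selection phase of wear_leveling_worker(): a fastmap anchor PEB is moved once its erase counter has drifted UBI_WL_THRESHOLD above a candidate anchor in the used tree (line 697); PEBs queued for scrubbing are moved unconditionally (lines 750-757); and plain wear leveling pairs the least-worn used PEB with a well-worn free PEB only when the counter gap reaches the threshold (lines 729-746). A condensed, illustrative restatement of those decisions; the parameter names and the enum are mine, not kernel API.

#include <stdbool.h>

enum wl_source { WL_NONE, WL_ANCHOR, WL_SCRUB, WL_USED };

/* Condensed decision order of the selection phase as visible in the
 * excerpts above; the kernel additionally handles empty trees, free-PEB
 * reservation and locking, all omitted here. */
static enum wl_source pick_source(bool anchor_requested, int anchor_drift,
                                  bool scrub_pending, int min_used_ec,
                                  int free_ec, int threshold)
{
        /* Fastmap anchor: move it once its erase counter has drifted at
         * least @threshold above a candidate anchor PEB (line 697). */
        if (anchor_requested && anchor_drift >= threshold)
                return WL_ANCHOR;

        /* Scrubbing is about data integrity (bit-flips), not wear, so no
         * threshold applies (lines 750-757). */
        if (scrub_pending)
                return WL_SCRUB;

        /* Normal wear leveling: least-worn used PEB vs. a well-worn free
         * PEB, but only if the gap justifies a move (line 734). */
        if (free_ec - min_used_ec >= threshold)
                return WL_USED;

        return WL_NONE;
}
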
760 ubi->move_from = e1;
765 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
767 * eraseblock (@e1) belongs to. We have to read the volume identifier
775 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
789 dbg_wl("PEB %d has no VID header", e1->pnum);
799 e1->pnum);
809 e1->pnum);
815 err, e1->pnum);
822 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
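
Lines 765-822 are the copy itself: the worker reads the source PEB's volume identifier (VID) header to learn which volume and LEB it holds; a PEB without a VID header carries no mapped data, so the move degenerates into a plain erase of the source; otherwise the LEB is copied to the target with ubi_eba_copy_leb(). A sketch of that flow under those assumptions; the helpers and the enum are placeholders, not kernel API.

#include <stdbool.h>

enum copy_result { COPY_OK, COPY_SOURCE_EMPTY, COPY_FAILED };

/* Placeholder probes standing in for ubi_io_read_vid_hdr() and
 * ubi_eba_copy_leb(); real code goes through UBI's I/O and EBA layers. */
static bool source_has_vid_hdr(int pnum) { (void)pnum; return true; }
static int copy_leb(int src_pnum, int dst_pnum)
{
        (void)src_pnum; (void)dst_pnum;
        return 0;
}

/* Decision shape of the copy phase (lines 775-822). */
static enum copy_result move_peb(int src_pnum, int dst_pnum)
{
        if (!source_has_vid_hdr(src_pnum))
                return COPY_SOURCE_EMPTY;       /* "PEB %d has no VID header" */

        if (copy_leb(src_pnum, dst_pnum))
                return COPY_FAILED;             /* handled by the error paths below */

        return COPY_OK;
}
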
879 e1->pnum, vol_id, lnum, e2->pnum);
891 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
920 * something else. @e1 was not changed, so return it back. @e2 might
926 e1->pnum, vol_id, lnum, e2->pnum, err);
929 e1->pnum, e2->pnum, err);
932 prot_queue_add(ubi, e1);
934 wl_tree_add(e1, &ubi->erroneous);
937 wl_tree_add(e1, &ubi->scrub);
939 wl_tree_add(e1, &ubi->used);
960 err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
972 err, e1->pnum, e2->pnum);
975 err, e1->pnum, vol_id, lnum, e2->pnum);
979 wl_entry_destroy(ubi, e1);
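
Lines 879-979 handle the outcome: after a successful move the old source PEB is erased (line 891), and one cancel path also schedules a torture erase of the source (line 960); when the move is cancelled or fails, e1's data has not been touched, so it is returned to whichever structure matches the reason (lines 932-939), while unrecoverable errors destroy the entry (line 979). A condensed mapping of that re-filing; the reason names are illustrative, the destinations are the kernel's.

enum move_outcome { OUT_PROTECT, OUT_ERRONEOUS, OUT_SCRUB, OUT_KEEP };
enum wl_dest { DEST_PROT_QUEUE, DEST_ERRONEOUS, DEST_SCRUB, DEST_USED };

/* Where the source entry goes when the move does not complete
 * (lines 932-939 in the excerpts above). */
static enum wl_dest refile_source(enum move_outcome why)
{
        switch (why) {
        case OUT_PROTECT:       return DEST_PROT_QUEUE; /* prot_queue_add()            */
        case OUT_ERRONEOUS:     return DEST_ERRONEOUS;  /* wl_tree_add(ubi->erroneous) */
        case OUT_SCRUB:         return DEST_SCRUB;      /* wl_tree_add(ubi->scrub)     */
        default:                return DEST_USED;       /* wl_tree_add(ubi->used)      */
        }
}
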
1029 struct ubi_wl_entry *e1;
1042 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1045 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
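
Lines 1029-1045 are from ensure_wear_leveling(), which applies the same threshold test before scheduling the worker at all: it is only queued when a well-worn free PEB exceeds the least-worn used PEB by UBI_WL_THRESHOLD erase cycles (a Kconfig value, typically 4096). A tiny runnable demo of that predicate with made-up counters; the function name is mine.

#include <stdio.h>

/* Same shape as the checks on lines 734 and 1045; 4096 is the usual
 * CONFIG_MTD_UBI_WL_THRESHOLD default, used here only as an example. */
static int wl_needed(int min_used_ec, int free_ec, int threshold)
{
        return free_ec - min_used_ec >= threshold;
}

int main(void)
{
        printf("gap 3000 -> %s\n", wl_needed(1000, 4000, 4096) ? "move" : "skip");
        printf("gap 5000 -> %s\n", wl_needed(1000, 6000, 4096) ? "move" : "skip");
        return 0;
}
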