Lines Matching defs:aeb
168 * ubi_alloc_aeb - allocate an aeb element
173 * Allocate an aeb object and initialize the pnum and ec information.
181 struct ubi_ainf_peb *aeb;
183 aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL);
184 if (!aeb)
187 aeb->pnum = pnum;
188 aeb->ec = ec;
189 aeb->vol_id = UBI_UNKNOWN;
190 aeb->lnum = UBI_UNKNOWN;
192 return aeb;
196 * ubi_free_aeb - free an aeb element
198 * @aeb: the element to free
200 * Free an aeb object. The caller must have removed the element from any list
203 void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb)
205 kmem_cache_free(ai->aeb_slab_cache, aeb);
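
Put together from the matched lines 181-205 above, the allocate/free pair looks roughly like the sketch below. The listing shows ubi_free_aeb()'s signature verbatim; the exact ubi_alloc_aeb() prototype and its NULL-return error path are not shown and are assumptions.

struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
				   int ec)
{
	struct ubi_ainf_peb *aeb;

	/* zeroed allocation from the per-attach slab cache */
	aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return NULL;

	/* the physical eraseblock number and erase counter are known now */
	aeb->pnum = pnum;
	aeb->ec = ec;
	/* volume/LEB association is filled in later by the caller */
	aeb->vol_id = UBI_UNKNOWN;
	aeb->lnum = UBI_UNKNOWN;

	return aeb;
}

void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb)
{
	kmem_cache_free(ai->aeb_slab_cache, aeb);
}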
232 struct ubi_ainf_peb *aeb;
244 aeb = ubi_alloc_aeb(ai, pnum, ec);
245 if (!aeb)
248 aeb->vol_id = vol_id;
249 aeb->lnum = lnum;
251 list_add(&aeb->u.list, list);
253 list_add_tail(&aeb->u.list, list);
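
The fragment at 244-253 belongs to add_to_list() (the name and argument order can be read off the call at lines 650-651 below). It allocates an aeb, records which volume/LEB the PEB was found to hold, and queues it at the head or tail of the target list. A minimal sketch; the "to_head" parameter name and the -ENOMEM path are assumptions:

static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
		       int lnum, int ec, int to_head, struct list_head *list)
{
	struct ubi_ainf_peb *aeb;

	aeb = ubi_alloc_aeb(ai, pnum, ec);
	if (!aeb)
		return -ENOMEM;

	aeb->vol_id = vol_id;
	aeb->lnum = lnum;

	if (to_head)
		list_add(&aeb->u.list, list);		/* head of the list */
	else
		list_add_tail(&aeb->u.list, list);	/* tail of the list */

	return 0;
}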
270 struct ubi_ainf_peb *aeb;
274 aeb = ubi_alloc_aeb(ai, pnum, ec);
275 if (!aeb)
279 list_add(&aeb->u.list, &ai->corr);
299 struct ubi_ainf_peb *aeb;
301 aeb = ubi_alloc_aeb(ai, pnum, ec);
302 if (!aeb)
305 aeb->vol_id = be32_to_cpu(vid_hdr->vol_id);
306 aeb->sqnum = be64_to_cpu(vid_hdr->sqnum);
307 list_add(&aeb->u.list, &ai->fastmap);
310 aeb->vol_id, aeb->sqnum);
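
The fastmap fragment at 299-310 does the same allocation, but takes the volume id and sequence number straight from the on-flash VID header (stored big-endian) before queueing the PEB on ai->fastmap. A hedged reconstruction; the enclosing function's name and return convention are not in the listing:

static int add_fastmap(struct ubi_attach_info *ai, int pnum,
		       struct ubi_vid_hdr *vid_hdr, int ec)
{
	struct ubi_ainf_peb *aeb;

	aeb = ubi_alloc_aeb(ai, pnum, ec);
	if (!aeb)
		return -ENOMEM;

	/* VID header fields are kept big-endian on flash */
	aeb->vol_id = be32_to_cpu(vid_hdr->vol_id);
	aeb->sqnum = be64_to_cpu(vid_hdr->sqnum);
	list_add(&aeb->u.list, &ai->fastmap);

	return 0;
}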
421 * @aeb: first logical eraseblock to compare
430 * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
438 int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
446 if (sqnum2 == aeb->sqnum) {
460 second_is_newer = (sqnum2 > aeb->sqnum);
479 if (!aeb->copy_flag) {
490 pnum = aeb->pnum;
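
ubi_compare_lebs() decides which of two copies of the same LEB is current. The kernel-doc fragment at 421-430 only spells out bit 0; the way the caller consumes the result at lines 637-662 below suggests the rest, so this summary is inferred rather than quoted:

/*
 * Apparent return-value layout of ubi_compare_lebs():
 *
 *   bit 0 clear - the first copy (@aeb) is newer than the second (@pnum)
 *   bit 0 set   - the second copy is newer (sqnum2 > aeb->sqnum, line 460)
 *   bit 1       - bit-flips were seen in the newer copy, so the caller
 *                 marks it for scrubbing (cmp_res & 2 at line 660)
 *   bit 2       - hint passed through when the losing copy is queued for
 *                 erasure (cmp_res & 4 at lines 650-651)
 *
 * A negative value presumably reports an I/O or memory error.
 */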
569 struct ubi_ainf_peb *aeb;
595 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
596 if (lnum != aeb->lnum) {
597 if (lnum < aeb->lnum)
610 aeb->pnum, aeb->sqnum, aeb->ec);
625 if (aeb->sqnum == sqnum && sqnum != 0) {
628 ubi_dump_aeb(aeb, 0);
637 cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
650 err = add_to_list(ai, aeb->pnum, aeb->vol_id,
651 aeb->lnum, aeb->ec, cmp_res & 4,
656 aeb->ec = ec;
657 aeb->pnum = pnum;
658 aeb->vol_id = vol_id;
659 aeb->lnum = lnum;
660 aeb->scrub = ((cmp_res & 2) || bitflips);
661 aeb->copy_flag = vid_hdr->copy_flag;
662 aeb->sqnum = sqnum;
688 aeb = ubi_alloc_aeb(ai, pnum, ec);
689 if (!aeb)
692 aeb->vol_id = vol_id;
693 aeb->lnum = lnum;
694 aeb->scrub = bitflips;
695 aeb->copy_flag = vid_hdr->copy_flag;
696 aeb->sqnum = sqnum;
704 rb_link_node(&aeb->u.rb, parent, p);
705 rb_insert_color(&aeb->u.rb, &av->root);
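
Lines 569-705 walk a per-volume red-black tree keyed by logical eraseblock number. When the LEB is already in the tree, equal non-zero sequence numbers are treated as a bug (line 625) and otherwise ubi_compare_lebs() picks the surviving copy; when it is not, a fresh aeb is linked in. Stripped of that conflict handling, the insertion skeleton is the standard kernel rb-tree pattern (names not present in the listing, such as new_aeb, are placeholders):

	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		/*
		 * lnum == aeb->lnum means a second copy of this LEB was
		 * found; that branch (lines 610-662) updates the node in
		 * place or discards the new PEB, and is omitted here.
		 */
		if (lnum < aeb->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* no prior copy of this LEB: hook the new node in and rebalance */
	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);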
815 * This function returns a pointer to the "aeb" of the found free PEB in case
822 struct ubi_ainf_peb *aeb, *tmp_aeb;
825 aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
826 list_del(&aeb->u.list);
827 dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
828 return aeb;
837 list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
838 if (aeb->ec == UBI_UNKNOWN)
839 aeb->ec = ai->mean_ec;
841 err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
845 aeb->ec += 1;
846 list_del(&aeb->u.list);
847 dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
848 return aeb;
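
The helper documented at 815 prefers handing out a PEB from ai->free and only falls back to erasing one from ai->erase on the spot, bumping its erase counter once the erase succeeds. Roughly, assuming the empty-list check, the per-iteration error handling and the final failure path look like this (none of them are in the listing):

	if (!list_empty(&ai->free)) {
		aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
		list_del(&aeb->u.list);
		return aeb;
	}

	list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
		if (aeb->ec == UBI_UNKNOWN)
			aeb->ec = ai->mean_ec;

		/* erase synchronously; the erase counter grows by one */
		err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec + 1);
		if (err)
			continue;		/* try the next candidate */

		aeb->ec += 1;
		list_del(&aeb->u.list);
		return aeb;
	}

	return ERR_PTR(-ENOSPC);		/* assumed failure path */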
1209 struct ubi_ainf_peb *aeb;
1224 list_for_each_entry(aeb, &ai->corr, u.list)
1225 pr_cont(" %d", aeb->pnum);
1273 * @list: put the aeb elements in there if !NULL, otherwise free them
1280 struct ubi_ainf_peb *aeb;
1289 aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
1292 if (this->rb_left == &aeb->u.rb)
1299 list_add_tail(&aeb->u.list, list);
1301 ubi_free_aeb(ai, aeb);
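
The tree teardown at 1280-1301 is an iterative post-order walk: descend to a leaf, detach it from its parent by clearing the matching child pointer, then either move the aeb onto the caller-supplied list or free it. Reconstructed shape; the outer loop and the descent steps are assumptions consistent with the matched lines:

	struct rb_node *this = av->root.rb_node;
	struct ubi_ainf_peb *aeb;

	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);

			this = rb_parent(this);
			if (this) {
				/* unhook the leaf from its parent */
				if (this->rb_left == &aeb->u.rb)
					this->rb_left = NULL;
				else
					this->rb_right = NULL;
			}

			if (list)
				list_add_tail(&aeb->u.list, list);
			else
				ubi_free_aeb(ai, aeb);
		}
	}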
1313 struct ubi_ainf_peb *aeb, *aeb_tmp;
1317 list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
1318 list_del(&aeb->u.list);
1319 ubi_free_aeb(ai, aeb);
1321 list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
1322 list_del(&aeb->u.list);
1323 ubi_free_aeb(ai, aeb);
1325 list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
1326 list_del(&aeb->u.list);
1327 ubi_free_aeb(ai, aeb);
1329 list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
1330 list_del(&aeb->u.list);
1331 ubi_free_aeb(ai, aeb);
1333 list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
1334 list_del(&aeb->u.list);
1335 ubi_free_aeb(ai, aeb);
1380 struct ubi_ainf_peb *aeb;
1416 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
1417 if (aeb->ec == UBI_UNKNOWN)
1418 aeb->ec = ai->mean_ec;
1421 list_for_each_entry(aeb, &ai->free, u.list) {
1422 if (aeb->ec == UBI_UNKNOWN)
1423 aeb->ec = ai->mean_ec;
1426 list_for_each_entry(aeb, &ai->corr, u.list)
1427 if (aeb->ec == UBI_UNKNOWN)
1428 aeb->ec = ai->mean_ec;
1430 list_for_each_entry(aeb, &ai->erase, u.list)
1431 if (aeb->ec == UBI_UNKNOWN)
1432 aeb->ec = ai->mean_ec;
1665 struct ubi_ainf_peb *aeb, *last_aeb;
1717 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1720 last_aeb = aeb;
1723 if (aeb->pnum < 0 || aeb->ec < 0) {
1728 if (aeb->ec < ai->min_ec) {
1730 ai->min_ec, aeb->ec);
1734 if (aeb->ec > ai->max_ec) {
1736 ai->max_ec, aeb->ec);
1740 if (aeb->pnum >= ubi->peb_count) {
1742 aeb->pnum, ubi->peb_count);
1747 if (aeb->lnum >= av->used_ebs) {
1758 if (aeb->lnum > av->highest_lnum) {
1773 aeb = last_aeb;
1775 if (aeb->lnum != av->highest_lnum) {
1790 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1795 last_aeb = aeb;
1797 err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1);
1813 if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
1814 ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
1828 if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
1829 ubi_err(ubi, "bad lnum %d", aeb->lnum);
1877 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
1878 buf[aeb->pnum] = 1;
1880 list_for_each_entry(aeb, &ai->free, u.list)
1881 buf[aeb->pnum] = 1;
1883 list_for_each_entry(aeb, &ai->corr, u.list)
1884 buf[aeb->pnum] = 1;
1886 list_for_each_entry(aeb, &ai->erase, u.list)
1887 buf[aeb->pnum] = 1;
1889 list_for_each_entry(aeb, &ai->alien, u.list)
1890 buf[aeb->pnum] = 1;
1905 ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
1906 ubi_dump_aeb(aeb, 0);
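
The self-check fragment at 1877-1890 marks, in a per-PEB byte array, every PEB referenced by a volume tree or by the free/corr/erase/alien lists; a slot left at zero would be a PEB the attach pass lost track of. The scan that presumably follows is not in the listing, so this closing sketch (allocation, loop and message) is an assumption:

	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... the marking loops shown at lines 1877-1890 ... */

	for (pnum = 0; pnum < ubi->peb_count; pnum++)
		if (!buf[pnum])
			ubi_err(ubi, "PEB %d is not referenced", pnum);

	kfree(buf);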