// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * Wear-leveling is ensured by moving the contents of used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As was said, from this sub-system's perspective all physical eraseblocks
 * are either "free" or "used". Free eraseblocks are kept in the @wl->free
 * RB-tree, while used eraseblocks are kept in @wl->used, @wl->erroneous, or
 * @wl->scrub RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the protection queue. Eraseblocks are put to the tail of this queue when
 * they are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from
 * the head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines for how many (global) erase cycles PEBs are
 * protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In the future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter, and we need to pick
 * the target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-work of the WL sub-system.
 */
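
/*
 * Worked example of the protection queue semantics (assuming the default
 * %UBI_PROT_QUEUE_LEN of 10 from ubi.h): a PEB handed out by
 * 'ubi_wl_get_peb()' goes to the queue tail, and one queue slot is retired
 * per erase operation anywhere on the device, so that PEB becomes eligible
 * for wear-leveling again only after about 10 global erase cycles.
 */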

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the
 * target physical eraseblock to move to. The simplest way would be just to
 * pick the one with the highest erase counter. But in certain workloads this
 * could lead to unlimited wear of one or a few physical eraseblocks. Indeed,
 * imagine a situation when the picked physical eraseblock is constantly
 * erased after the data is written to it. So, we have a constant which limits
 * the highest erase counter of the free physical eraseblock to pick. Namely,
 * the WL sub-system does not pick eraseblocks with an erase counter greater
 * than the lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
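
/*
 * Illustrative sketch (not used by the driver): given the smallest erase
 * counter currently in the free tree, this is the highest erase counter
 * the WL sub-system will still accept when picking a free PEB.
 */
static inline int wl_example_max_pick_ec(int min_free_ec)
{
	return min_free_ec + WL_FREE_MAX_DIFF;
}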

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}
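
/*
 * Illustrative restatement of the ordering used above (not used by the
 * driver): entries are keyed by (erase counter, PEB number) pairs, so ties
 * on the erase counter are broken by the physical eraseblock number, which
 * is unique.
 */
static inline int wl_example_entry_cmp(const struct ubi_wl_entry *a,
				       const struct ubi_wl_entry *b)
{
	if (a->ec != b->ec)
		return a->ec < b->ec ? -1 : 1;
	return a->pnum < b->pnum ? -1 : 1;
}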

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear-leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing work at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes it in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err(ubi, "work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}
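
/*
 * Usage sketch (hypothetical helper, not part of the driver): a caller
 * that wants to drain all pending works synchronously could simply call
 * do_work() until the queue is empty. The real flush path is more careful
 * and also takes @ubi->work_sem in write mode to wait for running workers.
 */
static inline int wl_example_drain_works(struct ubi_device *ubi)
{
	int err = 0;

	/* Unlocked read of @works_count; good enough for a sketch. */
	while (!err && ubi->works_count > 0)
		err = do_work(ubi);

	return err;
}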

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * in_pq - check if a wear-leveling entry is present in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns non-zero if @e is in the protection queue and zero
 * if it is not.
 */
static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 1;

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
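
/*
 * Equivalent way to compute the tail index above (illustrative only): the
 * protection queue is a circular array of list heads, so the tail is the
 * slot right before @pq_head, modulo the queue length.
 */
static inline int wl_example_pq_tail(int pq_head)
{
	return (pq_head + UBI_PROT_QUEUE_LEN - 1) % UBI_PROT_QUEUE_LEN;
}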

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to look in
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear-leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
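
/*
 * Worked example for the search above: with free-tree erase counters
 * {10, 12, 20, 40} and @diff = 10, @max is 10 + 10 = 20. Nodes with
 * EC >= 20 are rejected (the walk goes left), so the function returns the
 * entry with EC 12 - the largest erase counter strictly below @max.
 */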

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to look in
 *
 * This function looks for a wear-leveling entry with a medium erase counter,
 * but not greater than or equal to the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

		/*
		 * If no fastmap has been written and this WL entry can be used
		 * as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later.
		 */
		e = may_reserve_for_fm(ubi, e, root);
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear-leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err(ubi, "no free eraseblocks");
		return NULL;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);

	return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}
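
/*
 * Note on the arithmetic above (informational): 'ubi_io_sync_erase()'
 * returns the number of erasures actually performed (more than one if the
 * PEB was tortured), which is why the result is added to @ec. The sum is
 * kept in a 64-bit variable and checked against %UBI_MAX_ERASECOUNTER so
 * that it still fits the 32-bit in-RAM erase counter.
 */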

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * head of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if @ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: denotes whether the work_sem is already held
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture, bool nested)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	if (nested)
		__schedule_ubi_work(ubi, wl_wrk);
	else
		schedule_ubi_work(ubi, wl_wrk);
	return 0;
}
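
/*
 * Design note (informational): the @nested flag exists because
 * @ubi->work_sem is a read-write semaphore and is not recursive. A worker
 * already runs with it held in read mode, so scheduling follow-up work
 * from worker context must use __schedule_ubi_work() directly; taking the
 * semaphore again could deadlock against a writer waiting in the flush
 * path.
 */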

static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk.e = e;
	wl_wrk.vol_id = vol_id;
	wl_wrk.lnum = lnum;
	wl_wrk.torture = torture;

	return __erase_worker(ubi, &wl_wrk);
}

static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function moves the contents of a less worn-out physical eraseblock to
 * a more worn-out one. Returns zero in case of success and a negative error
 * code in case of failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int shutdown)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int erase = 0, keep = 0, vol_id = -1, lnum = -1;
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	int dst_leb_clean = 0;

	kfree(wrk);
	if (shutdown)
		return 0;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	down_read(&ubi->fm_eba_sem);
	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	e1 = find_anchor_wl_entry(&ubi->used);
	if (e1 && ubi->fm_anchor &&
	    (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
		ubi->fm_do_produce_anchor = 1;
		/*
		 * fm_anchor is no longer considered a good anchor.
		 * NULL assignment also prevents multiple wear level checks
		 * of this PEB.
		 */
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->fm_anchor = NULL;
		ubi->free_count++;
	}

	if (ubi->fm_do_produce_anchor) {
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
		ubi->fm_do_produce_anchor = 0;
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			ubi->free_count++;
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		dst_leb_clean = 1;
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given out, so most probably the user has not yet
			 * had a chance to write one, because it was preempted.
			 * So add this PEB to the protection queue for now,
			 * because presumably more data will be written to it
			 * (including the missing VID header), and then we'll
			 * move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		} else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
			/*
			 * While a full scan would detect interrupted erasures
			 * at attach time, we can face them here when attached
			 * from Fastmap.
			 */
			dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
			       e1->pnum);
			erase = 1;
			goto out_not_moved;
		}

		ubi_err(ubi, "error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			dst_leb_clean = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			dst_leb_clean = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			keep = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err(ubi, "too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			dst_leb_clean = 1;
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_buf(vidb);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		if (e2) {
			spin_lock(&ubi->wl_lock);
			wl_entry_destroy(ubi, e2);
			spin_unlock(&ubi->wl_lock);
		}
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
		if (err)
			goto out_ro;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	return 0;

	/*
	 * For some reason the LEB was not moved: it might be an error, it
	 * might be something else. @e1 was not changed, so return it back.
	 * @e2 might have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else if (keep)
		wl_tree_add(e1, &ubi->used);
	if (dst_leb_clean) {
		wl_tree_add(e2, &ubi->free);
		ubi->free_count++;
	}

	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_buf(vidb);
	if (dst_leb_clean) {
		ensure_wear_leveling(ubi, 1);
	} else {
		err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
		if (err)
			goto out_ro;
	}

	if (erase) {
		err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
		if (err)
			goto out_ro;
	}

	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	wl_entry_destroy(ubi, e1);
	wl_entry_destroy(ubi, e2);
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_buf(vidb);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	ubi_free_vid_buf(vidb);
	return 0;
}
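
/*
 * Summary of the exit paths above (informational): a successful move adds
 * @e2 to @ubi->used (unless the LEB was put meanwhile) and erases @e1;
 * "not moved" returns @e1 to the protection queue, @ubi->scrub,
 * @ubi->erroneous or @ubi->used depending on the reason, and recycles or
 * erases @e2; unrecoverable errors destroy both entries and switch the
 * device to read-only mode.
 */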
9968c2ecf20Sopenharmony_ci
9978c2ecf20Sopenharmony_ci/**
9988c2ecf20Sopenharmony_ci * ensure_wear_leveling - schedule wear-leveling if it is needed.
9998c2ecf20Sopenharmony_ci * @ubi: UBI device description object
10008c2ecf20Sopenharmony_ci * @nested: set to non-zero if this function is called from UBI worker
10018c2ecf20Sopenharmony_ci *
10028c2ecf20Sopenharmony_ci * This function checks if it is time to start wear-leveling and schedules it
10038c2ecf20Sopenharmony_ci * if yes. This function returns zero in case of success and a negative error
10048c2ecf20Sopenharmony_ci * code in case of failure.
10058c2ecf20Sopenharmony_ci */
10068c2ecf20Sopenharmony_cistatic int ensure_wear_leveling(struct ubi_device *ubi, int nested)
10078c2ecf20Sopenharmony_ci{
10088c2ecf20Sopenharmony_ci	int err = 0;
10098c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e1;
10108c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e2;
10118c2ecf20Sopenharmony_ci	struct ubi_work *wrk;
10128c2ecf20Sopenharmony_ci
10138c2ecf20Sopenharmony_ci	spin_lock(&ubi->wl_lock);
10148c2ecf20Sopenharmony_ci	if (ubi->wl_scheduled)
10158c2ecf20Sopenharmony_ci		/* Wear-leveling is already in the work queue */
10168c2ecf20Sopenharmony_ci		goto out_unlock;
10178c2ecf20Sopenharmony_ci
10188c2ecf20Sopenharmony_ci	/*
10198c2ecf20Sopenharmony_ci	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
10208c2ecf20Sopenharmony_ci	 * the WL worker has to be scheduled anyway.
10218c2ecf20Sopenharmony_ci	 */
10228c2ecf20Sopenharmony_ci	if (!ubi->scrub.rb_node) {
10238c2ecf20Sopenharmony_ci		if (!ubi->used.rb_node || !ubi->free.rb_node)
10248c2ecf20Sopenharmony_ci			/* No physical eraseblocks - no deal */
10258c2ecf20Sopenharmony_ci			goto out_unlock;
10268c2ecf20Sopenharmony_ci
10278c2ecf20Sopenharmony_ci		/*
10288c2ecf20Sopenharmony_ci		 * We schedule wear-leveling only if the difference between the
10298c2ecf20Sopenharmony_ci		 * lowest erase counter of used physical eraseblocks and a high
10308c2ecf20Sopenharmony_ci		 * erase counter of free physical eraseblocks is greater than
10318c2ecf20Sopenharmony_ci		 * %UBI_WL_THRESHOLD.
10328c2ecf20Sopenharmony_ci		 */
10338c2ecf20Sopenharmony_ci		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
10348c2ecf20Sopenharmony_ci		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
10358c2ecf20Sopenharmony_ci
10368c2ecf20Sopenharmony_ci		if (e2->ec - e1->ec < UBI_WL_THRESHOLD)
10378c2ecf20Sopenharmony_ci			goto out_unlock;
10388c2ecf20Sopenharmony_ci		dbg_wl("schedule wear-leveling");
10398c2ecf20Sopenharmony_ci	} else
10408c2ecf20Sopenharmony_ci		dbg_wl("schedule scrubbing");
10418c2ecf20Sopenharmony_ci
10428c2ecf20Sopenharmony_ci	ubi->wl_scheduled = 1;
10438c2ecf20Sopenharmony_ci	spin_unlock(&ubi->wl_lock);
10448c2ecf20Sopenharmony_ci
10458c2ecf20Sopenharmony_ci	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
10468c2ecf20Sopenharmony_ci	if (!wrk) {
10478c2ecf20Sopenharmony_ci		err = -ENOMEM;
10488c2ecf20Sopenharmony_ci		goto out_cancel;
10498c2ecf20Sopenharmony_ci	}
10508c2ecf20Sopenharmony_ci
10518c2ecf20Sopenharmony_ci	wrk->func = &wear_leveling_worker;
10528c2ecf20Sopenharmony_ci	if (nested)
10538c2ecf20Sopenharmony_ci		__schedule_ubi_work(ubi, wrk);
10548c2ecf20Sopenharmony_ci	else
10558c2ecf20Sopenharmony_ci		schedule_ubi_work(ubi, wrk);
10568c2ecf20Sopenharmony_ci	return err;
10578c2ecf20Sopenharmony_ci
10588c2ecf20Sopenharmony_ciout_cancel:
10598c2ecf20Sopenharmony_ci	spin_lock(&ubi->wl_lock);
10608c2ecf20Sopenharmony_ci	ubi->wl_scheduled = 0;
10618c2ecf20Sopenharmony_ciout_unlock:
10628c2ecf20Sopenharmony_ci	spin_unlock(&ubi->wl_lock);
10638c2ecf20Sopenharmony_ci	return err;
10648c2ecf20Sopenharmony_ci}
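
/*
 * The following is an illustrative sketch, not part of the driver: it
 * restates the scheduling condition checked above as a stand-alone
 * predicate. The helper name is hypothetical; a real caller would have
 * to hold @ubi->wl_lock, exactly like ensure_wear_leveling() does.
 */
static bool __maybe_unused wl_needs_leveling(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e1, *e2;

	/* Nothing to level without both a used and a free PEB */
	if (!ubi->used.rb_node || !ubi->free.rb_node)
		return false;

	/* Lowest EC among used PEBs vs. a high EC among free PEBs */
	e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
	e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

	return e2->ec - e1->ec >= UBI_WL_THRESHOLD;
}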
10658c2ecf20Sopenharmony_ci
10668c2ecf20Sopenharmony_ci/**
10678c2ecf20Sopenharmony_ci * __erase_worker - physical eraseblock erase worker function.
10688c2ecf20Sopenharmony_ci * @ubi: UBI device description object
10698c2ecf20Sopenharmony_ci * @wl_wrk: the work object
10708c2ecf20Sopenharmony_ci *
10718c2ecf20Sopenharmony_ci * This function erases a physical eraseblock and performs torture testing if
10728c2ecf20Sopenharmony_ci * needed. It also takes care of marking the physical eraseblock bad if the
10738c2ecf20Sopenharmony_ci * erasure fails. Returns zero in case of success and a negative error code
10748c2ecf20Sopenharmony_ci * in case of failure.
10758c2ecf20Sopenharmony_ci */
10768c2ecf20Sopenharmony_cistatic int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
10778c2ecf20Sopenharmony_ci{
10788c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e = wl_wrk->e;
10798c2ecf20Sopenharmony_ci	int pnum = e->pnum;
10808c2ecf20Sopenharmony_ci	int vol_id = wl_wrk->vol_id;
10818c2ecf20Sopenharmony_ci	int lnum = wl_wrk->lnum;
10828c2ecf20Sopenharmony_ci	int err, available_consumed = 0;
10838c2ecf20Sopenharmony_ci
10848c2ecf20Sopenharmony_ci	dbg_wl("erase PEB %d EC %d LEB %d:%d",
10858c2ecf20Sopenharmony_ci	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
10868c2ecf20Sopenharmony_ci
10878c2ecf20Sopenharmony_ci	err = sync_erase(ubi, e, wl_wrk->torture);
10888c2ecf20Sopenharmony_ci	if (!err) {
10898c2ecf20Sopenharmony_ci		spin_lock(&ubi->wl_lock);
10908c2ecf20Sopenharmony_ci
10918c2ecf20Sopenharmony_ci		if (!ubi->fm_disabled && !ubi->fm_anchor &&
10928c2ecf20Sopenharmony_ci		    e->pnum < UBI_FM_MAX_START) {
10938c2ecf20Sopenharmony_ci			/*
10948c2ecf20Sopenharmony_ci			 * Abort anchor production; if needed, it will be
10958c2ecf20Sopenharmony_ci			 * re-enabled by the wear-leveling pass started below.
10968c2ecf20Sopenharmony_ci			 */
10978c2ecf20Sopenharmony_ci			ubi->fm_anchor = e;
10988c2ecf20Sopenharmony_ci			ubi->fm_do_produce_anchor = 0;
10998c2ecf20Sopenharmony_ci		} else {
11008c2ecf20Sopenharmony_ci			wl_tree_add(e, &ubi->free);
11018c2ecf20Sopenharmony_ci			ubi->free_count++;
11028c2ecf20Sopenharmony_ci		}
11038c2ecf20Sopenharmony_ci
11048c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
11058c2ecf20Sopenharmony_ci
11068c2ecf20Sopenharmony_ci		/*
11078c2ecf20Sopenharmony_ci		 * One more erase operation has happened; take care of the
11088c2ecf20Sopenharmony_ci		 * protected physical eraseblocks.
11098c2ecf20Sopenharmony_ci		 */
11108c2ecf20Sopenharmony_ci		serve_prot_queue(ubi);
11118c2ecf20Sopenharmony_ci
11128c2ecf20Sopenharmony_ci		/* And take care about wear-leveling */
11138c2ecf20Sopenharmony_ci		err = ensure_wear_leveling(ubi, 1);
11148c2ecf20Sopenharmony_ci		return err;
11158c2ecf20Sopenharmony_ci	}
11168c2ecf20Sopenharmony_ci
11178c2ecf20Sopenharmony_ci	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
11188c2ecf20Sopenharmony_ci
11198c2ecf20Sopenharmony_ci	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
11208c2ecf20Sopenharmony_ci	    err == -EBUSY) {
11218c2ecf20Sopenharmony_ci		int err1;
11228c2ecf20Sopenharmony_ci
11238c2ecf20Sopenharmony_ci		/* Re-schedule the LEB for erasure */
11248c2ecf20Sopenharmony_ci		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
11258c2ecf20Sopenharmony_ci		if (err1) {
11268c2ecf20Sopenharmony_ci			spin_lock(&ubi->wl_lock);
11278c2ecf20Sopenharmony_ci			wl_entry_destroy(ubi, e);
11288c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
11298c2ecf20Sopenharmony_ci			err = err1;
11308c2ecf20Sopenharmony_ci			goto out_ro;
11318c2ecf20Sopenharmony_ci		}
11328c2ecf20Sopenharmony_ci		return err;
11338c2ecf20Sopenharmony_ci	}
11348c2ecf20Sopenharmony_ci
11358c2ecf20Sopenharmony_ci	spin_lock(&ubi->wl_lock);
11368c2ecf20Sopenharmony_ci	wl_entry_destroy(ubi, e);
11378c2ecf20Sopenharmony_ci	spin_unlock(&ubi->wl_lock);
11388c2ecf20Sopenharmony_ci	if (err != -EIO)
11398c2ecf20Sopenharmony_ci		/*
11408c2ecf20Sopenharmony_ci		 * If this is not %-EIO, we have no idea what to do. Scheduling
11418c2ecf20Sopenharmony_ci		 * this physical eraseblock for erasure again would cause
11428c2ecf20Sopenharmony_ci		 * errors again and again. Well, let's switch to R/O mode.
11438c2ecf20Sopenharmony_ci		 */
11448c2ecf20Sopenharmony_ci		goto out_ro;
11458c2ecf20Sopenharmony_ci
11468c2ecf20Sopenharmony_ci	/* It is %-EIO, the PEB went bad */
11478c2ecf20Sopenharmony_ci
11488c2ecf20Sopenharmony_ci	if (!ubi->bad_allowed) {
11498c2ecf20Sopenharmony_ci		ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
11508c2ecf20Sopenharmony_ci		goto out_ro;
11518c2ecf20Sopenharmony_ci	}
11528c2ecf20Sopenharmony_ci
11538c2ecf20Sopenharmony_ci	spin_lock(&ubi->volumes_lock);
11548c2ecf20Sopenharmony_ci	if (ubi->beb_rsvd_pebs == 0) {
11558c2ecf20Sopenharmony_ci		if (ubi->avail_pebs == 0) {
11568c2ecf20Sopenharmony_ci			spin_unlock(&ubi->volumes_lock);
11578c2ecf20Sopenharmony_ci			ubi_err(ubi, "no reserved/available physical eraseblocks");
11588c2ecf20Sopenharmony_ci			goto out_ro;
11598c2ecf20Sopenharmony_ci		}
11608c2ecf20Sopenharmony_ci		ubi->avail_pebs -= 1;
11618c2ecf20Sopenharmony_ci		available_consumed = 1;
11628c2ecf20Sopenharmony_ci	}
11638c2ecf20Sopenharmony_ci	spin_unlock(&ubi->volumes_lock);
11648c2ecf20Sopenharmony_ci
11658c2ecf20Sopenharmony_ci	ubi_msg(ubi, "mark PEB %d as bad", pnum);
11668c2ecf20Sopenharmony_ci	err = ubi_io_mark_bad(ubi, pnum);
11678c2ecf20Sopenharmony_ci	if (err)
11688c2ecf20Sopenharmony_ci		goto out_ro;
11698c2ecf20Sopenharmony_ci
11708c2ecf20Sopenharmony_ci	spin_lock(&ubi->volumes_lock);
11718c2ecf20Sopenharmony_ci	if (ubi->beb_rsvd_pebs > 0) {
11728c2ecf20Sopenharmony_ci		if (available_consumed) {
11738c2ecf20Sopenharmony_ci			/*
11748c2ecf20Sopenharmony_ci			 * The number of reserved PEBs increased since we last
11758c2ecf20Sopenharmony_ci			 * checked.
11768c2ecf20Sopenharmony_ci			 */
11778c2ecf20Sopenharmony_ci			ubi->avail_pebs += 1;
11788c2ecf20Sopenharmony_ci			available_consumed = 0;
11798c2ecf20Sopenharmony_ci		}
11808c2ecf20Sopenharmony_ci		ubi->beb_rsvd_pebs -= 1;
11818c2ecf20Sopenharmony_ci	}
11828c2ecf20Sopenharmony_ci	ubi->bad_peb_count += 1;
11838c2ecf20Sopenharmony_ci	ubi->good_peb_count -= 1;
11848c2ecf20Sopenharmony_ci	ubi_calculate_reserved(ubi);
11858c2ecf20Sopenharmony_ci	if (available_consumed)
11868c2ecf20Sopenharmony_ci		ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
11878c2ecf20Sopenharmony_ci	else if (ubi->beb_rsvd_pebs)
11888c2ecf20Sopenharmony_ci		ubi_msg(ubi, "%d PEBs left in the reserve",
11898c2ecf20Sopenharmony_ci			ubi->beb_rsvd_pebs);
11908c2ecf20Sopenharmony_ci	else
11918c2ecf20Sopenharmony_ci		ubi_warn(ubi, "last PEB from the reserve was used");
11928c2ecf20Sopenharmony_ci	spin_unlock(&ubi->volumes_lock);
11938c2ecf20Sopenharmony_ci
11948c2ecf20Sopenharmony_ci	return err;
11958c2ecf20Sopenharmony_ci
11968c2ecf20Sopenharmony_ciout_ro:
11978c2ecf20Sopenharmony_ci	if (available_consumed) {
11988c2ecf20Sopenharmony_ci		spin_lock(&ubi->volumes_lock);
11998c2ecf20Sopenharmony_ci		ubi->avail_pebs += 1;
12008c2ecf20Sopenharmony_ci		spin_unlock(&ubi->volumes_lock);
12018c2ecf20Sopenharmony_ci	}
12028c2ecf20Sopenharmony_ci	ubi_ro_mode(ubi);
12038c2ecf20Sopenharmony_ci	return err;
12048c2ecf20Sopenharmony_ci}
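
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * accounting in __erase_worker() boils down to "take a slot from the
 * bad-block reserve if one is left, otherwise borrow an available PEB,
 * and go read-only if neither exists". A real caller would hold
 * @ubi->volumes_lock.
 */
static int __maybe_unused wl_consume_bad_peb_slot(struct ubi_device *ubi)
{
	if (ubi->beb_rsvd_pebs > 0) {
		ubi->beb_rsvd_pebs -= 1;	/* use the reserve */
		return 0;
	}
	if (ubi->avail_pebs > 0) {
		ubi->avail_pebs -= 1;		/* borrow an available PEB */
		return 0;
	}
	return -ENOSPC;				/* nothing left: R/O mode */
}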
12058c2ecf20Sopenharmony_ci
12068c2ecf20Sopenharmony_cistatic int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
12078c2ecf20Sopenharmony_ci			  int shutdown)
12088c2ecf20Sopenharmony_ci{
12098c2ecf20Sopenharmony_ci	int ret;
12108c2ecf20Sopenharmony_ci
12118c2ecf20Sopenharmony_ci	if (shutdown) {
12128c2ecf20Sopenharmony_ci		struct ubi_wl_entry *e = wl_wrk->e;
12138c2ecf20Sopenharmony_ci
12148c2ecf20Sopenharmony_ci		dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
12158c2ecf20Sopenharmony_ci		kfree(wl_wrk);
12168c2ecf20Sopenharmony_ci		wl_entry_destroy(ubi, e);
12178c2ecf20Sopenharmony_ci		return 0;
12188c2ecf20Sopenharmony_ci	}
12198c2ecf20Sopenharmony_ci
12208c2ecf20Sopenharmony_ci	ret = __erase_worker(ubi, wl_wrk);
12218c2ecf20Sopenharmony_ci	kfree(wl_wrk);
12228c2ecf20Sopenharmony_ci	return ret;
12238c2ecf20Sopenharmony_ci}
12248c2ecf20Sopenharmony_ci
12258c2ecf20Sopenharmony_ci/**
12268c2ecf20Sopenharmony_ci * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
12278c2ecf20Sopenharmony_ci * @ubi: UBI device description object
12288c2ecf20Sopenharmony_ci * @vol_id: the volume ID that last used this PEB
12298c2ecf20Sopenharmony_ci * @lnum: the last used logical eraseblock number for the PEB
12308c2ecf20Sopenharmony_ci * @pnum: physical eraseblock to return
12318c2ecf20Sopenharmony_ci * @torture: if this physical eraseblock has to be tortured
12328c2ecf20Sopenharmony_ci *
12338c2ecf20Sopenharmony_ci * This function is called to return physical eraseblock @pnum to the pool of
12348c2ecf20Sopenharmony_ci * free physical eraseblocks. The @torture flag has to be set if an I/O error
12358c2ecf20Sopenharmony_ci * occurred to this @pnum and it has to be tested. This function returns zero
12368c2ecf20Sopenharmony_ci * in case of success, and a negative error code in case of failure.
12378c2ecf20Sopenharmony_ci */
12388c2ecf20Sopenharmony_ciint ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
12398c2ecf20Sopenharmony_ci		   int pnum, int torture)
12408c2ecf20Sopenharmony_ci{
12418c2ecf20Sopenharmony_ci	int err;
12428c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e;
12438c2ecf20Sopenharmony_ci
12448c2ecf20Sopenharmony_ci	dbg_wl("PEB %d", pnum);
12458c2ecf20Sopenharmony_ci	ubi_assert(pnum >= 0);
12468c2ecf20Sopenharmony_ci	ubi_assert(pnum < ubi->peb_count);
12478c2ecf20Sopenharmony_ci
12488c2ecf20Sopenharmony_ci	down_read(&ubi->fm_protect);
12498c2ecf20Sopenharmony_ci
12508c2ecf20Sopenharmony_ciretry:
12518c2ecf20Sopenharmony_ci	spin_lock(&ubi->wl_lock);
12528c2ecf20Sopenharmony_ci	e = ubi->lookuptbl[pnum];
12538c2ecf20Sopenharmony_ci	if (!e) {
12548c2ecf20Sopenharmony_ci		/*
12558c2ecf20Sopenharmony_ci		 * This wl entry has already been removed because of an error
12568c2ecf20Sopenharmony_ci		 * by another process (e.g. the wear-leveling worker). That
12578c2ecf20Sopenharmony_ci		 * process (with the exception of __erase_worker, which cannot
12588c2ecf20Sopenharmony_ci		 * run concurrently with ubi_wl_put_peb) switches UBI to R/O
12598c2ecf20Sopenharmony_ci		 * mode at the same time, so just ignore this wl entry.
12608c2ecf20Sopenharmony_ci		 */
12618c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
12628c2ecf20Sopenharmony_ci		up_read(&ubi->fm_protect);
12638c2ecf20Sopenharmony_ci		return 0;
12648c2ecf20Sopenharmony_ci	}
12658c2ecf20Sopenharmony_ci	if (e == ubi->move_from) {
12668c2ecf20Sopenharmony_ci		/*
12678c2ecf20Sopenharmony_ci		 * User is putting the physical eraseblock which was selected to
12688c2ecf20Sopenharmony_ci		 * be moved. It will be scheduled for erasure in the
12698c2ecf20Sopenharmony_ci		 * wear-leveling worker.
12708c2ecf20Sopenharmony_ci		 */
12718c2ecf20Sopenharmony_ci		dbg_wl("PEB %d is being moved, wait", pnum);
12728c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
12738c2ecf20Sopenharmony_ci
12748c2ecf20Sopenharmony_ci		/* Wait for the WL worker by taking the @ubi->move_mutex */
12758c2ecf20Sopenharmony_ci		mutex_lock(&ubi->move_mutex);
12768c2ecf20Sopenharmony_ci		mutex_unlock(&ubi->move_mutex);
12778c2ecf20Sopenharmony_ci		goto retry;
12788c2ecf20Sopenharmony_ci	} else if (e == ubi->move_to) {
12798c2ecf20Sopenharmony_ci		/*
12808c2ecf20Sopenharmony_ci		 * User is putting the physical eraseblock which was selected
12818c2ecf20Sopenharmony_ci		 * as the target the data is moved to. It may happen if the EBA
12828c2ecf20Sopenharmony_ci		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
12838c2ecf20Sopenharmony_ci		 * but the WL sub-system has not put the PEB to the "used" tree
12848c2ecf20Sopenharmony_ci		 * yet, but it is about to do this. So we just set a flag which
12858c2ecf20Sopenharmony_ci		 * will tell the WL worker that the PEB is not needed anymore
12868c2ecf20Sopenharmony_ci		 * and should be scheduled for erasure.
12878c2ecf20Sopenharmony_ci		 */
12888c2ecf20Sopenharmony_ci		dbg_wl("PEB %d is the target of data moving", pnum);
12898c2ecf20Sopenharmony_ci		ubi_assert(!ubi->move_to_put);
12908c2ecf20Sopenharmony_ci		ubi->move_to_put = 1;
12918c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
12928c2ecf20Sopenharmony_ci		up_read(&ubi->fm_protect);
12938c2ecf20Sopenharmony_ci		return 0;
12948c2ecf20Sopenharmony_ci	} else {
12958c2ecf20Sopenharmony_ci		if (in_wl_tree(e, &ubi->used)) {
12968c2ecf20Sopenharmony_ci			self_check_in_wl_tree(ubi, e, &ubi->used);
12978c2ecf20Sopenharmony_ci			rb_erase(&e->u.rb, &ubi->used);
12988c2ecf20Sopenharmony_ci		} else if (in_wl_tree(e, &ubi->scrub)) {
12998c2ecf20Sopenharmony_ci			self_check_in_wl_tree(ubi, e, &ubi->scrub);
13008c2ecf20Sopenharmony_ci			rb_erase(&e->u.rb, &ubi->scrub);
13018c2ecf20Sopenharmony_ci		} else if (in_wl_tree(e, &ubi->erroneous)) {
13028c2ecf20Sopenharmony_ci			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
13038c2ecf20Sopenharmony_ci			rb_erase(&e->u.rb, &ubi->erroneous);
13048c2ecf20Sopenharmony_ci			ubi->erroneous_peb_count -= 1;
13058c2ecf20Sopenharmony_ci			ubi_assert(ubi->erroneous_peb_count >= 0);
13068c2ecf20Sopenharmony_ci			/* Erroneous PEBs should be tortured */
13078c2ecf20Sopenharmony_ci			torture = 1;
13088c2ecf20Sopenharmony_ci		} else {
13098c2ecf20Sopenharmony_ci			err = prot_queue_del(ubi, e->pnum);
13108c2ecf20Sopenharmony_ci			if (err) {
13118c2ecf20Sopenharmony_ci				ubi_err(ubi, "PEB %d not found", pnum);
13128c2ecf20Sopenharmony_ci				ubi_ro_mode(ubi);
13138c2ecf20Sopenharmony_ci				spin_unlock(&ubi->wl_lock);
13148c2ecf20Sopenharmony_ci				up_read(&ubi->fm_protect);
13158c2ecf20Sopenharmony_ci				return err;
13168c2ecf20Sopenharmony_ci			}
13178c2ecf20Sopenharmony_ci		}
13188c2ecf20Sopenharmony_ci	}
13198c2ecf20Sopenharmony_ci	spin_unlock(&ubi->wl_lock);
13208c2ecf20Sopenharmony_ci
13218c2ecf20Sopenharmony_ci	err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
13228c2ecf20Sopenharmony_ci	if (err) {
13238c2ecf20Sopenharmony_ci		spin_lock(&ubi->wl_lock);
13248c2ecf20Sopenharmony_ci		wl_tree_add(e, &ubi->used);
13258c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
13268c2ecf20Sopenharmony_ci	}
13278c2ecf20Sopenharmony_ci
13288c2ecf20Sopenharmony_ci	up_read(&ubi->fm_protect);
13298c2ecf20Sopenharmony_ci	return err;
13308c2ecf20Sopenharmony_ci}
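
/*
 * Illustrative usage (hypothetical caller, not part of the driver): this
 * is how the EBA sub-system would typically hand a PEB back after
 * un-mapping a LEB. The last argument requests torture testing and is
 * set when the un-mapping was triggered by an I/O error.
 */
static int __maybe_unused example_return_peb(struct ubi_device *ubi,
					     int vol_id, int lnum, int pnum)
{
	/* No I/O error happened here, so no torture testing is requested */
	return ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
}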
13318c2ecf20Sopenharmony_ci
13328c2ecf20Sopenharmony_ci/**
13338c2ecf20Sopenharmony_ci * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
13348c2ecf20Sopenharmony_ci * @ubi: UBI device description object
13358c2ecf20Sopenharmony_ci * @pnum: the physical eraseblock to schedule
13368c2ecf20Sopenharmony_ci *
13378c2ecf20Sopenharmony_ci * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
13388c2ecf20Sopenharmony_ci * needs scrubbing. This function schedules a physical eraseblock for
13398c2ecf20Sopenharmony_ci * scrubbing which is done in background. This function returns zero in case of
13408c2ecf20Sopenharmony_ci * success and a negative error code in case of failure.
13418c2ecf20Sopenharmony_ci */
13428c2ecf20Sopenharmony_ciint ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
13438c2ecf20Sopenharmony_ci{
13448c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e;
13458c2ecf20Sopenharmony_ci
13468c2ecf20Sopenharmony_ci	ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
13478c2ecf20Sopenharmony_ci
13488c2ecf20Sopenharmony_ciretry:
13498c2ecf20Sopenharmony_ci	spin_lock(&ubi->wl_lock);
13508c2ecf20Sopenharmony_ci	e = ubi->lookuptbl[pnum];
13518c2ecf20Sopenharmony_ci	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
13528c2ecf20Sopenharmony_ci				   in_wl_tree(e, &ubi->erroneous)) {
13538c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
13548c2ecf20Sopenharmony_ci		return 0;
13558c2ecf20Sopenharmony_ci	}
13568c2ecf20Sopenharmony_ci
13578c2ecf20Sopenharmony_ci	if (e == ubi->move_to) {
13588c2ecf20Sopenharmony_ci		/*
13598c2ecf20Sopenharmony_ci		 * This physical eraseblock was used to move data to. The data
13608c2ecf20Sopenharmony_ci		 * was moved but the PEB was not yet inserted to the proper
13618c2ecf20Sopenharmony_ci		 * tree. We should just wait a little and let the WL worker
13628c2ecf20Sopenharmony_ci		 * proceed.
13638c2ecf20Sopenharmony_ci		 */
13648c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
13658c2ecf20Sopenharmony_ci		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
13668c2ecf20Sopenharmony_ci		yield();
13678c2ecf20Sopenharmony_ci		goto retry;
13688c2ecf20Sopenharmony_ci	}
13698c2ecf20Sopenharmony_ci
13708c2ecf20Sopenharmony_ci	if (in_wl_tree(e, &ubi->used)) {
13718c2ecf20Sopenharmony_ci		self_check_in_wl_tree(ubi, e, &ubi->used);
13728c2ecf20Sopenharmony_ci		rb_erase(&e->u.rb, &ubi->used);
13738c2ecf20Sopenharmony_ci	} else {
13748c2ecf20Sopenharmony_ci		int err;
13758c2ecf20Sopenharmony_ci
13768c2ecf20Sopenharmony_ci		err = prot_queue_del(ubi, e->pnum);
13778c2ecf20Sopenharmony_ci		if (err) {
13788c2ecf20Sopenharmony_ci			ubi_err(ubi, "PEB %d not found", pnum);
13798c2ecf20Sopenharmony_ci			ubi_ro_mode(ubi);
13808c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
13818c2ecf20Sopenharmony_ci			return err;
13828c2ecf20Sopenharmony_ci		}
13838c2ecf20Sopenharmony_ci	}
13848c2ecf20Sopenharmony_ci
13858c2ecf20Sopenharmony_ci	wl_tree_add(e, &ubi->scrub);
13868c2ecf20Sopenharmony_ci	spin_unlock(&ubi->wl_lock);
13878c2ecf20Sopenharmony_ci
13888c2ecf20Sopenharmony_ci	/*
13898c2ecf20Sopenharmony_ci	 * Technically scrubbing is the same as wear-leveling, so it is done
13908c2ecf20Sopenharmony_ci	 * by the WL worker.
13918c2ecf20Sopenharmony_ci	 */
13928c2ecf20Sopenharmony_ci	return ensure_wear_leveling(ubi, 0);
13938c2ecf20Sopenharmony_ci}
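
/*
 * Illustrative usage (hypothetical, not part of the driver): a read path
 * that got %UBI_IO_BITFLIPS from ubi_io_read() would hand the PEB over
 * for scrubbing like this.
 */
static int __maybe_unused example_handle_bitflips(struct ubi_device *ubi,
						  int pnum, int read_err)
{
	if (read_err == UBI_IO_BITFLIPS)
		return ubi_wl_scrub_peb(ubi, pnum);

	return read_err;
}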
13948c2ecf20Sopenharmony_ci
13958c2ecf20Sopenharmony_ci/**
13968c2ecf20Sopenharmony_ci * ubi_wl_flush - flush all pending works.
13978c2ecf20Sopenharmony_ci * @ubi: UBI device description object
13988c2ecf20Sopenharmony_ci * @vol_id: the volume id to flush for
13998c2ecf20Sopenharmony_ci * @lnum: the logical eraseblock number to flush for
14008c2ecf20Sopenharmony_ci *
14018c2ecf20Sopenharmony_ci * This function executes all pending works for a particular volume id /
14028c2ecf20Sopenharmony_ci * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
14038c2ecf20Sopenharmony_ci * acts as a wildcard for all of the corresponding volume numbers or logical
14048c2ecf20Sopenharmony_ci * eraseblock numbers. It returns zero in case of success and a negative error
14058c2ecf20Sopenharmony_ci * code in case of failure.
14068c2ecf20Sopenharmony_ci */
14078c2ecf20Sopenharmony_ciint ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
14088c2ecf20Sopenharmony_ci{
14098c2ecf20Sopenharmony_ci	int err = 0;
14108c2ecf20Sopenharmony_ci	int found = 1;
14118c2ecf20Sopenharmony_ci
14128c2ecf20Sopenharmony_ci	/*
14138c2ecf20Sopenharmony_ci	 * Run the pending works as long as the queue contains entries
14148c2ecf20Sopenharmony_ci	 * matching @vol_id/@lnum; the scan restarts after each executed work.
14158c2ecf20Sopenharmony_ci	 */
14168c2ecf20Sopenharmony_ci	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
14178c2ecf20Sopenharmony_ci	       vol_id, lnum, ubi->works_count);
14188c2ecf20Sopenharmony_ci
14198c2ecf20Sopenharmony_ci	while (found) {
14208c2ecf20Sopenharmony_ci		struct ubi_work *wrk, *tmp;
14218c2ecf20Sopenharmony_ci		found = 0;
14228c2ecf20Sopenharmony_ci
14238c2ecf20Sopenharmony_ci		down_read(&ubi->work_sem);
14248c2ecf20Sopenharmony_ci		spin_lock(&ubi->wl_lock);
14258c2ecf20Sopenharmony_ci		list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
14268c2ecf20Sopenharmony_ci			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
14278c2ecf20Sopenharmony_ci			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
14288c2ecf20Sopenharmony_ci				list_del(&wrk->list);
14298c2ecf20Sopenharmony_ci				ubi->works_count -= 1;
14308c2ecf20Sopenharmony_ci				ubi_assert(ubi->works_count >= 0);
14318c2ecf20Sopenharmony_ci				spin_unlock(&ubi->wl_lock);
14328c2ecf20Sopenharmony_ci
14338c2ecf20Sopenharmony_ci				err = wrk->func(ubi, wrk, 0);
14348c2ecf20Sopenharmony_ci				if (err) {
14358c2ecf20Sopenharmony_ci					up_read(&ubi->work_sem);
14368c2ecf20Sopenharmony_ci					return err;
14378c2ecf20Sopenharmony_ci				}
14388c2ecf20Sopenharmony_ci
14398c2ecf20Sopenharmony_ci				spin_lock(&ubi->wl_lock);
14408c2ecf20Sopenharmony_ci				found = 1;
14418c2ecf20Sopenharmony_ci				break;
14428c2ecf20Sopenharmony_ci			}
14438c2ecf20Sopenharmony_ci		}
14448c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
14458c2ecf20Sopenharmony_ci		up_read(&ubi->work_sem);
14468c2ecf20Sopenharmony_ci	}
14478c2ecf20Sopenharmony_ci
14488c2ecf20Sopenharmony_ci	/*
14498c2ecf20Sopenharmony_ci	 * Make sure all the works which have been done in parallel are
14508c2ecf20Sopenharmony_ci	 * finished.
14518c2ecf20Sopenharmony_ci	 */
14528c2ecf20Sopenharmony_ci	down_write(&ubi->work_sem);
14538c2ecf20Sopenharmony_ci	up_write(&ubi->work_sem);
14548c2ecf20Sopenharmony_ci
14558c2ecf20Sopenharmony_ci	return err;
14568c2ecf20Sopenharmony_ci}
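
/*
 * Illustrative usage (hypothetical, not part of the driver): the %UBI_ALL
 * wildcard allows flushing either the works of one LEB or every pending
 * work at once.
 */
static int __maybe_unused example_flush(struct ubi_device *ubi)
{
	int err;

	/* Flush only the works queued for LEB 0 of volume 0 */
	err = ubi_wl_flush(ubi, 0, 0);
	if (err)
		return err;

	/* Flush everything, regardless of volume ID and LEB number */
	return ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
}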
14578c2ecf20Sopenharmony_ci
14588c2ecf20Sopenharmony_cistatic bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
14598c2ecf20Sopenharmony_ci{
14608c2ecf20Sopenharmony_ci	if (in_wl_tree(e, &ubi->scrub))
14618c2ecf20Sopenharmony_ci		return false;
14628c2ecf20Sopenharmony_ci	else if (in_wl_tree(e, &ubi->erroneous))
14638c2ecf20Sopenharmony_ci		return false;
14648c2ecf20Sopenharmony_ci	else if (ubi->move_from == e)
14658c2ecf20Sopenharmony_ci		return false;
14668c2ecf20Sopenharmony_ci	else if (ubi->move_to == e)
14678c2ecf20Sopenharmony_ci		return false;
14688c2ecf20Sopenharmony_ci
14698c2ecf20Sopenharmony_ci	return true;
14708c2ecf20Sopenharmony_ci}
14718c2ecf20Sopenharmony_ci
14728c2ecf20Sopenharmony_ci/**
14738c2ecf20Sopenharmony_ci * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
14748c2ecf20Sopenharmony_ci * @ubi: UBI device description object
14758c2ecf20Sopenharmony_ci * @pnum: the physical eraseblock to schedule
14768c2ecf20Sopenharmony_ci * @force: don't read the block, assume bitflips happened and take action.
14778c2ecf20Sopenharmony_ci *
14788c2ecf20Sopenharmony_ci * This function reads the given eraseblock and checks if bitflips occurred.
14798c2ecf20Sopenharmony_ci * In case of bitflips, the eraseblock is scheduled for scrubbing.
14808c2ecf20Sopenharmony_ci * If scrubbing is forced with @force, the eraseblock is not read,
14818c2ecf20Sopenharmony_ci * but scheduled for scrubbing right away.
14828c2ecf20Sopenharmony_ci *
14838c2ecf20Sopenharmony_ci * Returns:
14848c2ecf20Sopenharmony_ci * %-EINVAL, PEB is out of range
14858c2ecf20Sopenharmony_ci * %-ENOENT, PEB is no longer used by UBI
14868c2ecf20Sopenharmony_ci * %-EBUSY, PEB cannot be checked now or a check is currently running on it
14878c2ecf20Sopenharmony_ci * %-EAGAIN, bit flips happened but scrubbing is currently not possible
14888c2ecf20Sopenharmony_ci * %-EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
14898c2ecf20Sopenharmony_ci * %0, no bit flips detected
14908c2ecf20Sopenharmony_ci */
14918c2ecf20Sopenharmony_ciint ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
14928c2ecf20Sopenharmony_ci{
14938c2ecf20Sopenharmony_ci	int err = 0;
14948c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e;
14958c2ecf20Sopenharmony_ci
14968c2ecf20Sopenharmony_ci	if (pnum < 0 || pnum >= ubi->peb_count) {
14978c2ecf20Sopenharmony_ci		err = -EINVAL;
14988c2ecf20Sopenharmony_ci		goto out;
14998c2ecf20Sopenharmony_ci	}
15008c2ecf20Sopenharmony_ci
15018c2ecf20Sopenharmony_ci	/*
15028c2ecf20Sopenharmony_ci	 * Pause all parallel work, otherwise it can happen that the
15038c2ecf20Sopenharmony_ci	 * erase worker frees a wl entry under us.
15048c2ecf20Sopenharmony_ci	 */
15058c2ecf20Sopenharmony_ci	down_write(&ubi->work_sem);
15068c2ecf20Sopenharmony_ci
15078c2ecf20Sopenharmony_ci	/*
15088c2ecf20Sopenharmony_ci	 * Make sure that the wl entry does not change state while
15098c2ecf20Sopenharmony_ci	 * inspecting it.
15108c2ecf20Sopenharmony_ci	 */
15118c2ecf20Sopenharmony_ci	spin_lock(&ubi->wl_lock);
15128c2ecf20Sopenharmony_ci	e = ubi->lookuptbl[pnum];
15138c2ecf20Sopenharmony_ci	if (!e) {
15148c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
15158c2ecf20Sopenharmony_ci		err = -ENOENT;
15168c2ecf20Sopenharmony_ci		goto out_resume;
15178c2ecf20Sopenharmony_ci	}
15188c2ecf20Sopenharmony_ci
15198c2ecf20Sopenharmony_ci	/*
15208c2ecf20Sopenharmony_ci	 * Does it make sense to check this PEB?
15218c2ecf20Sopenharmony_ci	 */
15228c2ecf20Sopenharmony_ci	if (!scrub_possible(ubi, e)) {
15238c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
15248c2ecf20Sopenharmony_ci		err = -EBUSY;
15258c2ecf20Sopenharmony_ci		goto out_resume;
15268c2ecf20Sopenharmony_ci	}
15278c2ecf20Sopenharmony_ci	spin_unlock(&ubi->wl_lock);
15288c2ecf20Sopenharmony_ci
15298c2ecf20Sopenharmony_ci	if (!force) {
15308c2ecf20Sopenharmony_ci		mutex_lock(&ubi->buf_mutex);
15318c2ecf20Sopenharmony_ci		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
15328c2ecf20Sopenharmony_ci		mutex_unlock(&ubi->buf_mutex);
15338c2ecf20Sopenharmony_ci	}
15348c2ecf20Sopenharmony_ci
15358c2ecf20Sopenharmony_ci	if (force || err == UBI_IO_BITFLIPS) {
15368c2ecf20Sopenharmony_ci		/*
15378c2ecf20Sopenharmony_ci		 * Okay, bit flip happened, let's figure out what we can do.
15388c2ecf20Sopenharmony_ci		 */
15398c2ecf20Sopenharmony_ci		spin_lock(&ubi->wl_lock);
15408c2ecf20Sopenharmony_ci
15418c2ecf20Sopenharmony_ci		/*
15428c2ecf20Sopenharmony_ci		 * Recheck. We released wl_lock, UBI might have killed the
15438c2ecf20Sopenharmony_ci		 * wl entry under us.
15448c2ecf20Sopenharmony_ci		 */
15458c2ecf20Sopenharmony_ci		e = ubi->lookuptbl[pnum];
15468c2ecf20Sopenharmony_ci		if (!e) {
15478c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
15488c2ecf20Sopenharmony_ci			err = -ENOENT;
15498c2ecf20Sopenharmony_ci			goto out_resume;
15508c2ecf20Sopenharmony_ci		}
15518c2ecf20Sopenharmony_ci
15528c2ecf20Sopenharmony_ci		/*
15538c2ecf20Sopenharmony_ci		 * Need to re-check state
15548c2ecf20Sopenharmony_ci		 */
15558c2ecf20Sopenharmony_ci		if (!scrub_possible(ubi, e)) {
15568c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
15578c2ecf20Sopenharmony_ci			err = -EBUSY;
15588c2ecf20Sopenharmony_ci			goto out_resume;
15598c2ecf20Sopenharmony_ci		}
15608c2ecf20Sopenharmony_ci
15618c2ecf20Sopenharmony_ci		if (in_pq(ubi, e)) {
15628c2ecf20Sopenharmony_ci			prot_queue_del(ubi, e->pnum);
15638c2ecf20Sopenharmony_ci			wl_tree_add(e, &ubi->scrub);
15648c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
15658c2ecf20Sopenharmony_ci
15668c2ecf20Sopenharmony_ci			err = ensure_wear_leveling(ubi, 1);
15678c2ecf20Sopenharmony_ci		} else if (in_wl_tree(e, &ubi->used)) {
15688c2ecf20Sopenharmony_ci			rb_erase(&e->u.rb, &ubi->used);
15698c2ecf20Sopenharmony_ci			wl_tree_add(e, &ubi->scrub);
15708c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
15718c2ecf20Sopenharmony_ci
15728c2ecf20Sopenharmony_ci			err = ensure_wear_leveling(ubi, 1);
15738c2ecf20Sopenharmony_ci		} else if (in_wl_tree(e, &ubi->free)) {
15748c2ecf20Sopenharmony_ci			rb_erase(&e->u.rb, &ubi->free);
15758c2ecf20Sopenharmony_ci			ubi->free_count--;
15768c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
15778c2ecf20Sopenharmony_ci
15788c2ecf20Sopenharmony_ci			/*
15798c2ecf20Sopenharmony_ci			 * This PEB is empty, so we can schedule it for
15808c2ecf20Sopenharmony_ci			 * erasure right away. No wear leveling needed.
15818c2ecf20Sopenharmony_ci			 */
15828c2ecf20Sopenharmony_ci			err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
15838c2ecf20Sopenharmony_ci					     force ? 0 : 1, true);
15848c2ecf20Sopenharmony_ci		} else {
15858c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
15868c2ecf20Sopenharmony_ci			err = -EAGAIN;
15878c2ecf20Sopenharmony_ci		}
15888c2ecf20Sopenharmony_ci
15898c2ecf20Sopenharmony_ci		if (!err && !force)
15908c2ecf20Sopenharmony_ci			err = -EUCLEAN;
15918c2ecf20Sopenharmony_ci	} else {
15928c2ecf20Sopenharmony_ci		err = 0;
15938c2ecf20Sopenharmony_ci	}
15948c2ecf20Sopenharmony_ci
15958c2ecf20Sopenharmony_ciout_resume:
15968c2ecf20Sopenharmony_ci	up_write(&ubi->work_sem);
15978c2ecf20Sopenharmony_ciout:
15998c2ecf20Sopenharmony_ci	return err;
16008c2ecf20Sopenharmony_ci}
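
/*
 * Illustrative usage (hypothetical, not part of the driver): most callers
 * only need to distinguish "scrub scheduled" from transient and fatal
 * results of ubi_bitflip_check().
 */
static int __maybe_unused example_check_peb(struct ubi_device *ubi, int pnum)
{
	int err = ubi_bitflip_check(ubi, pnum, 0);

	switch (err) {
	case -EUCLEAN:		/* bit flips found, scrubbing scheduled */
		return 0;
	case -EBUSY:
	case -EAGAIN:		/* transient, the caller may retry later */
		return err;
	default:		/* 0, -EINVAL or -ENOENT */
		return err;
	}
}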
16018c2ecf20Sopenharmony_ci
16028c2ecf20Sopenharmony_ci/**
16038c2ecf20Sopenharmony_ci * tree_destroy - destroy an RB-tree.
16048c2ecf20Sopenharmony_ci * @ubi: UBI device description object
16058c2ecf20Sopenharmony_ci * @root: the root of the tree to destroy
16068c2ecf20Sopenharmony_ci */
16078c2ecf20Sopenharmony_cistatic void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
16088c2ecf20Sopenharmony_ci{
16098c2ecf20Sopenharmony_ci	struct rb_node *rb;
16108c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e;
16118c2ecf20Sopenharmony_ci
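	/*
	 * Iterative post-order walk: descend to a leaf, free it, and clear
	 * the link in its parent so that the parent itself becomes a leaf
	 * on a later iteration. No recursion and no extra memory needed.
	 */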
16128c2ecf20Sopenharmony_ci	rb = root->rb_node;
16138c2ecf20Sopenharmony_ci	while (rb) {
16148c2ecf20Sopenharmony_ci		if (rb->rb_left)
16158c2ecf20Sopenharmony_ci			rb = rb->rb_left;
16168c2ecf20Sopenharmony_ci		else if (rb->rb_right)
16178c2ecf20Sopenharmony_ci			rb = rb->rb_right;
16188c2ecf20Sopenharmony_ci		else {
16198c2ecf20Sopenharmony_ci			e = rb_entry(rb, struct ubi_wl_entry, u.rb);
16208c2ecf20Sopenharmony_ci
16218c2ecf20Sopenharmony_ci			rb = rb_parent(rb);
16228c2ecf20Sopenharmony_ci			if (rb) {
16238c2ecf20Sopenharmony_ci				if (rb->rb_left == &e->u.rb)
16248c2ecf20Sopenharmony_ci					rb->rb_left = NULL;
16258c2ecf20Sopenharmony_ci				else
16268c2ecf20Sopenharmony_ci					rb->rb_right = NULL;
16278c2ecf20Sopenharmony_ci			}
16288c2ecf20Sopenharmony_ci
16298c2ecf20Sopenharmony_ci			wl_entry_destroy(ubi, e);
16308c2ecf20Sopenharmony_ci		}
16318c2ecf20Sopenharmony_ci	}
16328c2ecf20Sopenharmony_ci}
16338c2ecf20Sopenharmony_ci
16348c2ecf20Sopenharmony_ci/**
16358c2ecf20Sopenharmony_ci * ubi_thread - UBI background thread.
16368c2ecf20Sopenharmony_ci * @u: the UBI device description object pointer
16378c2ecf20Sopenharmony_ci */
16388c2ecf20Sopenharmony_ciint ubi_thread(void *u)
16398c2ecf20Sopenharmony_ci{
16408c2ecf20Sopenharmony_ci	int failures = 0;
16418c2ecf20Sopenharmony_ci	struct ubi_device *ubi = u;
16428c2ecf20Sopenharmony_ci
16438c2ecf20Sopenharmony_ci	ubi_msg(ubi, "background thread \"%s\" started, PID %d",
16448c2ecf20Sopenharmony_ci		ubi->bgt_name, task_pid_nr(current));
16458c2ecf20Sopenharmony_ci
16468c2ecf20Sopenharmony_ci	set_freezable();
16478c2ecf20Sopenharmony_ci	for (;;) {
16488c2ecf20Sopenharmony_ci		int err;
16498c2ecf20Sopenharmony_ci
16508c2ecf20Sopenharmony_ci		if (kthread_should_stop())
16518c2ecf20Sopenharmony_ci			break;
16528c2ecf20Sopenharmony_ci
16538c2ecf20Sopenharmony_ci		if (try_to_freeze())
16548c2ecf20Sopenharmony_ci			continue;
16558c2ecf20Sopenharmony_ci
16568c2ecf20Sopenharmony_ci		spin_lock(&ubi->wl_lock);
16578c2ecf20Sopenharmony_ci		if (list_empty(&ubi->works) || ubi->ro_mode ||
16588c2ecf20Sopenharmony_ci		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
16598c2ecf20Sopenharmony_ci			set_current_state(TASK_INTERRUPTIBLE);
16608c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
16618c2ecf20Sopenharmony_ci
16628c2ecf20Sopenharmony_ci			/*
16638c2ecf20Sopenharmony_ci			 * Check kthread_should_stop() after we set the task
16648c2ecf20Sopenharmony_ci			 * state to guarantee that we either see the stop bit
16658c2ecf20Sopenharmony_ci			 * and exit or the task state is reset to runnable such
16668c2ecf20Sopenharmony_ci			 * that it's not scheduled out indefinitely and detects
16678c2ecf20Sopenharmony_ci			 * the stop bit at kthread_should_stop().
16688c2ecf20Sopenharmony_ci			 */
16698c2ecf20Sopenharmony_ci			if (kthread_should_stop()) {
16708c2ecf20Sopenharmony_ci				set_current_state(TASK_RUNNING);
16718c2ecf20Sopenharmony_ci				break;
16728c2ecf20Sopenharmony_ci			}
16738c2ecf20Sopenharmony_ci
16748c2ecf20Sopenharmony_ci			schedule();
16758c2ecf20Sopenharmony_ci			continue;
16768c2ecf20Sopenharmony_ci		}
16778c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
16788c2ecf20Sopenharmony_ci
16798c2ecf20Sopenharmony_ci		err = do_work(ubi);
16808c2ecf20Sopenharmony_ci		if (err) {
16818c2ecf20Sopenharmony_ci			ubi_err(ubi, "%s: work failed with error code %d",
16828c2ecf20Sopenharmony_ci				ubi->bgt_name, err);
16838c2ecf20Sopenharmony_ci			if (failures++ > WL_MAX_FAILURES) {
16848c2ecf20Sopenharmony_ci				/*
16858c2ecf20Sopenharmony_ci				 * Too many failures, disable the thread and
16868c2ecf20Sopenharmony_ci				 * switch to read-only mode.
16878c2ecf20Sopenharmony_ci				 */
16888c2ecf20Sopenharmony_ci				ubi_msg(ubi, "%s: %d consecutive failures",
16898c2ecf20Sopenharmony_ci					ubi->bgt_name, WL_MAX_FAILURES);
16908c2ecf20Sopenharmony_ci				ubi_ro_mode(ubi);
16918c2ecf20Sopenharmony_ci				ubi->thread_enabled = 0;
16928c2ecf20Sopenharmony_ci				continue;
16938c2ecf20Sopenharmony_ci			}
16948c2ecf20Sopenharmony_ci		} else
16958c2ecf20Sopenharmony_ci			failures = 0;
16968c2ecf20Sopenharmony_ci
16978c2ecf20Sopenharmony_ci		cond_resched();
16988c2ecf20Sopenharmony_ci	}
16998c2ecf20Sopenharmony_ci
17008c2ecf20Sopenharmony_ci	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
17018c2ecf20Sopenharmony_ci	ubi->thread_enabled = 0;
17028c2ecf20Sopenharmony_ci	return 0;
17038c2ecf20Sopenharmony_ci}
17048c2ecf20Sopenharmony_ci
17058c2ecf20Sopenharmony_ci/**
17068c2ecf20Sopenharmony_ci * shutdown_work - shutdown all pending works.
17078c2ecf20Sopenharmony_ci * @ubi: UBI device description object
17088c2ecf20Sopenharmony_ci */
17098c2ecf20Sopenharmony_cistatic void shutdown_work(struct ubi_device *ubi)
17108c2ecf20Sopenharmony_ci{
17118c2ecf20Sopenharmony_ci	while (!list_empty(&ubi->works)) {
17128c2ecf20Sopenharmony_ci		struct ubi_work *wrk;
17138c2ecf20Sopenharmony_ci
17148c2ecf20Sopenharmony_ci		wrk = list_entry(ubi->works.next, struct ubi_work, list);
17158c2ecf20Sopenharmony_ci		list_del(&wrk->list);
17168c2ecf20Sopenharmony_ci		wrk->func(ubi, wrk, 1);
17178c2ecf20Sopenharmony_ci		ubi->works_count -= 1;
17188c2ecf20Sopenharmony_ci		ubi_assert(ubi->works_count >= 0);
17198c2ecf20Sopenharmony_ci	}
17208c2ecf20Sopenharmony_ci}
17218c2ecf20Sopenharmony_ci
17228c2ecf20Sopenharmony_ci/**
17238c2ecf20Sopenharmony_ci * erase_aeb - erase the PEB described by a UBI attach info PEB
17248c2ecf20Sopenharmony_ci * @ubi: UBI device description object
17258c2ecf20Sopenharmony_ci * @aeb: UBI attach info PEB
17268c2ecf20Sopenharmony_ci * @sync: If true, erase synchronously. Otherwise schedule for erasure
17278c2ecf20Sopenharmony_ci */
17288c2ecf20Sopenharmony_cistatic int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
17298c2ecf20Sopenharmony_ci{
17308c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e;
17318c2ecf20Sopenharmony_ci	int err;
17328c2ecf20Sopenharmony_ci
17338c2ecf20Sopenharmony_ci	e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
17348c2ecf20Sopenharmony_ci	if (!e)
17358c2ecf20Sopenharmony_ci		return -ENOMEM;
17368c2ecf20Sopenharmony_ci
17378c2ecf20Sopenharmony_ci	e->pnum = aeb->pnum;
17388c2ecf20Sopenharmony_ci	e->ec = aeb->ec;
17398c2ecf20Sopenharmony_ci	ubi->lookuptbl[e->pnum] = e;
17408c2ecf20Sopenharmony_ci
17418c2ecf20Sopenharmony_ci	if (sync) {
17428c2ecf20Sopenharmony_ci		err = sync_erase(ubi, e, false);
17438c2ecf20Sopenharmony_ci		if (err)
17448c2ecf20Sopenharmony_ci			goto out_free;
17458c2ecf20Sopenharmony_ci
17468c2ecf20Sopenharmony_ci		wl_tree_add(e, &ubi->free);
17478c2ecf20Sopenharmony_ci		ubi->free_count++;
17488c2ecf20Sopenharmony_ci	} else {
17498c2ecf20Sopenharmony_ci		err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
17508c2ecf20Sopenharmony_ci		if (err)
17518c2ecf20Sopenharmony_ci			goto out_free;
17528c2ecf20Sopenharmony_ci	}
17538c2ecf20Sopenharmony_ci
17548c2ecf20Sopenharmony_ci	return 0;
17558c2ecf20Sopenharmony_ci
17568c2ecf20Sopenharmony_ciout_free:
17578c2ecf20Sopenharmony_ci	wl_entry_destroy(ubi, e);
17588c2ecf20Sopenharmony_ci
17598c2ecf20Sopenharmony_ci	return err;
17608c2ecf20Sopenharmony_ci}
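
/*
 * Illustrative usage (hypothetical, not part of the driver): mirrors how
 * ubi_wl_init() below treats attach-time PEBs: ordinary ones are queued
 * for background erasure, while a stale fastmap anchor must be erased
 * synchronously.
 */
static int __maybe_unused example_erase_aeb(struct ubi_device *ubi,
					    struct ubi_ainf_peb *aeb)
{
	bool sync = aeb->vol_id == UBI_FM_SB_VOLUME_ID;

	return erase_aeb(ubi, aeb, sync);
}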
17618c2ecf20Sopenharmony_ci
17628c2ecf20Sopenharmony_ci/**
17638c2ecf20Sopenharmony_ci * ubi_wl_init - initialize the WL sub-system using attaching information.
17648c2ecf20Sopenharmony_ci * @ubi: UBI device description object
17658c2ecf20Sopenharmony_ci * @ai: attaching information
17668c2ecf20Sopenharmony_ci *
17678c2ecf20Sopenharmony_ci * This function returns zero in case of success, and a negative error code in
17688c2ecf20Sopenharmony_ci * case of failure.
17698c2ecf20Sopenharmony_ci */
17708c2ecf20Sopenharmony_ciint ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
17718c2ecf20Sopenharmony_ci{
17728c2ecf20Sopenharmony_ci	int err, i, reserved_pebs, found_pebs = 0;
17738c2ecf20Sopenharmony_ci	struct rb_node *rb1, *rb2;
17748c2ecf20Sopenharmony_ci	struct ubi_ainf_volume *av;
17758c2ecf20Sopenharmony_ci	struct ubi_ainf_peb *aeb, *tmp;
17768c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e;
17778c2ecf20Sopenharmony_ci
17788c2ecf20Sopenharmony_ci	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
17798c2ecf20Sopenharmony_ci	spin_lock_init(&ubi->wl_lock);
17808c2ecf20Sopenharmony_ci	mutex_init(&ubi->move_mutex);
17818c2ecf20Sopenharmony_ci	init_rwsem(&ubi->work_sem);
17828c2ecf20Sopenharmony_ci	ubi->max_ec = ai->max_ec;
17838c2ecf20Sopenharmony_ci	INIT_LIST_HEAD(&ubi->works);
17848c2ecf20Sopenharmony_ci
17858c2ecf20Sopenharmony_ci	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
17868c2ecf20Sopenharmony_ci
17878c2ecf20Sopenharmony_ci	err = -ENOMEM;
17888c2ecf20Sopenharmony_ci	ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
17898c2ecf20Sopenharmony_ci	if (!ubi->lookuptbl)
17908c2ecf20Sopenharmony_ci		return err;
17918c2ecf20Sopenharmony_ci
17928c2ecf20Sopenharmony_ci	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
17938c2ecf20Sopenharmony_ci		INIT_LIST_HEAD(&ubi->pq[i]);
17948c2ecf20Sopenharmony_ci	ubi->pq_head = 0;
17958c2ecf20Sopenharmony_ci
17968c2ecf20Sopenharmony_ci	ubi->free_count = 0;
17978c2ecf20Sopenharmony_ci	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
17988c2ecf20Sopenharmony_ci		cond_resched();
17998c2ecf20Sopenharmony_ci
18008c2ecf20Sopenharmony_ci		err = erase_aeb(ubi, aeb, false);
18018c2ecf20Sopenharmony_ci		if (err)
18028c2ecf20Sopenharmony_ci			goto out_free;
18038c2ecf20Sopenharmony_ci
18048c2ecf20Sopenharmony_ci		found_pebs++;
18058c2ecf20Sopenharmony_ci	}
18068c2ecf20Sopenharmony_ci
18078c2ecf20Sopenharmony_ci	list_for_each_entry(aeb, &ai->free, u.list) {
18088c2ecf20Sopenharmony_ci		cond_resched();
18098c2ecf20Sopenharmony_ci
18108c2ecf20Sopenharmony_ci		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
18118c2ecf20Sopenharmony_ci		if (!e) {
18128c2ecf20Sopenharmony_ci			err = -ENOMEM;
18138c2ecf20Sopenharmony_ci			goto out_free;
18148c2ecf20Sopenharmony_ci		}
18158c2ecf20Sopenharmony_ci
18168c2ecf20Sopenharmony_ci		e->pnum = aeb->pnum;
18178c2ecf20Sopenharmony_ci		e->ec = aeb->ec;
18188c2ecf20Sopenharmony_ci		ubi_assert(e->ec >= 0);
18198c2ecf20Sopenharmony_ci
18208c2ecf20Sopenharmony_ci		wl_tree_add(e, &ubi->free);
18218c2ecf20Sopenharmony_ci		ubi->free_count++;
18228c2ecf20Sopenharmony_ci
18238c2ecf20Sopenharmony_ci		ubi->lookuptbl[e->pnum] = e;
18248c2ecf20Sopenharmony_ci
18258c2ecf20Sopenharmony_ci		found_pebs++;
18268c2ecf20Sopenharmony_ci	}
18278c2ecf20Sopenharmony_ci
18288c2ecf20Sopenharmony_ci	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
18298c2ecf20Sopenharmony_ci		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
18308c2ecf20Sopenharmony_ci			cond_resched();
18318c2ecf20Sopenharmony_ci
18328c2ecf20Sopenharmony_ci			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
18338c2ecf20Sopenharmony_ci			if (!e) {
18348c2ecf20Sopenharmony_ci				err = -ENOMEM;
18358c2ecf20Sopenharmony_ci				goto out_free;
18368c2ecf20Sopenharmony_ci			}
18378c2ecf20Sopenharmony_ci
18388c2ecf20Sopenharmony_ci			e->pnum = aeb->pnum;
18398c2ecf20Sopenharmony_ci			e->ec = aeb->ec;
18408c2ecf20Sopenharmony_ci			ubi->lookuptbl[e->pnum] = e;
18418c2ecf20Sopenharmony_ci
18428c2ecf20Sopenharmony_ci			if (!aeb->scrub) {
18438c2ecf20Sopenharmony_ci				dbg_wl("add PEB %d EC %d to the used tree",
18448c2ecf20Sopenharmony_ci				       e->pnum, e->ec);
18458c2ecf20Sopenharmony_ci				wl_tree_add(e, &ubi->used);
18468c2ecf20Sopenharmony_ci			} else {
18478c2ecf20Sopenharmony_ci				dbg_wl("add PEB %d EC %d to the scrub tree",
18488c2ecf20Sopenharmony_ci				       e->pnum, e->ec);
18498c2ecf20Sopenharmony_ci				wl_tree_add(e, &ubi->scrub);
18508c2ecf20Sopenharmony_ci			}
18518c2ecf20Sopenharmony_ci
18528c2ecf20Sopenharmony_ci			found_pebs++;
18538c2ecf20Sopenharmony_ci		}
18548c2ecf20Sopenharmony_ci	}
18558c2ecf20Sopenharmony_ci
18568c2ecf20Sopenharmony_ci	list_for_each_entry(aeb, &ai->fastmap, u.list) {
18578c2ecf20Sopenharmony_ci		cond_resched();
18588c2ecf20Sopenharmony_ci
18598c2ecf20Sopenharmony_ci		e = ubi_find_fm_block(ubi, aeb->pnum);
18608c2ecf20Sopenharmony_ci
18618c2ecf20Sopenharmony_ci		if (e) {
18628c2ecf20Sopenharmony_ci			ubi_assert(!ubi->lookuptbl[e->pnum]);
18638c2ecf20Sopenharmony_ci			ubi->lookuptbl[e->pnum] = e;
18648c2ecf20Sopenharmony_ci		} else {
18658c2ecf20Sopenharmony_ci			bool sync = false;
18668c2ecf20Sopenharmony_ci
18678c2ecf20Sopenharmony_ci			/*
18688c2ecf20Sopenharmony_ci			 * Usually old Fastmap PEBs are scheduled for erasure
18698c2ecf20Sopenharmony_ci			 * and we don't have to care about them, but if we face
18708c2ecf20Sopenharmony_ci			 * a power cut before scheduling them we need to
18718c2ecf20Sopenharmony_ci			 * take care of them here.
18728c2ecf20Sopenharmony_ci			 */
18738c2ecf20Sopenharmony_ci			if (ubi->lookuptbl[aeb->pnum])
18748c2ecf20Sopenharmony_ci				continue;
18758c2ecf20Sopenharmony_ci
18768c2ecf20Sopenharmony_ci			/*
18778c2ecf20Sopenharmony_ci			 * The fastmap update code might not find a free PEB for
18788c2ecf20Sopenharmony_ci			 * writing the fastmap anchor to and then reuses the
18798c2ecf20Sopenharmony_ci			 * current fastmap anchor PEB. When this PEB gets erased
18808c2ecf20Sopenharmony_ci			 * and a power cut happens before it is written again we
18818c2ecf20Sopenharmony_ci			 * must make sure that the fastmap attach code doesn't
18828c2ecf20Sopenharmony_ci			 * find any outdated fastmap anchors, hence we erase the
18838c2ecf20Sopenharmony_ci			 * outdated fastmap anchor PEBs synchronously here.
18848c2ecf20Sopenharmony_ci			 */
18858c2ecf20Sopenharmony_ci			if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
18868c2ecf20Sopenharmony_ci				sync = true;
18878c2ecf20Sopenharmony_ci
18888c2ecf20Sopenharmony_ci			err = erase_aeb(ubi, aeb, sync);
18898c2ecf20Sopenharmony_ci			if (err)
18908c2ecf20Sopenharmony_ci				goto out_free;
18918c2ecf20Sopenharmony_ci		}
18928c2ecf20Sopenharmony_ci
18938c2ecf20Sopenharmony_ci		found_pebs++;
18948c2ecf20Sopenharmony_ci	}
18958c2ecf20Sopenharmony_ci
18968c2ecf20Sopenharmony_ci	dbg_wl("found %i PEBs", found_pebs);
18978c2ecf20Sopenharmony_ci
18988c2ecf20Sopenharmony_ci	ubi_assert(ubi->good_peb_count == found_pebs);
18998c2ecf20Sopenharmony_ci
19008c2ecf20Sopenharmony_ci	reserved_pebs = WL_RESERVED_PEBS;
19018c2ecf20Sopenharmony_ci	ubi_fastmap_init(ubi, &reserved_pebs);
19028c2ecf20Sopenharmony_ci
19038c2ecf20Sopenharmony_ci	if (ubi->avail_pebs < reserved_pebs) {
19048c2ecf20Sopenharmony_ci		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
19058c2ecf20Sopenharmony_ci			ubi->avail_pebs, reserved_pebs);
19068c2ecf20Sopenharmony_ci		if (ubi->corr_peb_count)
19078c2ecf20Sopenharmony_ci			ubi_err(ubi, "%d PEBs are corrupted and not used",
19088c2ecf20Sopenharmony_ci				ubi->corr_peb_count);
19098c2ecf20Sopenharmony_ci		err = -ENOSPC;
19108c2ecf20Sopenharmony_ci		goto out_free;
19118c2ecf20Sopenharmony_ci	}
19128c2ecf20Sopenharmony_ci	ubi->avail_pebs -= reserved_pebs;
19138c2ecf20Sopenharmony_ci	ubi->rsvd_pebs += reserved_pebs;
19148c2ecf20Sopenharmony_ci
19158c2ecf20Sopenharmony_ci	/* Schedule wear-leveling if needed */
19168c2ecf20Sopenharmony_ci	err = ensure_wear_leveling(ubi, 0);
19178c2ecf20Sopenharmony_ci	if (err)
19188c2ecf20Sopenharmony_ci		goto out_free;
19198c2ecf20Sopenharmony_ci
19208c2ecf20Sopenharmony_ci#ifdef CONFIG_MTD_UBI_FASTMAP
19218c2ecf20Sopenharmony_ci	if (!ubi->ro_mode && !ubi->fm_disabled)
19228c2ecf20Sopenharmony_ci		ubi_ensure_anchor_pebs(ubi);
19238c2ecf20Sopenharmony_ci#endif
19248c2ecf20Sopenharmony_ci	return 0;
19258c2ecf20Sopenharmony_ci
19268c2ecf20Sopenharmony_ciout_free:
19278c2ecf20Sopenharmony_ci	shutdown_work(ubi);
19288c2ecf20Sopenharmony_ci	tree_destroy(ubi, &ubi->used);
19298c2ecf20Sopenharmony_ci	tree_destroy(ubi, &ubi->free);
19308c2ecf20Sopenharmony_ci	tree_destroy(ubi, &ubi->scrub);
19318c2ecf20Sopenharmony_ci	kfree(ubi->lookuptbl);
19328c2ecf20Sopenharmony_ci	return err;
19338c2ecf20Sopenharmony_ci}
19348c2ecf20Sopenharmony_ci
19358c2ecf20Sopenharmony_ci/**
19368c2ecf20Sopenharmony_ci * protection_queue_destroy - destroy the protection queue.
19378c2ecf20Sopenharmony_ci * @ubi: UBI device description object
19388c2ecf20Sopenharmony_ci */
19398c2ecf20Sopenharmony_cistatic void protection_queue_destroy(struct ubi_device *ubi)
19408c2ecf20Sopenharmony_ci{
19418c2ecf20Sopenharmony_ci	int i;
19428c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e, *tmp;
19438c2ecf20Sopenharmony_ci
19448c2ecf20Sopenharmony_ci	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
19458c2ecf20Sopenharmony_ci		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
19468c2ecf20Sopenharmony_ci			list_del(&e->u.list);
19478c2ecf20Sopenharmony_ci			wl_entry_destroy(ubi, e);
19488c2ecf20Sopenharmony_ci		}
19498c2ecf20Sopenharmony_ci	}
19508c2ecf20Sopenharmony_ci}
19518c2ecf20Sopenharmony_ci
19528c2ecf20Sopenharmony_ci/**
19538c2ecf20Sopenharmony_ci * ubi_wl_close - close the wear-leveling sub-system.
19548c2ecf20Sopenharmony_ci * @ubi: UBI device description object
19558c2ecf20Sopenharmony_ci */
19568c2ecf20Sopenharmony_civoid ubi_wl_close(struct ubi_device *ubi)
19578c2ecf20Sopenharmony_ci{
19588c2ecf20Sopenharmony_ci	dbg_wl("close the WL sub-system");
19598c2ecf20Sopenharmony_ci	ubi_fastmap_close(ubi);
19608c2ecf20Sopenharmony_ci	shutdown_work(ubi);
19618c2ecf20Sopenharmony_ci	protection_queue_destroy(ubi);
19628c2ecf20Sopenharmony_ci	tree_destroy(ubi, &ubi->used);
19638c2ecf20Sopenharmony_ci	tree_destroy(ubi, &ubi->erroneous);
19648c2ecf20Sopenharmony_ci	tree_destroy(ubi, &ubi->free);
19658c2ecf20Sopenharmony_ci	tree_destroy(ubi, &ubi->scrub);
19668c2ecf20Sopenharmony_ci	kfree(ubi->lookuptbl);
19678c2ecf20Sopenharmony_ci}
19688c2ecf20Sopenharmony_ci
19698c2ecf20Sopenharmony_ci/**
19708c2ecf20Sopenharmony_ci * self_check_ec - make sure that the erase counter of a PEB is correct.
19718c2ecf20Sopenharmony_ci * @ubi: UBI device description object
19728c2ecf20Sopenharmony_ci * @pnum: the physical eraseblock number to check
19738c2ecf20Sopenharmony_ci * @ec: the erase counter to check
19748c2ecf20Sopenharmony_ci *
19758c2ecf20Sopenharmony_ci * This function returns zero if the erase counter of physical eraseblock @pnum
19768c2ecf20Sopenharmony_ci * is equivalent to @ec, and a negative error code if not or if an error
19778c2ecf20Sopenharmony_ci * occurred.
19788c2ecf20Sopenharmony_ci */
19798c2ecf20Sopenharmony_cistatic int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
19808c2ecf20Sopenharmony_ci{
19818c2ecf20Sopenharmony_ci	int err;
19828c2ecf20Sopenharmony_ci	long long read_ec;
19838c2ecf20Sopenharmony_ci	struct ubi_ec_hdr *ec_hdr;
19848c2ecf20Sopenharmony_ci
19858c2ecf20Sopenharmony_ci	if (!ubi_dbg_chk_gen(ubi))
19868c2ecf20Sopenharmony_ci		return 0;
19878c2ecf20Sopenharmony_ci
19888c2ecf20Sopenharmony_ci	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
19898c2ecf20Sopenharmony_ci	if (!ec_hdr)
19908c2ecf20Sopenharmony_ci		return -ENOMEM;
19918c2ecf20Sopenharmony_ci
19928c2ecf20Sopenharmony_ci	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
19938c2ecf20Sopenharmony_ci	if (err && err != UBI_IO_BITFLIPS) {
19948c2ecf20Sopenharmony_ci		/* The header does not have to exist */
19958c2ecf20Sopenharmony_ci		err = 0;
19968c2ecf20Sopenharmony_ci		goto out_free;
19978c2ecf20Sopenharmony_ci	}
19988c2ecf20Sopenharmony_ci
19998c2ecf20Sopenharmony_ci	read_ec = be64_to_cpu(ec_hdr->ec);
20008c2ecf20Sopenharmony_ci	if (ec != read_ec && read_ec - ec > 1) {
20018c2ecf20Sopenharmony_ci		ubi_err(ubi, "self-check failed for PEB %d", pnum);
20028c2ecf20Sopenharmony_ci		ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
20038c2ecf20Sopenharmony_ci		dump_stack();
20048c2ecf20Sopenharmony_ci		err = 1;
20058c2ecf20Sopenharmony_ci	} else
20068c2ecf20Sopenharmony_ci		err = 0;
20078c2ecf20Sopenharmony_ci
20088c2ecf20Sopenharmony_ciout_free:
20098c2ecf20Sopenharmony_ci	kfree(ec_hdr);
20108c2ecf20Sopenharmony_ci	return err;
20118c2ecf20Sopenharmony_ci}
20128c2ecf20Sopenharmony_ci
20138c2ecf20Sopenharmony_ci/**
20148c2ecf20Sopenharmony_ci * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
20158c2ecf20Sopenharmony_ci * @ubi: UBI device description object
20168c2ecf20Sopenharmony_ci * @e: the wear-leveling entry to check
20178c2ecf20Sopenharmony_ci * @root: the root of the tree
20188c2ecf20Sopenharmony_ci *
20198c2ecf20Sopenharmony_ci * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
20208c2ecf20Sopenharmony_ci * is not.
20218c2ecf20Sopenharmony_ci */
20228c2ecf20Sopenharmony_cistatic int self_check_in_wl_tree(const struct ubi_device *ubi,
20238c2ecf20Sopenharmony_ci				 struct ubi_wl_entry *e, struct rb_root *root)
20248c2ecf20Sopenharmony_ci{
20258c2ecf20Sopenharmony_ci	if (!ubi_dbg_chk_gen(ubi))
20268c2ecf20Sopenharmony_ci		return 0;
20278c2ecf20Sopenharmony_ci
20288c2ecf20Sopenharmony_ci	if (in_wl_tree(e, root))
20298c2ecf20Sopenharmony_ci		return 0;
20308c2ecf20Sopenharmony_ci
20318c2ecf20Sopenharmony_ci	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
20328c2ecf20Sopenharmony_ci		e->pnum, e->ec, root);
20338c2ecf20Sopenharmony_ci	dump_stack();
20348c2ecf20Sopenharmony_ci	return -EINVAL;
20358c2ecf20Sopenharmony_ci}
20368c2ecf20Sopenharmony_ci
20378c2ecf20Sopenharmony_ci/**
20388c2ecf20Sopenharmony_ci * self_check_in_pq - check if wear-leveling entry is in the protection
20398c2ecf20Sopenharmony_ci *                        queue.
20408c2ecf20Sopenharmony_ci * @ubi: UBI device description object
20418c2ecf20Sopenharmony_ci * @e: the wear-leveling entry to check
20428c2ecf20Sopenharmony_ci *
20438c2ecf20Sopenharmony_ci * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
20448c2ecf20Sopenharmony_ci */
20458c2ecf20Sopenharmony_cistatic int self_check_in_pq(const struct ubi_device *ubi,
20468c2ecf20Sopenharmony_ci			    struct ubi_wl_entry *e)
20478c2ecf20Sopenharmony_ci{
20488c2ecf20Sopenharmony_ci	if (!ubi_dbg_chk_gen(ubi))
20498c2ecf20Sopenharmony_ci		return 0;
20508c2ecf20Sopenharmony_ci
20518c2ecf20Sopenharmony_ci	if (in_pq(ubi, e))
20528c2ecf20Sopenharmony_ci		return 0;
20538c2ecf20Sopenharmony_ci
20548c2ecf20Sopenharmony_ci	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
20558c2ecf20Sopenharmony_ci		e->pnum, e->ec);
20568c2ecf20Sopenharmony_ci	dump_stack();
20578c2ecf20Sopenharmony_ci	return -EINVAL;
20588c2ecf20Sopenharmony_ci}
20598c2ecf20Sopenharmony_ci#ifndef CONFIG_MTD_UBI_FASTMAP
20608c2ecf20Sopenharmony_cistatic struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
20618c2ecf20Sopenharmony_ci{
20628c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e;
20638c2ecf20Sopenharmony_ci
20648c2ecf20Sopenharmony_ci	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
20658c2ecf20Sopenharmony_ci	self_check_in_wl_tree(ubi, e, &ubi->free);
20668c2ecf20Sopenharmony_ci	ubi->free_count--;
20678c2ecf20Sopenharmony_ci	ubi_assert(ubi->free_count >= 0);
20688c2ecf20Sopenharmony_ci	rb_erase(&e->u.rb, &ubi->free);
20698c2ecf20Sopenharmony_ci
20708c2ecf20Sopenharmony_ci	return e;
20718c2ecf20Sopenharmony_ci}
20728c2ecf20Sopenharmony_ci
20738c2ecf20Sopenharmony_ci/**
20748c2ecf20Sopenharmony_ci * produce_free_peb - produce a free physical eraseblock.
20758c2ecf20Sopenharmony_ci * @ubi: UBI device description object
20768c2ecf20Sopenharmony_ci *
20778c2ecf20Sopenharmony_ci * This function tries to make a free PEB by means of synchronous execution of
20788c2ecf20Sopenharmony_ci * pending works. This may be needed if, for example, the background thread is
20798c2ecf20Sopenharmony_ci * disabled. Must be called with @ubi->wl_lock held, which is dropped and
20808c2ecf20Sopenharmony_ci * re-acquired internally. Returns zero on success, negative error on failure.
20818c2ecf20Sopenharmony_ci */
20828c2ecf20Sopenharmony_cistatic int produce_free_peb(struct ubi_device *ubi)
20838c2ecf20Sopenharmony_ci{
20848c2ecf20Sopenharmony_ci	int err;
20858c2ecf20Sopenharmony_ci
20868c2ecf20Sopenharmony_ci	while (!ubi->free.rb_node && ubi->works_count) {
20878c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
20888c2ecf20Sopenharmony_ci
20898c2ecf20Sopenharmony_ci		dbg_wl("do one work synchronously");
20908c2ecf20Sopenharmony_ci		err = do_work(ubi);
20918c2ecf20Sopenharmony_ci
20928c2ecf20Sopenharmony_ci		spin_lock(&ubi->wl_lock);
20938c2ecf20Sopenharmony_ci		if (err)
20948c2ecf20Sopenharmony_ci			return err;
20958c2ecf20Sopenharmony_ci	}
20968c2ecf20Sopenharmony_ci
20978c2ecf20Sopenharmony_ci	return 0;
20988c2ecf20Sopenharmony_ci}
20998c2ecf20Sopenharmony_ci
21008c2ecf20Sopenharmony_ci/**
21018c2ecf20Sopenharmony_ci * ubi_wl_get_peb - get a physical eraseblock.
21028c2ecf20Sopenharmony_ci * @ubi: UBI device description object
21038c2ecf20Sopenharmony_ci *
21048c2ecf20Sopenharmony_ci * This function returns a physical eraseblock number in case of success and a
21058c2ecf20Sopenharmony_ci * negative error code in case of failure.
21068c2ecf20Sopenharmony_ci * Returns with ubi->fm_eba_sem held in read mode!
21078c2ecf20Sopenharmony_ci */
21088c2ecf20Sopenharmony_ciint ubi_wl_get_peb(struct ubi_device *ubi)
21098c2ecf20Sopenharmony_ci{
21108c2ecf20Sopenharmony_ci	int err;
21118c2ecf20Sopenharmony_ci	struct ubi_wl_entry *e;
21128c2ecf20Sopenharmony_ci
21138c2ecf20Sopenharmony_ciretry:
21148c2ecf20Sopenharmony_ci	down_read(&ubi->fm_eba_sem);
21158c2ecf20Sopenharmony_ci	spin_lock(&ubi->wl_lock);
21168c2ecf20Sopenharmony_ci	if (!ubi->free.rb_node) {
21178c2ecf20Sopenharmony_ci		if (ubi->works_count == 0) {
21188c2ecf20Sopenharmony_ci			ubi_err(ubi, "no free eraseblocks");
21198c2ecf20Sopenharmony_ci			ubi_assert(list_empty(&ubi->works));
21208c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
21218c2ecf20Sopenharmony_ci			return -ENOSPC;
21228c2ecf20Sopenharmony_ci		}
21238c2ecf20Sopenharmony_ci
21248c2ecf20Sopenharmony_ci		err = produce_free_peb(ubi);
21258c2ecf20Sopenharmony_ci		if (err < 0) {
21268c2ecf20Sopenharmony_ci			spin_unlock(&ubi->wl_lock);
21278c2ecf20Sopenharmony_ci			return err;
21288c2ecf20Sopenharmony_ci		}
21298c2ecf20Sopenharmony_ci		spin_unlock(&ubi->wl_lock);
21308c2ecf20Sopenharmony_ci		up_read(&ubi->fm_eba_sem);
21318c2ecf20Sopenharmony_ci		goto retry;
21338c2ecf20Sopenharmony_ci	}
21348c2ecf20Sopenharmony_ci	e = wl_get_wle(ubi);
21358c2ecf20Sopenharmony_ci	prot_queue_add(ubi, e);
21368c2ecf20Sopenharmony_ci	spin_unlock(&ubi->wl_lock);
21378c2ecf20Sopenharmony_ci
21388c2ecf20Sopenharmony_ci	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
21398c2ecf20Sopenharmony_ci				    ubi->peb_size - ubi->vid_hdr_aloffset);
21408c2ecf20Sopenharmony_ci	if (err) {
21418c2ecf20Sopenharmony_ci		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
21428c2ecf20Sopenharmony_ci		return err;
21438c2ecf20Sopenharmony_ci	}
21448c2ecf20Sopenharmony_ci
21458c2ecf20Sopenharmony_ci	return e->pnum;
21468c2ecf20Sopenharmony_ci}
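
/*
 * Illustrative usage (hypothetical, not part of the driver):
 * ubi_wl_get_peb() returns with @ubi->fm_eba_sem held in read mode even
 * on failure, so the caller is responsible for dropping it on every path.
 */
static int __maybe_unused example_get_peb(struct ubi_device *ubi)
{
	int pnum = ubi_wl_get_peb(ubi);

	if (pnum >= 0) {
		/* ... write the VID header and map the LEB here ... */
	}

	/* The semaphore is held on success and on failure alike */
	up_read(&ubi->fm_eba_sem);
	return pnum;
}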
21478c2ecf20Sopenharmony_ci#else
21488c2ecf20Sopenharmony_ci#include "fastmap-wl.c"
21498c2ecf20Sopenharmony_ci#endif