// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

#include <linux/crc32.h>
#include <linux/bitmap.h>
#include "ubi.h"

/**
 * init_seen - allocate memory for the seen logic, used for debugging.
 * @ubi: UBI device description object
 */
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
	unsigned long *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
		      GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(unsigned long *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	set_bit(pnum, seen);
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for a UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

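	/*
	 * Worst-case on-flash budget, matching the layout written by
	 * ubi_write_fastmap(): one super block, one header, two scan pools
	 * (normal and WL), one EC entry per PEB, one EBA slot per PEB and
	 * one volume header per possible volume. The sum is rounded up
	 * because a fastmap always occupies whole LEBs.
	 */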
	size = sizeof(struct ubi_fm_sb) +
		sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

/**
 * new_fm_vbuf - allocate a new VID buffer for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_io_buf on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_io_buf *new;
	struct ubi_vid_hdr *vh;

	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	vh = ubi_get_vid_hdr(new);
	vh->vol_type = UBI_VID_DYNAMIC;
	vh->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	vh->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = ubi_alloc_aeb(ai, pnum, ec);
	if (!aeb)
		return -ENOMEM;

	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * An ERR_PTR() value indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;

	av = ubi_add_av(ai, vol_id);
	if (IS_ERR(av))
		return av;

	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);
	return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			ubi_free_aeb(ai, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

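		/*
		 * ubi_compare_lebs() encodes its verdict in the low bits of
		 * the return value; bit 0 set means the copy under
		 * inspection (new_aeb) is the more recent one.
		 */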
		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
			if (!victim)
				return -ENOMEM;

			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			aeb->sqnum = new_aeb->sqnum;
			ubi_free_aeb(ai, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

/**
 * process_pool_aeb - process a non-empty PEB found in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	int vol_id = be32_to_cpu(new_vh->vol_id);
	struct ubi_ainf_volume *av;

	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
		ubi_free_aeb(ai, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	av = ubi_find_av(ai, vol_id);
	if (!av) {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		ubi_free_aeb(ai, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(vol_id == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				ubi_free_aeb(ai, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		kfree(ech);
		return -ENOMEM;
	}

	vh = ubi_get_vid_hdr(vb);

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);
			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);

			if (err == UBI_IO_FF_BITFLIPS)
				scrub = 1;

			ret = add_aeb(ai, free, pnum, ec, scrub);
			if (ret)
				goto out;
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_buf(vb);
	kfree(ech);
	return ret;
}

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

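	/*
	 * Parse the image in the order ubi_write_fastmap() lays it out:
	 * super block, header, the two scan pools (normal and WL), the
	 * free/used/scrub/erase EC tables and, per volume, a volume header
	 * followed by its EBA table.
	 */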
	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (IS_ERR(av)) {
			if (PTR_ERR(av) == -EEXIST)
				ubi_err(ubi, "volume (ID %i) already exists",
					be32_to_cpu(fmvhdr->vol_id));

			goto fail_bad;
		}

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}

	return ret;
}

/**
 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
 * @ai: UBI attach info to be filled
 */
static int find_fm_anchor(struct ubi_attach_info *ai)
{
	int ret = -1;
	struct ubi_ainf_peb *aeb;
	unsigned long long max_sqnum = 0;

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
			max_sqnum = aeb->sqnum;
			ret = aeb->pnum;
		}
	}

	return ret;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     struct ubi_attach_info *scan_ai)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	struct ubi_ainf_peb *aeb;
	int i, used_blocks, pnum, fm_anchor, ret = 0;
	size_t fm_size;
	u32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fm_anchor = find_fm_anchor(scan_ai);
	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	/* Add fastmap blocks (pnum < UBI_FM_MAX_START) into attach structure. */
	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
		ret = add_aeb(ai, &ai->fastmap, aeb->pnum, aeb->ec, 0);
		if (ret)
			return ret;
	}

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	vh = ubi_get_vid_hdr(vb);

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		if (i == 0 && pnum != fm_anchor) {
			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
				pnum, fm_anchor);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq: %d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
				       pnum, 0, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}

		/*
		 * Add the remaining fastmap blocks (pnum >= UBI_FM_MAX_START)
		 * into the attach structure.
		 */
		if (pnum >= UBI_FM_MAX_START) {
			ret = add_aeb(ai, &ai->fastmap, pnum,
				      be64_to_cpu(ech->ec), 0);
			if (ret)
				goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

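	/*
	 * The CRC stored in the super block covers the whole fastmap image
	 * with the data_crc field itself zeroed; compute it here the same
	 * way ubi_write_fastmap() does when writing.
	 */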
	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;

	ubi_free_vid_buf(vb);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_buf(vb);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

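/**
 * ubi_fastmap_init_checkmap - allocate the LEB check bitmap of a volume.
 * @vol: UBI volume description object
 * @leb_count: number of LEBs the bitmap has to cover
 *
 * The bitmap is only needed after an attach by fastmap; see
 * @ubi->fast_attach. Returns 0 on success, < 0 indicates an internal error.
 */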
int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
{
	struct ubi_device *ubi = vol->ubi;

	if (!ubi->fast_attach)
		return 0;

	vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
				GFP_KERNEL);
	if (!vol->checkmap)
		return -ENOMEM;

	return 0;
}

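/**
 * ubi_fastmap_destroy_checkmap - free the LEB check bitmap of a volume.
 * @vol: UBI volume description object
 */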
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
	kfree(vol->checkmap);
}

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_io_buf *avbuf, *dvbuf;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	unsigned long *seen_pebs;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avbuf) {
		ret = -ENOMEM;
		goto out;
	}

	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvbuf) {
		ret = -ENOMEM;
		goto out_free_avbuf;
	}

	avhdr = ubi_get_vid_hdr(avbuf);
	dvhdr = ubi_get_vid_hdr(dvbuf);

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_free_dvbuf;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

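	/*
	 * The structures below are laid out in fm_buf in exactly the order
	 * in which ubi_attach_fastmap() parses them back at attach time.
	 */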
	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++) {
			struct ubi_eba_leb_desc ldesc;

			ubi_eba_get_ldesc(vol, j, &ldesc);
			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
		}

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_free_seen;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_free_seen;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
					new_fm->e[i]->pnum, 0, ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_free_seen;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_free_seen:
	free_seen(seen_pebs);
out_free_dvbuf:
	ubi_free_vid_buf(dvbuf);
out_free_avbuf:
	ubi_free_vid_buf(avbuf);

out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

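	/*
	 * ubi_io_sync_erase() returns the number of erasures performed;
	 * account for them in the erase counter.
	 */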
	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon the next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) make sure that the current fastmap will not be used upon
 * attach time and continue, or b) fall back to RO mode to keep the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_io_buf *vb = NULL;
	struct ubi_vid_hdr *vh;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vb)
		goto out_free_fm;

	vh = ubi_get_vid_hdr(vb);

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_buf(vb);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

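	/*
	 * Lock ordering: fm_protect, then work_sem, then fm_eba_sem;
	 * they are dropped in the reverse order below.
	 */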
	down_write(&ubi->fm_protect);
	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return 0;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi->fm_anchor;
	ubi->fm_anchor = NULL;
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	ret = ubi_write_fastmap(ubi, new_fm);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);
	up_write(&ubi->fm_protect);
	kfree(old_fm);

	ubi_ensure_anchor_pebs(ubi);

	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}