xref: /kernel/linux/linux-6.6/drivers/mtd/mtdcore.c (revision 62306a36)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Core registration and callback routines for MTD
4 * drivers and users.
5 *
6 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7 * Copyright © 2006      Red Hat UK Limited
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/ptrace.h>
13#include <linux/seq_file.h>
14#include <linux/string.h>
15#include <linux/timer.h>
16#include <linux/major.h>
17#include <linux/fs.h>
18#include <linux/err.h>
19#include <linux/ioctl.h>
20#include <linux/init.h>
21#include <linux/of.h>
22#include <linux/proc_fs.h>
23#include <linux/idr.h>
24#include <linux/backing-dev.h>
25#include <linux/gfp.h>
26#include <linux/random.h>
27#include <linux/slab.h>
28#include <linux/reboot.h>
29#include <linux/leds.h>
30#include <linux/debugfs.h>
31#include <linux/nvmem-provider.h>
32#include <linux/root_dev.h>
33
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/partitions.h>
36
37#include "mtdcore.h"
38
39struct backing_dev_info *mtd_bdi;
40
41#ifdef CONFIG_PM_SLEEP
42
43static int mtd_cls_suspend(struct device *dev)
44{
45	struct mtd_info *mtd = dev_get_drvdata(dev);
46
47	return mtd ? mtd_suspend(mtd) : 0;
48}
49
50static int mtd_cls_resume(struct device *dev)
51{
52	struct mtd_info *mtd = dev_get_drvdata(dev);
53
54	if (mtd)
55		mtd_resume(mtd);
56	return 0;
57}
58
59static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
60#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
61#else
62#define MTD_CLS_PM_OPS NULL
63#endif
64
65static struct class mtd_class = {
66	.name = "mtd",
67	.pm = MTD_CLS_PM_OPS,
68};
69
70static DEFINE_IDR(mtd_idr);
71
72/* These are exported solely for the purpose of mtd_blkdevs.c. You
73   should not use them for _anything_ else */
74DEFINE_MUTEX(mtd_table_mutex);
75EXPORT_SYMBOL_GPL(mtd_table_mutex);
76
77struct mtd_info *__mtd_next_device(int i)
78{
79	return idr_get_next(&mtd_idr, &i);
80}
81EXPORT_SYMBOL_GPL(__mtd_next_device);
82
83static LIST_HEAD(mtd_notifiers);
84
85
86#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
87
88/* REVISIT once MTD uses the driver model better, whoever allocates
89 * the mtd_info will probably want to use the release() hook...
90 */
91static void mtd_release(struct device *dev)
92{
93	struct mtd_info *mtd = dev_get_drvdata(dev);
94	dev_t index = MTD_DEVT(mtd->index);
95
96	idr_remove(&mtd_idr, mtd->index);
97	of_node_put(mtd_get_of_node(mtd));
98
99	if (mtd_is_partition(mtd))
100		release_mtd_partition(mtd);
101
102	/* remove /dev/mtdXro node */
103	device_destroy(&mtd_class, index + 1);
104}
105
106static void mtd_device_release(struct kref *kref)
107{
108	struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
109	bool is_partition = mtd_is_partition(mtd);
110
111	debugfs_remove_recursive(mtd->dbg.dfs_dir);
112
113	/* Try to remove the NVMEM provider */
114	nvmem_unregister(mtd->nvmem);
115
116	device_unregister(&mtd->dev);
117
118	/*
119	 *  Clear dev so mtd can be safely re-registered later if desired.
120	 *  Should not be done for partitions,
121	 *  as they were already destroyed in device_unregister().
122	 */
123	if (!is_partition)
124		memset(&mtd->dev, 0, sizeof(mtd->dev));
125
126	module_put(THIS_MODULE);
127}
128
129#define MTD_DEVICE_ATTR_RO(name) \
130static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
131
132#define MTD_DEVICE_ATTR_RW(name) \
133static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
134
135static ssize_t mtd_type_show(struct device *dev,
136		struct device_attribute *attr, char *buf)
137{
138	struct mtd_info *mtd = dev_get_drvdata(dev);
139	char *type;
140
141	switch (mtd->type) {
142	case MTD_ABSENT:
143		type = "absent";
144		break;
145	case MTD_RAM:
146		type = "ram";
147		break;
148	case MTD_ROM:
149		type = "rom";
150		break;
151	case MTD_NORFLASH:
152		type = "nor";
153		break;
154	case MTD_NANDFLASH:
155		type = "nand";
156		break;
157	case MTD_DATAFLASH:
158		type = "dataflash";
159		break;
160	case MTD_UBIVOLUME:
161		type = "ubi";
162		break;
163	case MTD_MLCNANDFLASH:
164		type = "mlc-nand";
165		break;
166	default:
167		type = "unknown";
168	}
169
170	return sysfs_emit(buf, "%s\n", type);
171}
172MTD_DEVICE_ATTR_RO(type);
173
174static ssize_t mtd_flags_show(struct device *dev,
175		struct device_attribute *attr, char *buf)
176{
177	struct mtd_info *mtd = dev_get_drvdata(dev);
178
179	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
180}
181MTD_DEVICE_ATTR_RO(flags);
182
183static ssize_t mtd_size_show(struct device *dev,
184		struct device_attribute *attr, char *buf)
185{
186	struct mtd_info *mtd = dev_get_drvdata(dev);
187
188	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
189}
190MTD_DEVICE_ATTR_RO(size);
191
192static ssize_t mtd_erasesize_show(struct device *dev,
193		struct device_attribute *attr, char *buf)
194{
195	struct mtd_info *mtd = dev_get_drvdata(dev);
196
197	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
198}
199MTD_DEVICE_ATTR_RO(erasesize);
200
201static ssize_t mtd_writesize_show(struct device *dev,
202		struct device_attribute *attr, char *buf)
203{
204	struct mtd_info *mtd = dev_get_drvdata(dev);
205
206	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
207}
208MTD_DEVICE_ATTR_RO(writesize);
209
210static ssize_t mtd_subpagesize_show(struct device *dev,
211		struct device_attribute *attr, char *buf)
212{
213	struct mtd_info *mtd = dev_get_drvdata(dev);
214	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
215
216	return sysfs_emit(buf, "%u\n", subpagesize);
217}
218MTD_DEVICE_ATTR_RO(subpagesize);
219
220static ssize_t mtd_oobsize_show(struct device *dev,
221		struct device_attribute *attr, char *buf)
222{
223	struct mtd_info *mtd = dev_get_drvdata(dev);
224
225	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
226}
227MTD_DEVICE_ATTR_RO(oobsize);
228
229static ssize_t mtd_oobavail_show(struct device *dev,
230				 struct device_attribute *attr, char *buf)
231{
232	struct mtd_info *mtd = dev_get_drvdata(dev);
233
234	return sysfs_emit(buf, "%u\n", mtd->oobavail);
235}
236MTD_DEVICE_ATTR_RO(oobavail);
237
238static ssize_t mtd_numeraseregions_show(struct device *dev,
239		struct device_attribute *attr, char *buf)
240{
241	struct mtd_info *mtd = dev_get_drvdata(dev);
242
243	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
244}
245MTD_DEVICE_ATTR_RO(numeraseregions);
246
247static ssize_t mtd_name_show(struct device *dev,
248		struct device_attribute *attr, char *buf)
249{
250	struct mtd_info *mtd = dev_get_drvdata(dev);
251
252	return sysfs_emit(buf, "%s\n", mtd->name);
253}
254MTD_DEVICE_ATTR_RO(name);
255
256static ssize_t mtd_ecc_strength_show(struct device *dev,
257				     struct device_attribute *attr, char *buf)
258{
259	struct mtd_info *mtd = dev_get_drvdata(dev);
260
261	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
262}
263MTD_DEVICE_ATTR_RO(ecc_strength);
264
265static ssize_t mtd_bitflip_threshold_show(struct device *dev,
266					  struct device_attribute *attr,
267					  char *buf)
268{
269	struct mtd_info *mtd = dev_get_drvdata(dev);
270
271	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
272}
273
274static ssize_t mtd_bitflip_threshold_store(struct device *dev,
275					   struct device_attribute *attr,
276					   const char *buf, size_t count)
277{
278	struct mtd_info *mtd = dev_get_drvdata(dev);
279	unsigned int bitflip_threshold;
280	int retval;
281
282	retval = kstrtouint(buf, 0, &bitflip_threshold);
283	if (retval)
284		return retval;
285
286	mtd->bitflip_threshold = bitflip_threshold;
287	return count;
288}
289MTD_DEVICE_ATTR_RW(bitflip_threshold);
290
291static ssize_t mtd_ecc_step_size_show(struct device *dev,
292		struct device_attribute *attr, char *buf)
293{
294	struct mtd_info *mtd = dev_get_drvdata(dev);
295
296	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
297
298}
299MTD_DEVICE_ATTR_RO(ecc_step_size);
300
301static ssize_t mtd_corrected_bits_show(struct device *dev,
302		struct device_attribute *attr, char *buf)
303{
304	struct mtd_info *mtd = dev_get_drvdata(dev);
305	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
306
307	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
308}
309MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */
310
311static ssize_t mtd_ecc_failures_show(struct device *dev,
312		struct device_attribute *attr, char *buf)
313{
314	struct mtd_info *mtd = dev_get_drvdata(dev);
315	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
316
317	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
318}
319MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */
320
321static ssize_t mtd_bad_blocks_show(struct device *dev,
322		struct device_attribute *attr, char *buf)
323{
324	struct mtd_info *mtd = dev_get_drvdata(dev);
325	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
326
327	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
328}
329MTD_DEVICE_ATTR_RO(bad_blocks);
330
331static ssize_t mtd_bbt_blocks_show(struct device *dev,
332		struct device_attribute *attr, char *buf)
333{
334	struct mtd_info *mtd = dev_get_drvdata(dev);
335	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
336
337	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
338}
339MTD_DEVICE_ATTR_RO(bbt_blocks);
340
341static struct attribute *mtd_attrs[] = {
342	&dev_attr_type.attr,
343	&dev_attr_flags.attr,
344	&dev_attr_size.attr,
345	&dev_attr_erasesize.attr,
346	&dev_attr_writesize.attr,
347	&dev_attr_subpagesize.attr,
348	&dev_attr_oobsize.attr,
349	&dev_attr_oobavail.attr,
350	&dev_attr_numeraseregions.attr,
351	&dev_attr_name.attr,
352	&dev_attr_ecc_strength.attr,
353	&dev_attr_ecc_step_size.attr,
354	&dev_attr_corrected_bits.attr,
355	&dev_attr_ecc_failures.attr,
356	&dev_attr_bad_blocks.attr,
357	&dev_attr_bbt_blocks.attr,
358	&dev_attr_bitflip_threshold.attr,
359	NULL,
360};
361ATTRIBUTE_GROUPS(mtd);
362
363static const struct device_type mtd_devtype = {
364	.name		= "mtd",
365	.groups		= mtd_groups,
366	.release	= mtd_release,
367};
368
369static bool mtd_expert_analysis_mode;
370
371#ifdef CONFIG_DEBUG_FS
372bool mtd_check_expert_analysis_mode(void)
373{
374	const char *mtd_expert_analysis_warning =
375		"Bad block checks have been entirely disabled.\n"
376		"This is only reserved for post-mortem forensics and debug purposes.\n"
377		"Never enable this mode if you do not know what you are doing!\n";
378
379	return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
380}
381EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
382#endif
383
384static struct dentry *dfs_dir_mtd;
385
386static void mtd_debugfs_populate(struct mtd_info *mtd)
387{
388	struct device *dev = &mtd->dev;
389
390	if (IS_ERR_OR_NULL(dfs_dir_mtd))
391		return;
392
393	mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
394}
395
396#ifndef CONFIG_MMU
397unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
398{
399	switch (mtd->type) {
400	case MTD_RAM:
401		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
402			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
403	case MTD_ROM:
404		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
405			NOMMU_MAP_READ;
406	default:
407		return NOMMU_MAP_COPY;
408	}
409}
410EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
411#endif
412
413static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
414			       void *cmd)
415{
416	struct mtd_info *mtd;
417
418	mtd = container_of(n, struct mtd_info, reboot_notifier);
419	mtd->_reboot(mtd);
420
421	return NOTIFY_DONE;
422}
423
424/**
425 * mtd_wunit_to_pairing_info - get pairing information of a wunit
426 * @mtd: pointer to new MTD device info structure
427 * @wunit: write unit we are interested in
428 * @info: returned pairing information
429 *
430 * Retrieve the pairing information associated with the wunit.
431 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
432 * paired together, and where programming a page may influence the page it is
433 * paired with.
434 * The notion of page is replaced by the term wunit (write-unit) to stay
435 * consistent with the ->writesize field.
436 *
437 * The @wunit argument can be extracted from an absolute offset using
438 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
439 * to @wunit.
440 *
441 * From the pairing info the MTD user can find all the wunits paired with
442 * @wunit using the following loop:
443 *
444 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
445 *	info.group = i;
446 *	mtd_pairing_info_to_wunit(mtd, &info);
447 *	...
448 * }
449 */
450int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
451			      struct mtd_pairing_info *info)
452{
453	struct mtd_info *master = mtd_get_master(mtd);
454	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
455
456	if (wunit < 0 || wunit >= npairs)
457		return -EINVAL;
458
459	if (master->pairing && master->pairing->get_info)
460		return master->pairing->get_info(master, wunit, info);
461
462	info->group = 0;
463	info->pair = wunit;
464
465	return 0;
466}
467EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
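
/*
 * Illustrative usage sketch (not part of this file), following the loop in
 * the kernel-doc above: enumerating every wunit paired with a given wunit.
 * The 'mtd', 'wunit' and 'paired' identifiers are hypothetical.
 *
 * struct mtd_pairing_info info;
 * int i, paired;
 *
 * mtd_wunit_to_pairing_info(mtd, wunit, &info);
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.group = i;
 *	paired = mtd_pairing_info_to_wunit(mtd, &info);
 *	// 'paired' shares its cells with 'wunit'
 * }
 */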
468
469/**
470 * mtd_pairing_info_to_wunit - get wunit from pairing information
471 * @mtd: pointer to new MTD device info structure
472 * @info: pairing information struct
473 *
474 * Returns a non-negative number representing the wunit associated with the
475 * info struct, or a negative error code.
476 *
477 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
478 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
479 * doc).
480 *
481 * It can also be used to only program the first page of each pair (i.e.
482 * page attached to group 0), which allows one to use an MLC NAND in
483 * software-emulated SLC mode:
484 *
485 * info.group = 0;
486 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
487 * for (info.pair = 0; info.pair < npairs; info.pair++) {
488 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
489 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
490 *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
491 * }
492 */
493int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
494			      const struct mtd_pairing_info *info)
495{
496	struct mtd_info *master = mtd_get_master(mtd);
497	int ngroups = mtd_pairing_groups(master);
498	int npairs = mtd_wunit_per_eb(master) / ngroups;
499
500	if (!info || info->pair < 0 || info->pair >= npairs ||
501	    info->group < 0 || info->group >= ngroups)
502		return -EINVAL;
503
504	if (master->pairing && master->pairing->get_wunit)
505		return mtd->pairing->get_wunit(master, info);
506
507	return info->pair;
508}
509EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
510
511/**
512 * mtd_pairing_groups - get the number of pairing groups
513 * @mtd: pointer to new MTD device info structure
514 *
515 * Returns the number of pairing groups.
516 *
517 * This number is usually equal to the number of bits exposed by a single
518 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
519 * to iterate over all pages of a given pair.
520 */
521int mtd_pairing_groups(struct mtd_info *mtd)
522{
523	struct mtd_info *master = mtd_get_master(mtd);
524
525	if (!master->pairing || !master->pairing->ngroups)
526		return 1;
527
528	return master->pairing->ngroups;
529}
530EXPORT_SYMBOL_GPL(mtd_pairing_groups);
531
532static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
533			      void *val, size_t bytes)
534{
535	struct mtd_info *mtd = priv;
536	size_t retlen;
537	int err;
538
539	err = mtd_read(mtd, offset, bytes, &retlen, val);
540	if (err && err != -EUCLEAN)
541		return err;
542
543	return retlen == bytes ? 0 : -EIO;
544}
545
546static int mtd_nvmem_add(struct mtd_info *mtd)
547{
548	struct device_node *node = mtd_get_of_node(mtd);
549	struct nvmem_config config = {};
550
551	config.id = NVMEM_DEVID_NONE;
552	config.dev = &mtd->dev;
553	config.name = dev_name(&mtd->dev);
554	config.owner = THIS_MODULE;
555	config.reg_read = mtd_nvmem_reg_read;
556	config.size = mtd->size;
557	config.word_size = 1;
558	config.stride = 1;
559	config.read_only = true;
560	config.root_only = true;
561	config.ignore_wp = true;
562	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
563	config.priv = mtd;
564
565	mtd->nvmem = nvmem_register(&config);
566	if (IS_ERR(mtd->nvmem)) {
567		/* Just ignore if there is no NVMEM support in the kernel */
568		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
569			mtd->nvmem = NULL;
570		else
571			return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
572					     "Failed to register NVMEM device\n");
573	}
574
575	return 0;
576}
577
578static void mtd_check_of_node(struct mtd_info *mtd)
579{
580	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
581	const char *pname, *prefix = "partition-";
582	int plen, mtd_name_len, offset, prefix_len;
583
584	/* Check if MTD already has a device node */
585	if (mtd_get_of_node(mtd))
586		return;
587
588	if (!mtd_is_partition(mtd))
589		return;
590
591	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
592	if (!parent_dn)
593		return;
594
595	if (mtd_is_partition(mtd->parent))
596		partitions = of_node_get(parent_dn);
597	else
598		partitions = of_get_child_by_name(parent_dn, "partitions");
599	if (!partitions)
600		goto exit_parent;
601
602	prefix_len = strlen(prefix);
603	mtd_name_len = strlen(mtd->name);
604
605	/* Search if a partition is defined with the same name */
606	for_each_child_of_node(partitions, mtd_dn) {
607		/* Skip partition with no/wrong prefix */
608		if (!of_node_name_prefix(mtd_dn, prefix))
609			continue;
610
611		/* Labels have priority. Check them first */
612		if (!of_property_read_string(mtd_dn, "label", &pname)) {
613			offset = 0;
614		} else {
615			pname = mtd_dn->name;
616			offset = prefix_len;
617		}
618
619		plen = strlen(pname) - offset;
620		if (plen == mtd_name_len &&
621		    !strncmp(mtd->name, pname + offset, plen)) {
622			mtd_set_of_node(mtd, mtd_dn);
623			break;
624		}
625	}
626
627	of_node_put(partitions);
628exit_parent:
629	of_node_put(parent_dn);
630}
631
632/**
633 *	add_mtd_device - register an MTD device
634 *	@mtd: pointer to new MTD device info structure
635 *
636 *	Add a device to the list of MTD devices present in the system, and
637 *	notify each currently active MTD 'user' of its arrival. Returns
638 *	zero on success or non-zero on failure.
639 */
640
641int add_mtd_device(struct mtd_info *mtd)
642{
643	struct device_node *np = mtd_get_of_node(mtd);
644	struct mtd_info *master = mtd_get_master(mtd);
645	struct mtd_notifier *not;
646	int i, error, ofidx;
647
648	/*
649	 * May occur, for instance, on buggy drivers which call
650	 * mtd_device_parse_register() multiple times on the same master MTD,
651	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
652	 */
653	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
654		return -EEXIST;
655
656	BUG_ON(mtd->writesize == 0);
657
658	/*
659	 * MTD drivers should implement ->_{write,read}() or
660	 * ->_{write,read}_oob(), but not both.
661	 */
662	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
663		    (mtd->_read && mtd->_read_oob)))
664		return -EINVAL;
665
666	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
667		    !(mtd->flags & MTD_NO_ERASE)))
668		return -EINVAL;
669
670	/*
671	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
672	 * master is an MLC NAND and has a proper pairing scheme defined.
673	 * We also reject masters that implement ->_writev() for now, because
674	 * NAND controller drivers don't implement this hook, and adding the
675	 * SLC -> MLC address/length conversion to this path is useless if we
676	 * don't have a user.
677	 */
678	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
679	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
680	     !master->pairing || master->_writev))
681		return -EINVAL;
682
683	mutex_lock(&mtd_table_mutex);
684
685	ofidx = -1;
686	if (np)
687		ofidx = of_alias_get_id(np, "mtd");
688	if (ofidx >= 0)
689		i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
690	else
691		i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
692	if (i < 0) {
693		error = i;
694		goto fail_locked;
695	}
696
697	mtd->index = i;
698	kref_init(&mtd->refcnt);
699
700	/* default value if not set by driver */
701	if (mtd->bitflip_threshold == 0)
702		mtd->bitflip_threshold = mtd->ecc_strength;
703
704	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
705		int ngroups = mtd_pairing_groups(master);
706
707		mtd->erasesize /= ngroups;
708		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
709			    mtd->erasesize;
710	}
711
712	if (is_power_of_2(mtd->erasesize))
713		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
714	else
715		mtd->erasesize_shift = 0;
716
717	if (is_power_of_2(mtd->writesize))
718		mtd->writesize_shift = ffs(mtd->writesize) - 1;
719	else
720		mtd->writesize_shift = 0;
721
722	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
723	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
724
725	/* Some chips always power up locked. Unlock them now */
726	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
727		error = mtd_unlock(mtd, 0, mtd->size);
728		if (error && error != -EOPNOTSUPP)
729			printk(KERN_WARNING
730			       "%s: unlock failed, writes may not work\n",
731			       mtd->name);
732		/* Ignore unlock failures? */
733		error = 0;
734	}
735
736	/* Caller should have set dev.parent to match the
737	 * physical device, if appropriate.
738	 */
739	mtd->dev.type = &mtd_devtype;
740	mtd->dev.class = &mtd_class;
741	mtd->dev.devt = MTD_DEVT(i);
742	dev_set_name(&mtd->dev, "mtd%d", i);
743	dev_set_drvdata(&mtd->dev, mtd);
744	mtd_check_of_node(mtd);
745	of_node_get(mtd_get_of_node(mtd));
746	error = device_register(&mtd->dev);
747	if (error) {
748		put_device(&mtd->dev);
749		goto fail_added;
750	}
751
752	/* Add the nvmem provider */
753	error = mtd_nvmem_add(mtd);
754	if (error)
755		goto fail_nvmem_add;
756
757	mtd_debugfs_populate(mtd);
758
759	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
760		      "mtd%dro", i);
761
762	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
763	/* No need to get a refcount on the module containing
764	   the notifier, since we hold the mtd_table_mutex */
765	list_for_each_entry(not, &mtd_notifiers, list)
766		not->add(mtd);
767
768	mutex_unlock(&mtd_table_mutex);
769
770	if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
771		if (IS_BUILTIN(CONFIG_MTD)) {
772			pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
773			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
774		} else {
775			pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
776				mtd->index, mtd->name);
777		}
778	}
779
780	/* We _know_ we aren't being removed, because
781	   our caller is still holding us here. So none
782	   of this try_ nonsense, and no bitching about it
783	   either. :) */
784	__module_get(THIS_MODULE);
785	return 0;
786
787fail_nvmem_add:
788	device_unregister(&mtd->dev);
789fail_added:
790	of_node_put(mtd_get_of_node(mtd));
791	idr_remove(&mtd_idr, i);
792fail_locked:
793	mutex_unlock(&mtd_table_mutex);
794	return error;
795}
796
797/**
798 *	del_mtd_device - unregister an MTD device
799 *	@mtd: pointer to MTD device info structure
800 *
801 *	Remove a device from the list of MTD devices present in the system,
802 *	and notify each currently active MTD 'user' of its departure.
803 *	Returns zero on success or a negative error code on failure, which
804 *	currently happens if the requested device is not present in the list.
805 */
806
807int del_mtd_device(struct mtd_info *mtd)
808{
809	int ret;
810	struct mtd_notifier *not;
811
812	mutex_lock(&mtd_table_mutex);
813
814	if (idr_find(&mtd_idr, mtd->index) != mtd) {
815		ret = -ENODEV;
816		goto out_error;
817	}
818
819	/* No need to get a refcount on the module containing
820		the notifier, since we hold the mtd_table_mutex */
821	list_for_each_entry(not, &mtd_notifiers, list)
822		not->remove(mtd);
823
824	kref_put(&mtd->refcnt, mtd_device_release);
825	ret = 0;
826
827out_error:
828	mutex_unlock(&mtd_table_mutex);
829	return ret;
830}
831
832/*
833 * Set a few defaults based on the parent devices, if not provided by the
834 * driver
835 */
836static void mtd_set_dev_defaults(struct mtd_info *mtd)
837{
838	if (mtd->dev.parent) {
839		if (!mtd->owner && mtd->dev.parent->driver)
840			mtd->owner = mtd->dev.parent->driver->owner;
841		if (!mtd->name)
842			mtd->name = dev_name(mtd->dev.parent);
843	} else {
844		pr_debug("mtd device won't show a device symlink in sysfs\n");
845	}
846
847	INIT_LIST_HEAD(&mtd->partitions);
848	mutex_init(&mtd->master.partitions_lock);
849	mutex_init(&mtd->master.chrdev_lock);
850}
851
852static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
853{
854	struct otp_info *info;
855	ssize_t size = 0;
856	unsigned int i;
857	size_t retlen;
858	int ret;
859
860	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
861	if (!info)
862		return -ENOMEM;
863
864	if (is_user)
865		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
866	else
867		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
868	if (ret)
869		goto err;
870
871	for (i = 0; i < retlen / sizeof(*info); i++)
872		size += info[i].length;
873
874	kfree(info);
875	return size;
876
877err:
878	kfree(info);
879
880	/* ENODATA means there is no OTP region. */
881	return ret == -ENODATA ? 0 : ret;
882}
883
884static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
885						   const char *compatible,
886						   int size,
887						   nvmem_reg_read_t reg_read)
888{
889	struct nvmem_device *nvmem = NULL;
890	struct nvmem_config config = {};
891	struct device_node *np;
892
893	/* DT binding is optional */
894	np = of_get_compatible_child(mtd->dev.of_node, compatible);
895
896	/* OTP nvmem will be registered on the physical device */
897	config.dev = mtd->dev.parent;
898	config.name = compatible;
899	config.id = NVMEM_DEVID_AUTO;
900	config.owner = THIS_MODULE;
901	config.type = NVMEM_TYPE_OTP;
902	config.root_only = true;
903	config.ignore_wp = true;
904	config.reg_read = reg_read;
905	config.size = size;
906	config.of_node = np;
907	config.priv = mtd;
908
909	nvmem = nvmem_register(&config);
910	/* Just ignore if there is no NVMEM support in the kernel */
911	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
912		nvmem = NULL;
913
914	of_node_put(np);
915
916	return nvmem;
917}
918
919static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
920				       void *val, size_t bytes)
921{
922	struct mtd_info *mtd = priv;
923	size_t retlen;
924	int ret;
925
926	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
927	if (ret)
928		return ret;
929
930	return retlen == bytes ? 0 : -EIO;
931}
932
933static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
934				       void *val, size_t bytes)
935{
936	struct mtd_info *mtd = priv;
937	size_t retlen;
938	int ret;
939
940	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
941	if (ret)
942		return ret;
943
944	return retlen == bytes ? 0 : -EIO;
945}
946
947static int mtd_otp_nvmem_add(struct mtd_info *mtd)
948{
949	struct device *dev = mtd->dev.parent;
950	struct nvmem_device *nvmem;
951	ssize_t size;
952	int err;
953
954	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
955		size = mtd_otp_size(mtd, true);
956		if (size < 0)
957			return size;
958
959		if (size > 0) {
960			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
961						       mtd_nvmem_user_otp_reg_read);
962			if (IS_ERR(nvmem)) {
963				err = PTR_ERR(nvmem);
964				goto err;
965			}
966			mtd->otp_user_nvmem = nvmem;
967		}
968	}
969
970	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
971		size = mtd_otp_size(mtd, false);
972		if (size < 0) {
973			err = size;
974			goto err;
975		}
976
977		if (size > 0) {
978			/*
979			 * The factory OTP contains things such as a unique serial
980			 * number and is small, so let's read it out and put it
981			 * into the entropy pool.
982			 */
983			void *otp;
984
985			otp = kmalloc(size, GFP_KERNEL);
986			if (!otp) {
987				err = -ENOMEM;
988				goto err;
989			}
990			err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
991			if (err < 0) {
992				kfree(otp);
993				goto err;
994			}
995			add_device_randomness(otp, err);
996			kfree(otp);
997
998			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
999						       mtd_nvmem_fact_otp_reg_read);
1000			if (IS_ERR(nvmem)) {
1001				err = PTR_ERR(nvmem);
1002				goto err;
1003			}
1004			mtd->otp_factory_nvmem = nvmem;
1005		}
1006	}
1007
1008	return 0;
1009
1010err:
1011	nvmem_unregister(mtd->otp_user_nvmem);
1012	return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
1013}
1014
1015/**
1016 * mtd_device_parse_register - parse partitions and register an MTD device.
1017 *
1018 * @mtd: the MTD device to register
1019 * @types: the list of MTD partition probes to try, see
1020 *         'parse_mtd_partitions()' for more information
1021 * @parser_data: MTD partition parser-specific data
1022 * @parts: fallback partition information to register, if parsing fails;
1023 *         only valid if %nr_parts > %0
1024 * @nr_parts: the number of partitions in parts, if zero then the full
1025 *            MTD device is registered if no partition info is found
1026 *
1027 * This function aggregates MTD partitions parsing (done by
1028 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
1029 * basically follows the most common pattern found in many MTD drivers:
1030 *
1031 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
1032 *   registered first.
1033 * * Then it tries to probe partitions on MTD device @mtd using parsers
1034 *   specified in @types (if @types is %NULL, then the default list of parsers
1035 *   is used, see 'parse_mtd_partitions()' for more information). If none are
1036 *   found, this function tries to fall back to the information specified in
1037 *   @parts/@nr_parts.
1038 * * If no partitions were found this function just registers the MTD device
1039 *   @mtd and exits.
1040 *
1041 * Returns zero in case of success and a negative error code in case of failure.
1042 */
1043int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
1044			      struct mtd_part_parser_data *parser_data,
1045			      const struct mtd_partition *parts,
1046			      int nr_parts)
1047{
1048	int ret;
1049
1050	mtd_set_dev_defaults(mtd);
1051
1052	ret = mtd_otp_nvmem_add(mtd);
1053	if (ret)
1054		goto out;
1055
1056	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
1057		ret = add_mtd_device(mtd);
1058		if (ret)
1059			goto out;
1060	}
1061
1062	/* Prefer parsed partitions over driver-provided fallback */
1063	ret = parse_mtd_partitions(mtd, types, parser_data);
1064	if (ret == -EPROBE_DEFER)
1065		goto out;
1066
1067	if (ret > 0)
1068		ret = 0;
1069	else if (nr_parts)
1070		ret = add_mtd_partitions(mtd, parts, nr_parts);
1071	else if (!device_is_registered(&mtd->dev))
1072		ret = add_mtd_device(mtd);
1073	else
1074		ret = 0;
1075
1076	if (ret)
1077		goto out;
1078
1079	/*
1080	 * FIXME: some drivers unfortunately call this function more than once.
1081	 * So we have to check if we've already assigned the reboot notifier.
1082	 *
1083	 * Generally, we can make multiple calls work for most cases, but it
1084	 * does cause problems with parse_mtd_partitions() above (e.g.,
1085	 * cmdlineparts will register partitions more than once).
1086	 */
1087	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
1088		  "MTD already registered\n");
1089	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
1090		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
1091		register_reboot_notifier(&mtd->reboot_notifier);
1092	}
1093
1094out:
1095	if (ret) {
1096		nvmem_unregister(mtd->otp_user_nvmem);
1097		nvmem_unregister(mtd->otp_factory_nvmem);
1098	}
1099
1100	if (ret && device_is_registered(&mtd->dev))
1101		del_mtd_device(mtd);
1102
1103	return ret;
1104}
1105EXPORT_SYMBOL_GPL(mtd_device_parse_register);
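
/*
 * Illustrative usage sketch (not part of this file): the common registration
 * pattern in a flash driver probe path, with a driver-provided fallback
 * partition table. 'my_parts' and 'priv->mtd' are hypothetical driver data.
 *
 * static const struct mtd_partition my_parts[] = {
 *	{ .name = "boot", .offset = 0, .size = SZ_1M },
 *	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *	  .size = MTDPART_SIZ_FULL },
 * };
 *
 * ret = mtd_device_parse_register(&priv->mtd, NULL, NULL,
 *				   my_parts, ARRAY_SIZE(my_parts));
 * if (ret)
 *	return ret;
 */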
1106
1107/**
1108 * mtd_device_unregister - unregister an existing MTD device.
1109 *
1110 * @master: the MTD device to unregister.  This will unregister both the master
1111 *          and any partitions if registered.
1112 */
1113int mtd_device_unregister(struct mtd_info *master)
1114{
1115	int err;
1116
1117	if (master->_reboot) {
1118		unregister_reboot_notifier(&master->reboot_notifier);
1119		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
1120	}
1121
1122	nvmem_unregister(master->otp_user_nvmem);
1123	nvmem_unregister(master->otp_factory_nvmem);
1124
1125	err = del_mtd_partitions(master);
1126	if (err)
1127		return err;
1128
1129	if (!device_is_registered(&master->dev))
1130		return 0;
1131
1132	return del_mtd_device(master);
1133}
1134EXPORT_SYMBOL_GPL(mtd_device_unregister);
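
/*
 * Illustrative usage sketch (not part of this file): the matching teardown in
 * a driver remove path; many drivers warn here because unregistration is
 * expected to succeed. 'priv->mtd' is hypothetical driver data.
 *
 * WARN_ON(mtd_device_unregister(&priv->mtd));
 */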
1135
1136/**
1137 *	register_mtd_user - register a 'user' of MTD devices.
1138 *	@new: pointer to notifier info structure
1139 *
1140 *	Registers a pair of callback functions to be called upon addition
1141 *	or removal of MTD devices. Causes the 'add' callback to be immediately
1142 *	invoked for each MTD device currently present in the system.
1143 */
1144void register_mtd_user (struct mtd_notifier *new)
1145{
1146	struct mtd_info *mtd;
1147
1148	mutex_lock(&mtd_table_mutex);
1149
1150	list_add(&new->list, &mtd_notifiers);
1151
1152	__module_get(THIS_MODULE);
1153
1154	mtd_for_each_device(mtd)
1155		new->add(mtd);
1156
1157	mutex_unlock(&mtd_table_mutex);
1158}
1159EXPORT_SYMBOL_GPL(register_mtd_user);
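
/*
 * Illustrative usage sketch (not part of this file): a minimal MTD 'user'.
 * register_mtd_user() immediately replays ->add() for every device that is
 * already present. The my_* identifiers are hypothetical.
 *
 * static void my_add(struct mtd_info *mtd)
 * {
 *	pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 * }
 *
 * static void my_remove(struct mtd_info *mtd)
 * {
 *	pr_info("mtd%d (%s) is going away\n", mtd->index, mtd->name);
 * }
 *
 * static struct mtd_notifier my_notifier = {
 *	.add = my_add,
 *	.remove = my_remove,
 * };
 *
 * register_mtd_user(&my_notifier);
 */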
1160
1161/**
1162 *	unregister_mtd_user - unregister a 'user' of MTD devices.
1163 *	@old: pointer to notifier info structure
1164 *
1165 *	Removes a callback function pair from the list of 'users' to be
1166 *	notified upon addition or removal of MTD devices. Causes the
1167 *	'remove' callback to be immediately invoked for each MTD device
1168 *	currently present in the system.
1169 */
1170int unregister_mtd_user (struct mtd_notifier *old)
1171{
1172	struct mtd_info *mtd;
1173
1174	mutex_lock(&mtd_table_mutex);
1175
1176	module_put(THIS_MODULE);
1177
1178	mtd_for_each_device(mtd)
1179		old->remove(mtd);
1180
1181	list_del(&old->list);
1182	mutex_unlock(&mtd_table_mutex);
1183	return 0;
1184}
1185EXPORT_SYMBOL_GPL(unregister_mtd_user);
1186
1187/**
1188 *	get_mtd_device - obtain a validated handle for an MTD device
1189 *	@mtd: last known address of the required MTD device
1190 *	@num: internal device number of the required MTD device
1191 *
1192 *	Given a number and NULL address, return the num'th entry in the device
1193 *	table, if any.	Given an address and num == -1, search the device table
1194 *	for a device with that address and return it if it's still present. Given
1195 *	both, return the num'th driver only if its address matches. Return
1196 *	error code if not.
1197 */
1198struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
1199{
1200	struct mtd_info *ret = NULL, *other;
1201	int err = -ENODEV;
1202
1203	mutex_lock(&mtd_table_mutex);
1204
1205	if (num == -1) {
1206		mtd_for_each_device(other) {
1207			if (other == mtd) {
1208				ret = mtd;
1209				break;
1210			}
1211		}
1212	} else if (num >= 0) {
1213		ret = idr_find(&mtd_idr, num);
1214		if (mtd && mtd != ret)
1215			ret = NULL;
1216	}
1217
1218	if (!ret) {
1219		ret = ERR_PTR(err);
1220		goto out;
1221	}
1222
1223	err = __get_mtd_device(ret);
1224	if (err)
1225		ret = ERR_PTR(err);
1226out:
1227	mutex_unlock(&mtd_table_mutex);
1228	return ret;
1229}
1230EXPORT_SYMBOL_GPL(get_mtd_device);
1231
1232
1233int __get_mtd_device(struct mtd_info *mtd)
1234{
1235	struct mtd_info *master = mtd_get_master(mtd);
1236	int err;
1237
1238	if (master->_get_device) {
1239		err = master->_get_device(mtd);
1240		if (err)
1241			return err;
1242	}
1243
1244	if (!try_module_get(master->owner)) {
1245		if (master->_put_device)
1246			master->_put_device(master);
1247		return -ENODEV;
1248	}
1249
1250	while (mtd) {
1251		if (mtd != master)
1252			kref_get(&mtd->refcnt);
1253		mtd = mtd->parent;
1254	}
1255
1256	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
1257		kref_get(&master->refcnt);
1258
1259	return 0;
1260}
1261EXPORT_SYMBOL_GPL(__get_mtd_device);
1262
1263/**
1264 * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
1265 *
1266 * @np: device tree node
1267 */
1268struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
1269{
1270	struct mtd_info *mtd = NULL;
1271	struct mtd_info *tmp;
1272	int err;
1273
1274	mutex_lock(&mtd_table_mutex);
1275
1276	err = -EPROBE_DEFER;
1277	mtd_for_each_device(tmp) {
1278		if (mtd_get_of_node(tmp) == np) {
1279			mtd = tmp;
1280			err = __get_mtd_device(mtd);
1281			break;
1282		}
1283	}
1284
1285	mutex_unlock(&mtd_table_mutex);
1286
1287	return err ? ERR_PTR(err) : mtd;
1288}
1289EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);
1290
1291/**
1292 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
1293 *	device name
1294 *	@name: MTD device name to open
1295 *
1296 * 	This function returns the MTD device description structure in case of
1297 * 	success and an error code in case of failure.
1298 */
1299struct mtd_info *get_mtd_device_nm(const char *name)
1300{
1301	int err = -ENODEV;
1302	struct mtd_info *mtd = NULL, *other;
1303
1304	mutex_lock(&mtd_table_mutex);
1305
1306	mtd_for_each_device(other) {
1307		if (!strcmp(name, other->name)) {
1308			mtd = other;
1309			break;
1310		}
1311	}
1312
1313	if (!mtd)
1314		goto out_unlock;
1315
1316	err = __get_mtd_device(mtd);
1317	if (err)
1318		goto out_unlock;
1319
1320	mutex_unlock(&mtd_table_mutex);
1321	return mtd;
1322
1323out_unlock:
1324	mutex_unlock(&mtd_table_mutex);
1325	return ERR_PTR(err);
1326}
1327EXPORT_SYMBOL_GPL(get_mtd_device_nm);
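
/*
 * Illustrative usage sketch (not part of this file): looking a device up by
 * name and dropping the reference when done. "environment" is a hypothetical
 * MTD/partition name.
 *
 * struct mtd_info *mtd = get_mtd_device_nm("environment");
 *
 * if (IS_ERR(mtd))
 *	return PTR_ERR(mtd);
 * // ... use mtd_read()/mtd_write() on it ...
 * put_mtd_device(mtd);
 */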
1328
1329void put_mtd_device(struct mtd_info *mtd)
1330{
1331	mutex_lock(&mtd_table_mutex);
1332	__put_mtd_device(mtd);
1333	mutex_unlock(&mtd_table_mutex);
1334
1335}
1336EXPORT_SYMBOL_GPL(put_mtd_device);
1337
1338void __put_mtd_device(struct mtd_info *mtd)
1339{
1340	struct mtd_info *master = mtd_get_master(mtd);
1341
1342	while (mtd) {
1343		/* kref_put() can release mtd, so keep a reference to mtd->parent */
1344		struct mtd_info *parent = mtd->parent;
1345
1346		if (mtd != master)
1347			kref_put(&mtd->refcnt, mtd_device_release);
1348		mtd = parent;
1349	}
1350
1351	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
1352		kref_put(&master->refcnt, mtd_device_release);
1353
1354	module_put(master->owner);
1355
1356	/* must be the last as master can be freed in the _put_device */
1357	if (master->_put_device)
1358		master->_put_device(master);
1359}
1360EXPORT_SYMBOL_GPL(__put_mtd_device);
1361
1362/*
1363 * Erase is a synchronous operation. Device drivers are expected to return a
1364 * negative error code if the operation failed and update instr->fail_addr
1365 * to point to the portion that was not properly erased.
1366 */
1367int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1368{
1369	struct mtd_info *master = mtd_get_master(mtd);
1370	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1371	struct erase_info adjinstr;
1372	int ret;
1373
1374	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1375	adjinstr = *instr;
1376
1377	if (!mtd->erasesize || !master->_erase)
1378		return -ENOTSUPP;
1379
1380	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1381		return -EINVAL;
1382	if (!(mtd->flags & MTD_WRITEABLE))
1383		return -EROFS;
1384
1385	if (!instr->len)
1386		return 0;
1387
1388	ledtrig_mtd_activity();
1389
1390	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1391		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1392				master->erasesize;
1393		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1394				master->erasesize) -
1395			       adjinstr.addr;
1396	}
1397
1398	adjinstr.addr += mst_ofs;
1399
1400	ret = master->_erase(master, &adjinstr);
1401
1402	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1403		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1404		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1405			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1406							 master);
1407			instr->fail_addr *= mtd->erasesize;
1408		}
1409	}
1410
1411	return ret;
1412}
1413EXPORT_SYMBOL_GPL(mtd_erase);
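
/*
 * Illustrative usage sketch (not part of this file): erasing a single
 * eraseblock at a hypothetical, eraseblock-aligned offset 'ofs'.
 *
 * struct erase_info ei = {
 *	.addr = ofs,
 *	.len = mtd->erasesize,
 * };
 *
 * ret = mtd_erase(mtd, &ei);
 * if (ret)
 *	pr_err("erase of 0x%llx failed: %d\n", (unsigned long long)ei.addr, ret);
 */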
1414
1415/*
1416 * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
1417 */
1418int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1419	      void **virt, resource_size_t *phys)
1420{
1421	struct mtd_info *master = mtd_get_master(mtd);
1422
1423	*retlen = 0;
1424	*virt = NULL;
1425	if (phys)
1426		*phys = 0;
1427	if (!master->_point)
1428		return -EOPNOTSUPP;
1429	if (from < 0 || from >= mtd->size || len > mtd->size - from)
1430		return -EINVAL;
1431	if (!len)
1432		return 0;
1433
1434	from = mtd_get_master_ofs(mtd, from);
1435	return master->_point(master, from, len, retlen, virt, phys);
1436}
1437EXPORT_SYMBOL_GPL(mtd_point);
1438
1439/* We probably shouldn't allow XIP if the unpoint isn't NULL */
1440int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1441{
1442	struct mtd_info *master = mtd_get_master(mtd);
1443
1444	if (!master->_unpoint)
1445		return -EOPNOTSUPP;
1446	if (from < 0 || from >= mtd->size || len > mtd->size - from)
1447		return -EINVAL;
1448	if (!len)
1449		return 0;
1450	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1451}
1452EXPORT_SYMBOL_GPL(mtd_unpoint);
1453
1454/*
1455 * Allow NOMMU mmap() to directly map the device (if not NULL)
1456 * - return the address to which the offset maps
1457 * - return -ENOSYS to indicate refusal to do the mapping
1458 */
1459unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1460				    unsigned long offset, unsigned long flags)
1461{
1462	size_t retlen;
1463	void *virt;
1464	int ret;
1465
1466	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1467	if (ret)
1468		return ret;
1469	if (retlen != len) {
1470		mtd_unpoint(mtd, offset, retlen);
1471		return -ENOSYS;
1472	}
1473	return (unsigned long)virt;
1474}
1475EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1476
1477static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1478				 const struct mtd_ecc_stats *old_stats)
1479{
1480	struct mtd_ecc_stats diff;
1481
1482	if (master == mtd)
1483		return;
1484
1485	diff = master->ecc_stats;
1486	diff.failed -= old_stats->failed;
1487	diff.corrected -= old_stats->corrected;
1488
1489	while (mtd->parent) {
1490		mtd->ecc_stats.failed += diff.failed;
1491		mtd->ecc_stats.corrected += diff.corrected;
1492		mtd = mtd->parent;
1493	}
1494}
1495
1496int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1497	     u_char *buf)
1498{
1499	struct mtd_oob_ops ops = {
1500		.len = len,
1501		.datbuf = buf,
1502	};
1503	int ret;
1504
1505	ret = mtd_read_oob(mtd, from, &ops);
1506	*retlen = ops.retlen;
1507
1508	return ret;
1509}
1510EXPORT_SYMBOL_GPL(mtd_read);
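
/*
 * Illustrative usage sketch (not part of this file): a read that treats
 * -EUCLEAN (bitflips at or above the threshold, but fully corrected) as a
 * success that should prompt the caller to rewrite the data elsewhere.
 * 'buf', 'ofs' and 'len' are hypothetical.
 *
 * size_t retlen;
 * int ret = mtd_read(mtd, ofs, len, &retlen, buf);
 *
 * if (ret == -EUCLEAN)
 *	pr_warn("bitflips corrected, consider rewriting this area\n");
 * else if (ret < 0)
 *	return ret;
 */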
1511
1512int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1513	      const u_char *buf)
1514{
1515	struct mtd_oob_ops ops = {
1516		.len = len,
1517		.datbuf = (u8 *)buf,
1518	};
1519	int ret;
1520
1521	ret = mtd_write_oob(mtd, to, &ops);
1522	*retlen = ops.retlen;
1523
1524	return ret;
1525}
1526EXPORT_SYMBOL_GPL(mtd_write);
1527
1528/*
1529 * In blackbox flight recorder like scenarios we want to make successful writes
1530 * in interrupt context. panic_write() is only intended to be called when it is
1531 * known the kernel is about to panic and we need the write to succeed. Since
1532 * the kernel is not going to be running for much longer, this function can
1533 * break locks and delay to ensure the write succeeds (but not sleep).
1534 */
1535int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1536		    const u_char *buf)
1537{
1538	struct mtd_info *master = mtd_get_master(mtd);
1539
1540	*retlen = 0;
1541	if (!master->_panic_write)
1542		return -EOPNOTSUPP;
1543	if (to < 0 || to >= mtd->size || len > mtd->size - to)
1544		return -EINVAL;
1545	if (!(mtd->flags & MTD_WRITEABLE))
1546		return -EROFS;
1547	if (!len)
1548		return 0;
1549	if (!master->oops_panic_write)
1550		master->oops_panic_write = true;
1551
1552	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1553				    retlen, buf);
1554}
1555EXPORT_SYMBOL_GPL(mtd_panic_write);
1556
1557static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1558			     struct mtd_oob_ops *ops)
1559{
1560	/*
1561	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1562	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1563	 *  this case.
1564	 */
1565	if (!ops->datbuf)
1566		ops->len = 0;
1567
1568	if (!ops->oobbuf)
1569		ops->ooblen = 0;
1570
1571	if (offs < 0 || offs + ops->len > mtd->size)
1572		return -EINVAL;
1573
1574	if (ops->ooblen) {
1575		size_t maxooblen;
1576
1577		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1578			return -EINVAL;
1579
1580		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1581				      mtd_div_by_ws(offs, mtd)) *
1582			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
1583		if (ops->ooblen > maxooblen)
1584			return -EINVAL;
1585	}
1586
1587	return 0;
1588}
1589
1590static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1591			    struct mtd_oob_ops *ops)
1592{
1593	struct mtd_info *master = mtd_get_master(mtd);
1594	int ret;
1595
1596	from = mtd_get_master_ofs(mtd, from);
1597	if (master->_read_oob)
1598		ret = master->_read_oob(master, from, ops);
1599	else
1600		ret = master->_read(master, from, ops->len, &ops->retlen,
1601				    ops->datbuf);
1602
1603	return ret;
1604}
1605
1606static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1607			     struct mtd_oob_ops *ops)
1608{
1609	struct mtd_info *master = mtd_get_master(mtd);
1610	int ret;
1611
1612	to = mtd_get_master_ofs(mtd, to);
1613	if (master->_write_oob)
1614		ret = master->_write_oob(master, to, ops);
1615	else
1616		ret = master->_write(master, to, ops->len, &ops->retlen,
1617				     ops->datbuf);
1618
1619	return ret;
1620}
1621
1622static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1623			       struct mtd_oob_ops *ops)
1624{
1625	struct mtd_info *master = mtd_get_master(mtd);
1626	int ngroups = mtd_pairing_groups(master);
1627	int npairs = mtd_wunit_per_eb(master) / ngroups;
1628	struct mtd_oob_ops adjops = *ops;
1629	unsigned int wunit, oobavail;
1630	struct mtd_pairing_info info;
1631	int max_bitflips = 0;
1632	u32 ebofs, pageofs;
1633	loff_t base, pos;
1634
1635	ebofs = mtd_mod_by_eb(start, mtd);
1636	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1637	info.group = 0;
1638	info.pair = mtd_div_by_ws(ebofs, mtd);
1639	pageofs = mtd_mod_by_ws(ebofs, mtd);
1640	oobavail = mtd_oobavail(mtd, ops);
1641
1642	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1643		int ret;
1644
1645		if (info.pair >= npairs) {
1646			info.pair = 0;
1647			base += master->erasesize;
1648		}
1649
1650		wunit = mtd_pairing_info_to_wunit(master, &info);
1651		pos = mtd_wunit_to_offset(mtd, base, wunit);
1652
1653		adjops.len = ops->len - ops->retlen;
1654		if (adjops.len > mtd->writesize - pageofs)
1655			adjops.len = mtd->writesize - pageofs;
1656
1657		adjops.ooblen = ops->ooblen - ops->oobretlen;
1658		if (adjops.ooblen > oobavail - adjops.ooboffs)
1659			adjops.ooblen = oobavail - adjops.ooboffs;
1660
1661		if (read) {
1662			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1663			if (ret > 0)
1664				max_bitflips = max(max_bitflips, ret);
1665		} else {
1666			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1667		}
1668
1669		if (ret < 0)
1670			return ret;
1671
1672		max_bitflips = max(max_bitflips, ret);
1673		ops->retlen += adjops.retlen;
1674		ops->oobretlen += adjops.oobretlen;
1675		adjops.datbuf += adjops.retlen;
1676		adjops.oobbuf += adjops.oobretlen;
1677		adjops.ooboffs = 0;
1678		pageofs = 0;
1679		info.pair++;
1680	}
1681
1682	return max_bitflips;
1683}
1684
1685int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1686{
1687	struct mtd_info *master = mtd_get_master(mtd);
1688	struct mtd_ecc_stats old_stats = master->ecc_stats;
1689	int ret_code;
1690
1691	ops->retlen = ops->oobretlen = 0;
1692
1693	ret_code = mtd_check_oob_ops(mtd, from, ops);
1694	if (ret_code)
1695		return ret_code;
1696
1697	ledtrig_mtd_activity();
1698
1699	/* Check the validity of a potential fallback on mtd->_read */
1700	if (!master->_read_oob && (!master->_read || ops->oobbuf))
1701		return -EOPNOTSUPP;
1702
1703	if (ops->stats)
1704		memset(ops->stats, 0, sizeof(*ops->stats));
1705
1706	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1707		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1708	else
1709		ret_code = mtd_read_oob_std(mtd, from, ops);
1710
1711	mtd_update_ecc_stats(mtd, master, &old_stats);
1712
1713	/*
1714	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1715	 * similar to mtd->_read(), returning a non-negative integer
1716	 * representing max bitflips. In other cases, mtd->_read_oob() may
1717	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1718	 */
1719	if (unlikely(ret_code < 0))
1720		return ret_code;
1721	if (mtd->ecc_strength == 0)
1722		return 0;	/* device lacks ecc */
1723	if (ops->stats)
1724		ops->stats->max_bitflips = ret_code;
1725	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1726}
1727EXPORT_SYMBOL_GPL(mtd_read_oob);
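
/*
 * Illustrative usage sketch (not part of this file): reading one page of data
 * plus its available OOB bytes in a single call. 'databuf' and 'oobbuf' are
 * hypothetical buffers of at least mtd->writesize and mtd->oobavail bytes,
 * and 'page_offset' a hypothetical page-aligned offset.
 *
 * struct mtd_oob_ops ops = {
 *	.mode = MTD_OPS_AUTO_OOB,
 *	.datbuf = databuf,
 *	.len = mtd->writesize,
 *	.oobbuf = oobbuf,
 *	.ooblen = mtd->oobavail,
 * };
 *
 * ret = mtd_read_oob(mtd, page_offset, &ops);
 */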
1728
1729int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1730				struct mtd_oob_ops *ops)
1731{
1732	struct mtd_info *master = mtd_get_master(mtd);
1733	int ret;
1734
1735	ops->retlen = ops->oobretlen = 0;
1736
1737	if (!(mtd->flags & MTD_WRITEABLE))
1738		return -EROFS;
1739
1740	ret = mtd_check_oob_ops(mtd, to, ops);
1741	if (ret)
1742		return ret;
1743
1744	ledtrig_mtd_activity();
1745
1746	/* Check the validity of a potential fallback on mtd->_write */
1747	if (!master->_write_oob && (!master->_write || ops->oobbuf))
1748		return -EOPNOTSUPP;
1749
1750	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1751		return mtd_io_emulated_slc(mtd, to, false, ops);
1752
1753	return mtd_write_oob_std(mtd, to, ops);
1754}
1755EXPORT_SYMBOL_GPL(mtd_write_oob);
1756
1757/**
1758 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1759 * @mtd: MTD device structure
1760 * @section: ECC section. Depending on the layout you may have all the ECC
1761 *	     bytes stored in a single contiguous section, or one section
1762 *	     per ECC chunk (and sometimes several sections for a single
1763 *	     ECC chunk)
1764 * @oobecc: OOB region struct filled with the appropriate ECC position
1765 *	    information
1766 *
1767 * This function returns ECC section information in the OOB area. If you want
1768 * to get all the ECC bytes information, then you should call
1769 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1770 *
1771 * Returns zero on success, a negative error code otherwise.
1772 */
1773int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1774		      struct mtd_oob_region *oobecc)
1775{
1776	struct mtd_info *master = mtd_get_master(mtd);
1777
1778	memset(oobecc, 0, sizeof(*oobecc));
1779
1780	if (!master || section < 0)
1781		return -EINVAL;
1782
1783	if (!master->ooblayout || !master->ooblayout->ecc)
1784		return -ENOTSUPP;
1785
1786	return master->ooblayout->ecc(master, section, oobecc);
1787}
1788EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
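
/*
 * Illustrative usage sketch (not part of this file): walking every ECC
 * section of the OOB layout until the iterator reports an error (-ERANGE
 * once all sections are consumed), as the kernel-doc above suggests.
 *
 * struct mtd_oob_region region;
 * int section = 0;
 *
 * while (!mtd_ooblayout_ecc(mtd, section++, &region))
 *	pr_info("ECC bytes at OOB offset %u, length %u\n",
 *		region.offset, region.length);
 */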
1789
1790/**
1791 * mtd_ooblayout_free - Get the OOB region definition of a specific free
1792 *			section
1793 * @mtd: MTD device structure
1794 * @section: Free section you are interested in. Depending on the layout
1795 *	     you may have all the free bytes stored in a single contiguous
1796 *	     section, or one section per ECC chunk plus an extra section
1797 *	     for the remaining bytes (or other funky layout).
1798 * @oobfree: OOB region struct filled with the appropriate free position
1799 *	     information
1800 *
1801 * This function returns free bytes position in the OOB area. If you want
1802 * to get all the free bytes information, then you should call
1803 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1804 *
1805 * Returns zero on success, a negative error code otherwise.
1806 */
1807int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1808		       struct mtd_oob_region *oobfree)
1809{
1810	struct mtd_info *master = mtd_get_master(mtd);
1811
1812	memset(oobfree, 0, sizeof(*oobfree));
1813
1814	if (!master || section < 0)
1815		return -EINVAL;
1816
1817	if (!master->ooblayout || !master->ooblayout->free)
1818		return -ENOTSUPP;
1819
1820	return master->ooblayout->free(master, section, oobfree);
1821}
1822EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1823
1824/**
1825 * mtd_ooblayout_find_region - Find the region attached to a specific byte
1826 * @mtd: mtd info structure
1827 * @byte: the byte we are searching for
1828 * @sectionp: pointer where the section id will be stored
1829 * @oobregion: used to retrieve the ECC position
1830 * @iter: iterator function. Should be either mtd_ooblayout_free or
1831 *	  mtd_ooblayout_ecc depending on the region type you're searching for
1832 *
1833 * This function returns the section id and oobregion information of a
1834 * specific byte. For example, say you want to know where the 4th ECC byte is
1835 * stored, you'll use:
1836 *
1837 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1838 *
1839 * Returns zero on success, a negative error code otherwise.
1840 */
1841static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1842				int *sectionp, struct mtd_oob_region *oobregion,
1843				int (*iter)(struct mtd_info *,
1844					    int section,
1845					    struct mtd_oob_region *oobregion))
1846{
1847	int pos = 0, ret, section = 0;
1848
1849	memset(oobregion, 0, sizeof(*oobregion));
1850
1851	while (1) {
1852		ret = iter(mtd, section, oobregion);
1853		if (ret)
1854			return ret;
1855
1856		if (pos + oobregion->length > byte)
1857			break;
1858
1859		pos += oobregion->length;
1860		section++;
1861	}
1862
1863	/*
1864	 * Adjust region info to make it start at the beginning of the
1865	 * 'start' ECC byte.
1866	 */
1867	oobregion->offset += byte - pos;
1868	oobregion->length -= byte - pos;
1869	*sectionp = section;
1870
1871	return 0;
1872}
1873
1874/**
1875 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1876 *				  ECC byte
1877 * @mtd: mtd info structure
1878 * @eccbyte: the byte we are searching for
1879 * @section: pointer where the section id will be stored
1880 * @oobregion: OOB region information
1881 *
1882 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1883 * byte.
1884 *
1885 * Returns zero on success, a negative error code otherwise.
1886 */
1887int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1888				 int *section,
1889				 struct mtd_oob_region *oobregion)
1890{
1891	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1892					 mtd_ooblayout_ecc);
1893}
1894EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1895
1896/**
1897 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1898 * @mtd: mtd info structure
1899 * @buf: destination buffer to store OOB bytes
1900 * @oobbuf: OOB buffer
1901 * @start: first byte to retrieve
1902 * @nbytes: number of bytes to retrieve
1903 * @iter: section iterator
1904 *
1905 * Extract bytes attached to a specific category (ECC or free)
1906 * from the OOB buffer and copy them into buf.
1907 *
1908 * Returns zero on success, a negative error code otherwise.
1909 */
1910static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1911				const u8 *oobbuf, int start, int nbytes,
1912				int (*iter)(struct mtd_info *,
1913					    int section,
1914					    struct mtd_oob_region *oobregion))
1915{
1916	struct mtd_oob_region oobregion;
1917	int section, ret;
1918
1919	ret = mtd_ooblayout_find_region(mtd, start, &section,
1920					&oobregion, iter);
1921
1922	while (!ret) {
1923		int cnt;
1924
1925		cnt = min_t(int, nbytes, oobregion.length);
1926		memcpy(buf, oobbuf + oobregion.offset, cnt);
1927		buf += cnt;
1928		nbytes -= cnt;
1929
1930		if (!nbytes)
1931			break;
1932
1933		ret = iter(mtd, ++section, &oobregion);
1934	}
1935
1936	return ret;
1937}
1938
1939/**
1940 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1941 * @mtd: mtd info structure
1942 * @buf: source buffer to get OOB bytes from
1943 * @oobbuf: OOB buffer
1944 * @start: first OOB byte to set
1945 * @nbytes: number of OOB bytes to set
1946 * @iter: section iterator
1947 *
1948 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1949 * is selected by passing the appropriate iterator.
1950 *
1951 * Returns zero on success, a negative error code otherwise.
1952 */
1953static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1954				u8 *oobbuf, int start, int nbytes,
1955				int (*iter)(struct mtd_info *,
1956					    int section,
1957					    struct mtd_oob_region *oobregion))
1958{
1959	struct mtd_oob_region oobregion;
1960	int section, ret;
1961
1962	ret = mtd_ooblayout_find_region(mtd, start, &section,
1963					&oobregion, iter);
1964
1965	while (!ret) {
1966		int cnt;
1967
1968		cnt = min_t(int, nbytes, oobregion.length);
1969		memcpy(oobbuf + oobregion.offset, buf, cnt);
1970		buf += cnt;
1971		nbytes -= cnt;
1972
1973		if (!nbytes)
1974			break;
1975
1976		ret = iter(mtd, ++section, &oobregion);
1977	}
1978
1979	return ret;
1980}
1981
1982/**
1983 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
1984 * @mtd: mtd info structure
1985 * @iter: category iterator
1986 *
1987 * Count the number of bytes in a given category.
1988 *
1989 * Returns the byte count on success, a negative error code otherwise.
1990 */
1991static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1992				int (*iter)(struct mtd_info *,
1993					    int section,
1994					    struct mtd_oob_region *oobregion))
1995{
1996	struct mtd_oob_region oobregion;
1997	int section = 0, ret, nbytes = 0;
1998
1999	while (1) {
2000		ret = iter(mtd, section++, &oobregion);
2001		if (ret) {
2002			if (ret == -ERANGE)
2003				ret = nbytes;
2004			break;
2005		}
2006
2007		nbytes += oobregion.length;
2008	}
2009
2010	return ret;
2011}
2012
2013/**
2014 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
2015 * @mtd: mtd info structure
2016 * @eccbuf: destination buffer to store ECC bytes
2017 * @oobbuf: OOB buffer
2018 * @start: first ECC byte to retrieve
2019 * @nbytes: number of ECC bytes to retrieve
2020 *
2021 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
2022 *
2023 * Returns zero on success, a negative error code otherwise.
2024 */
2025int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
2026			       const u8 *oobbuf, int start, int nbytes)
2027{
2028	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2029				       mtd_ooblayout_ecc);
2030}
2031EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
2032
2033/**
2034 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
2035 * @mtd: mtd info structure
2036 * @eccbuf: source buffer to get ECC bytes from
2037 * @oobbuf: OOB buffer
2038 * @start: first ECC byte to set
2039 * @nbytes: number of ECC bytes to set
2040 *
2041 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
2042 *
2043 * Returns zero on success, a negative error code otherwise.
2044 */
2045int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
2046			       u8 *oobbuf, int start, int nbytes)
2047{
2048	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2049				       mtd_ooblayout_ecc);
2050}
2051EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
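
/*
 * Illustrative sketch, not part of this file: a driver computing ECC in
 * software could scatter the ECC bytes of a page into the raw OOB buffer
 * with mtd_ooblayout_set_eccbytes() before issuing a raw write. The helper
 * name and buffer handling are hypothetical.
 */
static int __maybe_unused example_pack_ecc(struct mtd_info *mtd, u8 *oobbuf,
					   const u8 *eccbuf, int ecclen)
{
	/* 0xff-fill first so the untouched OOB bytes stay in erased state. */
	memset(oobbuf, 0xff, mtd->oobsize);

	/* Scatter all ECC bytes, starting from ECC byte 0. */
	return mtd_ooblayout_set_eccbytes(mtd, eccbuf, oobbuf, 0, ecclen);
}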
2052
2053/**
2054 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
2055 * @mtd: mtd info structure
2056 * @databuf: destination buffer to store data (free) bytes
2057 * @oobbuf: OOB buffer
2058 * @start: first data byte to retrieve
2059 * @nbytes: number of data bytes to retrieve
2060 *
2061 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
2062 *
2063 * Returns zero on success, a negative error code otherwise.
2064 */
2065int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
2066				const u8 *oobbuf, int start, int nbytes)
2067{
2068	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
2069				       mtd_ooblayout_free);
2070}
2071EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
2072
2073/**
2074 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
2075 * @mtd: mtd info structure
2076 * @databuf: source buffer to get data bytes from
2077 * @oobbuf: OOB buffer
2078 * @start: first data byte to set
2079 * @nbytes: number of data bytes to set
2080 *
2081 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
2082 *
2083 * Returns zero on success, a negative error code otherwise.
2084 */
2085int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
2086				u8 *oobbuf, int start, int nbytes)
2087{
2088	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
2089				       mtd_ooblayout_free);
2090}
2091EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
2092
2093/**
2094 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
2095 * @mtd: mtd info structure
2096 *
2097 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
2098 *
2099 * Returns the free byte count on success, a negative error code otherwise.
2100 */
2101int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
2102{
2103	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
2104}
2105EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
2106
2107/**
2108 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
2109 * @mtd: mtd info structure
2110 *
2111 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
2112 *
2113 * Returns the ECC byte count on success, a negative error code otherwise.
2114 */
2115int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
2116{
2117	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
2118}
2119EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
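
/*
 * Illustrative sketch, not part of this file: the counting helpers are
 * typically used to size temporary buffers before calling the get/set
 * helpers above. The "example_" name and error handling are made up.
 */
static int __maybe_unused example_gather_free_oob(struct mtd_info *mtd,
						  const u8 *oobbuf)
{
	int nfree = mtd_ooblayout_count_freebytes(mtd);
	u8 *spare;
	int ret;

	if (nfree < 0)
		return nfree;

	spare = kmalloc(nfree, GFP_KERNEL);
	if (!spare)
		return -ENOMEM;

	/* Gather every free (non-ECC) byte of the raw OOB area into spare. */
	ret = mtd_ooblayout_get_databytes(mtd, spare, oobbuf, 0, nfree);

	kfree(spare);
	return ret;
}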
2120
2121/*
2122 * Methods to access the protection register area, present in some flash
2123 * devices. The user data is one-time programmable but the factory data is
2124 * read-only.
2125 */
2126int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2127			   struct otp_info *buf)
2128{
2129	struct mtd_info *master = mtd_get_master(mtd);
2130
2131	if (!master->_get_fact_prot_info)
2132		return -EOPNOTSUPP;
2133	if (!len)
2134		return 0;
2135	return master->_get_fact_prot_info(master, len, retlen, buf);
2136}
2137EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2138
2139int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2140			   size_t *retlen, u_char *buf)
2141{
2142	struct mtd_info *master = mtd_get_master(mtd);
2143
2144	*retlen = 0;
2145	if (!master->_read_fact_prot_reg)
2146		return -EOPNOTSUPP;
2147	if (!len)
2148		return 0;
2149	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2150}
2151EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2152
2153int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2154			   struct otp_info *buf)
2155{
2156	struct mtd_info *master = mtd_get_master(mtd);
2157
2158	if (!master->_get_user_prot_info)
2159		return -EOPNOTSUPP;
2160	if (!len)
2161		return 0;
2162	return master->_get_user_prot_info(master, len, retlen, buf);
2163}
2164EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2165
2166int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2167			   size_t *retlen, u_char *buf)
2168{
2169	struct mtd_info *master = mtd_get_master(mtd);
2170
2171	*retlen = 0;
2172	if (!master->_read_user_prot_reg)
2173		return -EOPNOTSUPP;
2174	if (!len)
2175		return 0;
2176	return master->_read_user_prot_reg(master, from, len, retlen, buf);
2177}
2178EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
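
/*
 * Illustrative sketch, not part of this file: an in-kernel user could
 * enumerate the user OTP regions with mtd_get_user_prot_info() and then
 * read from the first one. The helper name and the -ENODEV fallback are
 * assumptions made for the example.
 */
static int __maybe_unused example_read_first_otp(struct mtd_info *mtd,
						 u_char *buf, size_t len)
{
	struct otp_info info;
	size_t retlen;
	int ret;

	/* Fetch the descriptor of the first user OTP region only. */
	ret = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, &info);
	if (ret)
		return ret;
	if (retlen < sizeof(info))
		return -ENODEV;

	len = min_t(size_t, len, info.length);
	return mtd_read_user_prot_reg(mtd, info.start, len, &retlen, buf);
}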
2179
2180int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2181			    size_t *retlen, const u_char *buf)
2182{
2183	struct mtd_info *master = mtd_get_master(mtd);
2184	int ret;
2185
2186	*retlen = 0;
2187	if (!master->_write_user_prot_reg)
2188		return -EOPNOTSUPP;
2189	if (!len)
2190		return 0;
2191	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2192	if (ret)
2193		return ret;
2194
2195	/*
2196	 * If no data could be written at all, the OTP area is full and we
2197	 * must return -ENOSPC.
2198	 */
2199	return (*retlen) ? 0 : -ENOSPC;
2200}
2201EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2202
2203int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2204{
2205	struct mtd_info *master = mtd_get_master(mtd);
2206
2207	if (!master->_lock_user_prot_reg)
2208		return -EOPNOTSUPP;
2209	if (!len)
2210		return 0;
2211	return master->_lock_user_prot_reg(master, from, len);
2212}
2213EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2214
2215int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2216{
2217	struct mtd_info *master = mtd_get_master(mtd);
2218
2219	if (!master->_erase_user_prot_reg)
2220		return -EOPNOTSUPP;
2221	if (!len)
2222		return 0;
2223	return master->_erase_user_prot_reg(master, from, len);
2224}
2225EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
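
/*
 * Illustrative sketch, not part of this file: programming a user OTP region
 * is one-way, so a caller would typically write the payload and then lock
 * the range. Names and the -EIO short-write policy are hypothetical.
 */
static int __maybe_unused example_program_otp(struct mtd_info *mtd, loff_t to,
					      const u_char *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = mtd_write_user_prot_reg(mtd, to, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;

	/* Permanently protect the freshly written range. */
	return mtd_lock_user_prot_reg(mtd, to, len);
}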
2226
2227/* Chip-supported device locking */
2228int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2229{
2230	struct mtd_info *master = mtd_get_master(mtd);
2231
2232	if (!master->_lock)
2233		return -EOPNOTSUPP;
2234	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2235		return -EINVAL;
2236	if (!len)
2237		return 0;
2238
2239	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2240		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2241		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2242	}
2243
2244	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2245}
2246EXPORT_SYMBOL_GPL(mtd_lock);
2247
2248int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2249{
2250	struct mtd_info *master = mtd_get_master(mtd);
2251
2252	if (!master->_unlock)
2253		return -EOPNOTSUPP;
2254	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2255		return -EINVAL;
2256	if (!len)
2257		return 0;
2258
2259	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2260		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2261		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2262	}
2263
2264	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2265}
2266EXPORT_SYMBOL_GPL(mtd_unlock);
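
/*
 * Illustrative sketch, not part of this file: flashes that power up with
 * their blocks write-protected must be unlocked before an erase/write
 * cycle. The helper is hypothetical and skips bad-block handling for
 * brevity.
 */
static int __maybe_unused example_update_block(struct mtd_info *mtd,
					       loff_t ofs, const u_char *buf)
{
	struct erase_info ei = {
		.addr = ofs,
		.len = mtd->erasesize,
	};
	size_t retlen;
	int ret;

	/* Devices without locking support report -EOPNOTSUPP; ignore it. */
	ret = mtd_unlock(mtd, ofs, mtd->erasesize);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	ret = mtd_erase(mtd, &ei);
	if (ret)
		return ret;

	return mtd_write(mtd, ofs, mtd->erasesize, &retlen, buf);
}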
2267
2268int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2269{
2270	struct mtd_info *master = mtd_get_master(mtd);
2271
2272	if (!master->_is_locked)
2273		return -EOPNOTSUPP;
2274	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2275		return -EINVAL;
2276	if (!len)
2277		return 0;
2278
2279	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2280		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2281		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2282	}
2283
2284	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2285}
2286EXPORT_SYMBOL_GPL(mtd_is_locked);
2287
2288int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2289{
2290	struct mtd_info *master = mtd_get_master(mtd);
2291
2292	if (ofs < 0 || ofs >= mtd->size)
2293		return -EINVAL;
2294	if (!master->_block_isreserved)
2295		return 0;
2296
2297	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2298		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2299
2300	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2301}
2302EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2303
2304int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2305{
2306	struct mtd_info *master = mtd_get_master(mtd);
2307
2308	if (ofs < 0 || ofs >= mtd->size)
2309		return -EINVAL;
2310	if (!master->_block_isbad)
2311		return 0;
2312
2313	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2314		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2315
2316	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2317}
2318EXPORT_SYMBOL_GPL(mtd_block_isbad);
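
/*
 * Illustrative sketch, not part of this file: upper layers usually walk a
 * device eraseblock by eraseblock and skip the blocks the driver reports as
 * bad. The counting helper below is made up for illustration.
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
	int bad = 0, ret;
	loff_t ofs;

	for (ofs = 0; ofs < (loff_t)mtd->size; ofs += mtd->erasesize) {
		ret = mtd_block_isbad(mtd, ofs);
		if (ret < 0)
			return ret;
		if (ret)
			bad++;
	}

	return bad;
}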
2319
2320int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2321{
2322	struct mtd_info *master = mtd_get_master(mtd);
2323	int ret;
2324
2325	if (!master->_block_markbad)
2326		return -EOPNOTSUPP;
2327	if (ofs < 0 || ofs >= mtd->size)
2328		return -EINVAL;
2329	if (!(mtd->flags & MTD_WRITEABLE))
2330		return -EROFS;
2331
2332	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2333		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2334
2335	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2336	if (ret)
2337		return ret;
2338
2339	while (mtd->parent) {
2340		mtd->ecc_stats.badblocks++;
2341		mtd = mtd->parent;
2342	}
2343
2344	return 0;
2345}
2346EXPORT_SYMBOL_GPL(mtd_block_markbad);
2347
2348/*
2349 * default_mtd_writev - the default writev method
2350 * @mtd: mtd device description object pointer
2351 * @vecs: the vectors to write
2352 * @count: count of vectors in @vecs
2353 * @to: the MTD device offset to write to
2354 * @retlen: on exit contains the count of bytes written to the MTD device.
2355 *
2356 * This function returns zero in case of success and a negative error code in
2357 * case of failure.
2358 */
2359static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2360			      unsigned long count, loff_t to, size_t *retlen)
2361{
2362	unsigned long i;
2363	size_t totlen = 0, thislen;
2364	int ret = 0;
2365
2366	for (i = 0; i < count; i++) {
2367		if (!vecs[i].iov_len)
2368			continue;
2369		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2370				vecs[i].iov_base);
2371		totlen += thislen;
2372		if (ret || thislen != vecs[i].iov_len)
2373			break;
2374		to += vecs[i].iov_len;
2375	}
2376	*retlen = totlen;
2377	return ret;
2378}
2379
2380/*
2381 * mtd_writev - the vector-based MTD write method
2382 * @mtd: mtd device description object pointer
2383 * @vecs: the vectors to write
2384 * @count: count of vectors in @vecs
2385 * @to: the MTD device offset to write to
2386 * @retlen: on exit contains the count of bytes written to the MTD device.
2387 *
2388 * This function returns zero in case of success and a negative error code in
2389 * case of failure.
2390 */
2391int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2392	       unsigned long count, loff_t to, size_t *retlen)
2393{
2394	struct mtd_info *master = mtd_get_master(mtd);
2395
2396	*retlen = 0;
2397	if (!(mtd->flags & MTD_WRITEABLE))
2398		return -EROFS;
2399
2400	if (!master->_writev)
2401		return default_mtd_writev(mtd, vecs, count, to, retlen);
2402
2403	return master->_writev(master, vecs, count,
2404			       mtd_get_master_ofs(mtd, to), retlen);
2405}
2406EXPORT_SYMBOL_GPL(mtd_writev);
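
/*
 * Illustrative sketch, not part of this file: mtd_writev() lets a caller
 * write a scattered payload (here a header plus a body) in one call.
 * Structure and names are hypothetical; struct kvec comes from
 * <linux/uio.h>.
 */
static int __maybe_unused example_write_two_parts(struct mtd_info *mtd,
						  loff_t to,
						  void *hdr, size_t hdrlen,
						  void *body, size_t bodylen)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,  .iov_len = hdrlen },
		{ .iov_base = body, .iov_len = bodylen },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (!ret && retlen != hdrlen + bodylen)
		ret = -EIO;

	return ret;
}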
2407
2408/**
2409 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2410 * @mtd: mtd device description object pointer
2411 * @size: a pointer to the ideal or maximum size of the allocation, points
2412 *        to the actual allocation size on success.
2413 *
2414 * This routine attempts to allocate a contiguous kernel buffer up to
2415 * the specified size, backing off the size of the request exponentially
2416 * until the request succeeds or until the allocation size falls below
2417 * the system page size. This attempts to make sure it does not adversely
2418 * impact system performance, so when allocating more than one page, we
2419 * ask the memory allocator to avoid re-trying, swapping, writing back
2420 * or performing I/O.
2421 *
2422 * Note, this function also makes sure that the allocated buffer is aligned to
2423 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2424 *
2425 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2426 * to handle smaller (i.e. degraded) buffer allocations under low- or
2427 * fragmented-memory situations where such reduced allocations, from a
2428 * requested ideal, are allowed.
2429 *
2430 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2431 */
2432void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2433{
2434	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2435	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2436	void *kbuf;
2437
2438	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2439
2440	while (*size > min_alloc) {
2441		kbuf = kmalloc(*size, flags);
2442		if (kbuf)
2443			return kbuf;
2444
2445		*size >>= 1;
2446		*size = ALIGN(*size, mtd->writesize);
2447	}
2448
2449	/*
2450	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2451	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2452	 */
2453	return kmalloc(*size, GFP_KERNEL);
2454}
2455EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
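
/*
 * Illustrative sketch, not part of this file: a caller that would like one
 * eraseblock worth of buffer, but can live with less, requests the ideal
 * size and then checks how much it actually got. The helper name is
 * hypothetical.
 */
static int __maybe_unused example_alloc_eb_buffer(struct mtd_info *mtd)
{
	size_t size = mtd->erasesize;
	void *buf;

	buf = mtd_kmalloc_up_to(mtd, &size);
	if (!buf)
		return -ENOMEM;

	/* 'size' now holds the size that was actually allocated. */
	pr_debug("got %zu of %u requested bytes\n", size, mtd->erasesize);

	kfree(buf);
	return 0;
}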
2456
2457#ifdef CONFIG_PROC_FS
2458
2459/*====================================================================*/
2460/* Support for /proc/mtd */
2461
2462static int mtd_proc_show(struct seq_file *m, void *v)
2463{
2464	struct mtd_info *mtd;
2465
2466	seq_puts(m, "dev:    size   erasesize  name\n");
2467	mutex_lock(&mtd_table_mutex);
2468	mtd_for_each_device(mtd) {
2469		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2470			   mtd->index, (unsigned long long)mtd->size,
2471			   mtd->erasesize, mtd->name);
2472	}
2473	mutex_unlock(&mtd_table_mutex);
2474	return 0;
2475}
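
/*
 * With the format above, a hypothetical system with a 128 MiB chip and a
 * 1 MiB partition would show up in /proc/mtd roughly as:
 *
 *   dev:    size   erasesize  name
 *   mtd0: 08000000 00020000 "whole-chip"
 *   mtd1: 00100000 00020000 "bootloader"
 *
 * (device sizes and names are made up for illustration).
 */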
2476#endif /* CONFIG_PROC_FS */
2477
2478/*====================================================================*/
2479/* Init code */
2480
2481static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2482{
2483	struct backing_dev_info *bdi;
2484	int ret;
2485
2486	bdi = bdi_alloc(NUMA_NO_NODE);
2487	if (!bdi)
2488		return ERR_PTR(-ENOMEM);
2489	bdi->ra_pages = 0;
2490	bdi->io_pages = 0;
2491
2492	/*
2493	 * We append a '-0' suffix to the name to keep the name format we
2494	 * used to get. Since this is called only once, the name is unique.
2495	 */
2496	ret = bdi_register(bdi, "%.28s-0", name);
2497	if (ret)
2498		bdi_put(bdi);
2499
2500	return ret ? ERR_PTR(ret) : bdi;
2501}
2502
2503static struct proc_dir_entry *proc_mtd;
2504
2505static int __init init_mtd(void)
2506{
2507	int ret;
2508
2509	ret = class_register(&mtd_class);
2510	if (ret)
2511		goto err_reg;
2512
2513	mtd_bdi = mtd_bdi_init("mtd");
2514	if (IS_ERR(mtd_bdi)) {
2515		ret = PTR_ERR(mtd_bdi);
2516		goto err_bdi;
2517	}
2518
2519	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2520
2521	ret = init_mtdchar();
2522	if (ret)
2523		goto out_procfs;
2524
2525	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2526	debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2527			    &mtd_expert_analysis_mode);
2528
2529	return 0;
2530
2531out_procfs:
2532	if (proc_mtd)
2533		remove_proc_entry("mtd", NULL);
2534	bdi_unregister(mtd_bdi);
2535	bdi_put(mtd_bdi);
2536err_bdi:
2537	class_unregister(&mtd_class);
2538err_reg:
2539	pr_err("Error registering mtd class or bdi: %d\n", ret);
2540	return ret;
2541}
2542
2543static void __exit cleanup_mtd(void)
2544{
2545	debugfs_remove_recursive(dfs_dir_mtd);
2546	cleanup_mtdchar();
2547	if (proc_mtd)
2548		remove_proc_entry("mtd", NULL);
2549	class_unregister(&mtd_class);
2550	bdi_unregister(mtd_bdi);
2551	bdi_put(mtd_bdi);
2552	idr_destroy(&mtd_idr);
2553}
2554
2555module_init(init_mtd);
2556module_exit(cleanup_mtd);
2557
2558MODULE_LICENSE("GPL");
2559MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2560MODULE_DESCRIPTION("Core MTD registration and access routines");
2561