// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
static DEFINE_MUTEX(mtd_table_mutex);
static int mtd_table_mutex_depth;
static struct task_struct *mtd_table_mutex_owner;

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

void mtd_table_mutex_lock(void)
{
	if (mtd_table_mutex_owner != current) {
		mutex_lock(&mtd_table_mutex);
		mtd_table_mutex_owner = current;
	}
	mtd_table_mutex_depth++;
}
EXPORT_SYMBOL_GPL(mtd_table_mutex_lock);


void mtd_table_mutex_unlock(void)
{
	if (mtd_table_mutex_owner != current) {
		pr_err("MTD:lock_owner is %s, but current is %s\n",
		       mtd_table_mutex_owner->comm, current->comm);
		BUG();
	}
	if (--mtd_table_mutex_depth == 0) {
		mtd_table_mutex_owner = NULL;
		mutex_unlock(&mtd_table_mutex);
	}
}
EXPORT_SYMBOL_GPL(mtd_table_mutex_unlock);

void mtd_table_assert_mutex_locked(void)
{
	if (mtd_table_mutex_owner != current) {
		pr_err("MTD:lock_owner is %s, but current is %s\n",
		       mtd_table_mutex_owner->comm, current->comm);
		BUG();
	}
}
EXPORT_SYMBOL_GPL(mtd_table_assert_mutex_locked);
/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_oobavail_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
}
static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
		   NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);

}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
}
static DEVICE_ATTR(corrected_bits, S_IRUGO,
		   mtd_ecc_stats_corrected_show, NULL);

static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
}
static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);

static ssize_t mtd_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
}
static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);

static ssize_t mtd_bbtblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
}
static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};

static int mtd_partid_debug_show(struct seq_file *s, void *p)
{
	struct mtd_info *mtd = s->private;

	seq_printf(s, "%s\n", mtd->dbg.partid);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);

static int mtd_partname_debug_show(struct seq_file *s, void *p)
{
	struct mtd_info *mtd = s->private;

	seq_printf(s, "%s\n", mtd->dbg.partname);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct device *dev = &mtd->dev;
	struct dentry *root;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
	mtd->dbg.dfs_dir = root;

	if (mtd->dbg.partid)
		debugfs_create_file("partid", 0400, root, mtd,
				    &mtd_partid_debug_fops);

	if (mtd->dbg.partname)
		debugfs_create_file("partname", 0400, root, mtd,
				    &mtd_partname_debug_fops);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.pair = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct nvmem_config config = {};

	config.id = -1;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.no_of_node = true;
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
			mtd->nvmem = NULL;
		} else {
			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
			return PTR_ERR(mtd->nvmem);
		}
	}

	return 0;
}

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mtd_table_mutex_lock();

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error) {
		put_device(&mtd->dev);
		goto fail_added;
	}

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mtd_table_mutex_unlock();
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mtd_table_mutex_unlock();
	return error;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or a negative error code on failure, which
 * currently will happen if the requested device does not appear to be
 * present in the list.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mtd_table_mutex_lock();

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		debugfs_remove_recursive(mtd->dbg.dfs_dir);

		/* Try to remove the NVMEM provider */
		if (mtd->nvmem)
			nvmem_unregister(mtd->nvmem);

		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);
		of_node_put(mtd_get_of_node(mtd));

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mtd_table_mutex_unlock();
	return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
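 *
 * A minimal, illustrative call sequence from a hypothetical driver probe
 * path (the parser list below is only an example, it is not defined in this
 * file):
 *
 *	static const char * const probes[] = { "cmdlinepart", "ofpart", NULL };
 *
 *	ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
 *	if (ret)
 *		return ret;
 *
 * The matching teardown in the driver remove path is mtd_device_unregister().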
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister. This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot)
		unregister_reboot_notifier(&master->reboot_notifier);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mtd_table_mutex_lock();

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mtd_table_mutex_unlock();
}
EXPORT_SYMBOL_GPL(register_mtd_user);

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
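 *
 * For illustration only (the identifiers below are generic examples, not
 * symbols defined in this file), a typical MTD 'user' pairs the two calls
 * like this:
 *
 *	static struct mtd_notifier example_notifier = {
 *		.add	= example_notify_add,
 *		.remove	= example_notify_remove,
 *	};
 *
 *	register_mtd_user(&example_notifier);
 *	...
 *	unregister_mtd_user(&example_notifier);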
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mtd_table_mutex_lock();

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mtd_table_mutex_unlock();
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present. Given
 * both, return the num'th driver only if its address matches. Return
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mtd_table_mutex_lock();

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mtd_table_mutex_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (!try_module_get(master->owner))
		return -ENODEV;

	if (master->_get_device) {
		err = master->_get_device(mtd);

		if (err) {
			module_put(master->owner);
			return err;
		}
	}

	master->usecount++;

	while (mtd->parent) {
		mtd->usecount++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 * device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error code in case of failure.
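 *
 * Illustrative use only (the device name below is an example, not a name
 * defined in this file); a successful lookup must be balanced with
 * put_mtd_device():
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("bootloader");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);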
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mtd_table_mutex_lock();

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mtd_table_mutex_unlock();
	return mtd;

out_unlock:
	mtd_table_mutex_unlock();
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mtd_table_mutex_lock();
	__put_mtd_device(mtd);
	mtd_table_mutex_unlock();

}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd->parent) {
		--mtd->usecount;
		BUG_ON(mtd->usecount < 0);
		mtd = mtd->parent;
	}

	master->usecount--;

	if (master->_put_device)
		master->_put_device(master);

	module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);

/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
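 *
 * A sketch of the expected calling pattern (illustrative only): map the
 * range, fall back to a copying read when pointing is unsupported or short,
 * and balance every successful mtd_point() with mtd_unpoint():
 *
 *	ret = mtd_point(mtd, from, len, &retlen, &virt, NULL);
 *	if (!ret && retlen == len) {
 *		memcpy(buf, virt, len);
 *		mtd_unpoint(mtd, from, retlen);
 *	} else {
 *		ret = mtd_read(mtd, from, len, &retlen, buf);
 *	}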
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);

/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it is
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
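 *
 * A hedged caller-side sketch (not code from this file): a panic-time logger
 * would typically use the panic path only when the device provides one and
 * use the normal write path otherwise:
 *
 *	if (mtd->_panic_write)
 *		err = mtd_panic_write(mtd, to, len, &retlen, buf);
 *	else
 *		err = mtd_write(mtd, to, len, &retlen, buf);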
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
		  struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single ECC
 *	     chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
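 *
 * A minimal iteration sketch (illustrative, not code from this file):
 *
 *	struct mtd_oob_region oobecc;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_ecc(mtd, section, &oobecc)) {
 *		pr_info("ECC section %d: offset %u, length %u\n",
 *			section, oobecc.offset, oobecc.length);
 *		section++;
 *	}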
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobecc, 0, sizeof(*oobecc));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->ecc)
		return -ENOTSUPP;

	return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);

/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobfree, 0, sizeof(*oobfree));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->free)
		return -ENOTSUPP;

	return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);

/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				     int *sectionp, struct mtd_oob_region *oobregion,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust region info to make it start at the beginning of the
	 * 'start' ECC byte.
	 */
	oobregion->offset += byte - pos;
	oobregion->length -= byte - pos;
	*sectionp = section;

	return 0;
}

/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
 * byte.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);

/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free)
 * from the OOB buffer and copy them into buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
				   const u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(buf, oobbuf + oobregion.offset, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
				   u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(oobbuf + oobregion.offset, buf, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section = 0, ret, nbytes = 0;

	while (1) {
		ret = iter(mtd, section++, &oobregion);
		if (ret) {
			if (ret == -ERANGE)
				ret = nbytes;
			break;
		}

		nbytes += oobregion.length;
	}

	return ret;
}

/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);

/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);

/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);

/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);

/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);

/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns the number of ECC bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);

/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one-time programmable but the factory data is
 * read only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	*retlen = 0;
	if (!master->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of memory and
	 * must return -ENOSPC.
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	*retlen = 0;
	if (!master->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of space and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
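
/*
 * Illustrative sketch (not used by the core): on parts that power up with
 * block protection enabled, a caller typically unlocks the range, writes,
 * and restores protection afterwards. The function name, the tolerance of
 * -EOPNOTSUPP and the unconditional re-lock are hypothetical policy
 * choices for the example only.
 */
static int __maybe_unused example_locked_write(struct mtd_info *mtd,
					       loff_t to, size_t len,
					       const u_char *buf)
{
	size_t retlen;
	int ret;

	ret = mtd_unlock(mtd, to, len);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	ret = mtd_write(mtd, to, len, &retlen, buf);

	/* Restore protection regardless of the write result. */
	mtd_lock(mtd, to, len);

	return ret;
}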
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isreserved)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isbad)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (!master->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
	if (ret)
		return ret;

	while (mtd->parent) {
		mtd->ecc_stats.badblocks++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
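
/*
 * Illustrative sketch (not used by the core): walk a device eraseblock by
 * eraseblock and count how many are already marked bad, the way a scanner
 * or a simple flash layout pass might before placing its data. The
 * function name and the counting policy are hypothetical.
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int ret, bad = 0;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		ret = mtd_block_isbad(mtd, ofs);
		if (ret < 0)
			return ret;	/* query itself failed */
		if (ret)
			bad++;		/* non-zero means the block is bad */
	}

	return bad;
}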
/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!master->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);

	return master->_writev(master, vecs, count,
			       mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mtd_table_mutex_lock();
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mtd_table_mutex_unlock();
	return 0;
}
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static struct backing_dev_info * __init mtd_bdi_init(char *name)
{
	struct backing_dev_info *bdi;
	int ret;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return ERR_PTR(-ENOMEM);
	bdi->ra_pages = 0;
	bdi->io_pages = 0;

	/*
	 * We append a '-0' suffix to the name to keep the name format we
	 * used to have. Since this is called only once, we get a unique name.
	 */
	ret = bdi_register(bdi, "%.28s-0", name);
	if (ret)
		bdi_put(bdi);

	return ret ? ERR_PTR(ret) : bdi;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	mtd_bdi = mtd_bdi_init("mtd");
	if (IS_ERR(mtd_bdi)) {
		ret = PTR_ERR(mtd_bdi);
		goto err_bdi;
	}

	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_put(mtd_bdi);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	debugfs_remove_recursive(dfs_dir_mtd);
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_put(mtd_bdi);
	idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");
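
/*
 * Illustrative sketch (not used by the core): read a large region through
 * a buffer obtained from mtd_kmalloc_up_to() above, accepting whatever
 * smaller chunk size the allocator could satisfy under memory pressure.
 * The function name, the consumer callback and the error policy (treating
 * -EUCLEAN as fatal) are hypothetical choices for the example only.
 */
static int __maybe_unused example_chunked_read(struct mtd_info *mtd,
					       loff_t from, size_t total,
					       void (*consume)(const u_char *, size_t))
{
	size_t chunk = total;
	size_t retlen;
	u_char *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &chunk);	/* may shrink 'chunk' */
	if (!buf)
		return -ENOMEM;

	while (total) {
		size_t len = min(chunk, total);

		ret = mtd_read(mtd, from, len, &retlen, buf);
		if (ret)
			break;

		consume(buf, retlen);
		from += retlen;
		total -= retlen;
	}

	kfree(buf);
	return ret;
}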