1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * Persistent Storage - platform driver interface parts. 4 * 5 * Copyright (C) 2007-2008 Google, Inc. 6 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com> 7 */ 8 9#define pr_fmt(fmt) "pstore: " fmt 10 11#include <linux/atomic.h> 12#include <linux/types.h> 13#include <linux/errno.h> 14#include <linux/init.h> 15#include <linux/kmsg_dump.h> 16#include <linux/console.h> 17#include <linux/module.h> 18#include <linux/pstore.h> 19#ifdef CONFIG_PSTORE_BLACKBOX 20#include <linux/stacktrace.h> 21#include <linux/blackbox.h> 22#endif 23#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS) 24#include <linux/lzo.h> 25#endif 26#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS) 27#include <linux/lz4.h> 28#endif 29#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS) 30#include <linux/zstd.h> 31#endif 32#include <linux/crypto.h> 33#include <linux/string.h> 34#include <linux/timer.h> 35#include <linux/slab.h> 36#include <linux/uaccess.h> 37#include <linux/jiffies.h> 38#include <linux/workqueue.h> 39 40#include "internal.h" 41 42/* 43 * We defer making "oops" entries appear in pstore - see 44 * whether the system is actually still running well enough 45 * to let someone see the entry 46 */ 47static int pstore_update_ms = -1; 48module_param_named(update_ms, pstore_update_ms, int, 0600); 49MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content " 50 "(default is -1, which means runtime updates are disabled; " 51 "enabling this option may not be safe; it may lead to further " 52 "corruption on Oopses)"); 53 54/* Names should be in the same order as the enum pstore_type_id */ 55static const char * const pstore_type_names[] = { 56 "dmesg", 57 "mce", 58 "console", 59 "ftrace", 60 "rtas", 61 "powerpc-ofw", 62 "powerpc-common", 63 "pmsg", 64 "powerpc-opal", 65 "blackbox", 66}; 67 68static int pstore_new_entry; 69 70static void pstore_timefunc(struct timer_list *); 71static DEFINE_TIMER(pstore_timer, pstore_timefunc); 72 73static void pstore_dowork(struct work_struct *); 74static DECLARE_WORK(pstore_work, pstore_dowork); 75 76/* 77 * psinfo_lock protects "psinfo" during calls to 78 * pstore_register(), pstore_unregister(), and 79 * the filesystem mount/unmount routines. 80 */ 81static DEFINE_MUTEX(psinfo_lock); 82struct pstore_info *psinfo; 83 84static char *backend; 85module_param(backend, charp, 0444); 86MODULE_PARM_DESC(backend, "specific backend to use"); 87 88static char *compress = 89#ifdef CONFIG_PSTORE_COMPRESS_DEFAULT 90 CONFIG_PSTORE_COMPRESS_DEFAULT; 91#else 92 NULL; 93#endif 94module_param(compress, charp, 0444); 95MODULE_PARM_DESC(compress, "compression to use"); 96 97/* Compression parameters */ 98static struct crypto_comp *tfm; 99 100struct pstore_zbackend { 101 int (*zbufsize)(size_t size); 102 const char *name; 103}; 104 105static char *big_oops_buf; 106static size_t big_oops_buf_sz; 107 108/* How much of the console log to snapshot */ 109unsigned long kmsg_bytes = PSTORE_DEFAULT_KMSG_BYTES; 110 111void pstore_set_kmsg_bytes(int bytes) 112{ 113 kmsg_bytes = bytes; 114} 115 116/* Tag each group of saved records with a sequence number */ 117static int oopscount; 118 119const char *pstore_type_to_name(enum pstore_type_id type) 120{ 121 BUILD_BUG_ON(ARRAY_SIZE(pstore_type_names) != PSTORE_TYPE_MAX); 122 123 if (WARN_ON_ONCE(type >= PSTORE_TYPE_MAX)) 124 return "unknown"; 125 126 return pstore_type_names[type]; 127} 128EXPORT_SYMBOL_GPL(pstore_type_to_name); 129 130enum pstore_type_id pstore_name_to_type(const char *name) 131{ 132 int i; 133 134 for (i = 0; i < PSTORE_TYPE_MAX; i++) { 135 if (!strcmp(pstore_type_names[i], name)) 136 return i; 137 } 138 139 return PSTORE_TYPE_MAX; 140} 141EXPORT_SYMBOL_GPL(pstore_name_to_type); 142 143static void pstore_timer_kick(void) 144{ 145 if (pstore_update_ms < 0) 146 return; 147 148 mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms)); 149} 150 151static bool pstore_cannot_block_path(enum kmsg_dump_reason reason) 152{ 153 /* 154 * In case of NMI path, pstore shouldn't be blocked 155 * regardless of reason. 156 */ 157 if (in_nmi()) 158 return true; 159 160 switch (reason) { 161 /* In panic case, other cpus are stopped by smp_send_stop(). */ 162 case KMSG_DUMP_PANIC: 163 /* 164 * Emergency restart shouldn't be blocked by spinning on 165 * pstore_info::buf_lock. 166 */ 167 case KMSG_DUMP_EMERG: 168 return true; 169 default: 170 return false; 171 } 172} 173 174#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS) 175static int zbufsize_deflate(size_t size) 176{ 177 size_t cmpr; 178 179 switch (size) { 180 /* buffer range for efivars */ 181 case 1000 ... 2000: 182 cmpr = 56; 183 break; 184 case 2001 ... 3000: 185 cmpr = 54; 186 break; 187 case 3001 ... 3999: 188 cmpr = 52; 189 break; 190 /* buffer range for nvram, erst */ 191 case 4000 ... 10000: 192 cmpr = 45; 193 break; 194 default: 195 cmpr = 60; 196 break; 197 } 198 199 return (size * 100) / cmpr; 200} 201#endif 202 203#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS) 204static int zbufsize_lzo(size_t size) 205{ 206 return lzo1x_worst_compress(size); 207} 208#endif 209 210#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS) 211static int zbufsize_lz4(size_t size) 212{ 213 return LZ4_compressBound(size); 214} 215#endif 216 217#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS) 218static int zbufsize_842(size_t size) 219{ 220 return size; 221} 222#endif 223 224#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS) 225static int zbufsize_zstd(size_t size) 226{ 227 return ZSTD_compressBound(size); 228} 229#endif 230 231static const struct pstore_zbackend *zbackend __ro_after_init; 232 233static const struct pstore_zbackend zbackends[] = { 234#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS) 235 { 236 .zbufsize = zbufsize_deflate, 237 .name = "deflate", 238 }, 239#endif 240#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS) 241 { 242 .zbufsize = zbufsize_lzo, 243 .name = "lzo", 244 }, 245#endif 246#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) 247 { 248 .zbufsize = zbufsize_lz4, 249 .name = "lz4", 250 }, 251#endif 252#if IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS) 253 { 254 .zbufsize = zbufsize_lz4, 255 .name = "lz4hc", 256 }, 257#endif 258#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS) 259 { 260 .zbufsize = zbufsize_842, 261 .name = "842", 262 }, 263#endif 264#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS) 265 { 266 .zbufsize = zbufsize_zstd, 267 .name = "zstd", 268 }, 269#endif 270 { } 271}; 272 273static int pstore_compress(const void *in, void *out, 274 unsigned int inlen, unsigned int outlen) 275{ 276 int ret; 277 278 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS)) 279 return -EINVAL; 280 281 ret = crypto_comp_compress(tfm, in, inlen, out, &outlen); 282 if (ret) { 283 pr_err("crypto_comp_compress failed, ret = %d!\n", ret); 284 return ret; 285 } 286 287 return outlen; 288} 289 290static void allocate_buf_for_compression(void) 291{ 292 struct crypto_comp *ctx; 293 int size; 294 char *buf; 295 296 /* Skip if not built-in or compression backend not selected yet. */ 297 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend) 298 return; 299 300 /* Skip if no pstore backend yet or compression init already done. */ 301 if (!psinfo || tfm) 302 return; 303 304 if (!crypto_has_comp(zbackend->name, 0, 0)) { 305 pr_err("Unknown compression: %s\n", zbackend->name); 306 return; 307 } 308 309 size = zbackend->zbufsize(psinfo->bufsize); 310 if (size <= 0) { 311 pr_err("Invalid compression size for %s: %d\n", 312 zbackend->name, size); 313 return; 314 } 315 316 buf = kmalloc(size, GFP_KERNEL); 317 if (!buf) { 318 pr_err("Failed %d byte compression buffer allocation for: %s\n", 319 size, zbackend->name); 320 return; 321 } 322 323 ctx = crypto_alloc_comp(zbackend->name, 0, 0); 324 if (IS_ERR_OR_NULL(ctx)) { 325 kfree(buf); 326 pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name, 327 PTR_ERR(ctx)); 328 return; 329 } 330 331 /* A non-NULL big_oops_buf indicates compression is available. */ 332 tfm = ctx; 333 big_oops_buf_sz = size; 334 big_oops_buf = buf; 335 336 pr_info("Using crash dump compression: %s\n", zbackend->name); 337} 338 339static void free_buf_for_compression(void) 340{ 341 if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) { 342 crypto_free_comp(tfm); 343 tfm = NULL; 344 } 345 kfree(big_oops_buf); 346 big_oops_buf = NULL; 347 big_oops_buf_sz = 0; 348} 349 350/* 351 * Called when compression fails, since the printk buffer 352 * would be fetched for compression calling it again when 353 * compression fails would have moved the iterator of 354 * printk buffer which results in fetching old contents. 355 * Copy the recent messages from big_oops_buf to psinfo->buf 356 */ 357static size_t copy_kmsg_to_buffer(int hsize, size_t len) 358{ 359 size_t total_len; 360 size_t diff; 361 362 total_len = hsize + len; 363 364 if (total_len > psinfo->bufsize) { 365 diff = total_len - psinfo->bufsize + hsize; 366 memcpy(psinfo->buf, big_oops_buf, hsize); 367 memcpy(psinfo->buf + hsize, big_oops_buf + diff, 368 psinfo->bufsize - hsize); 369 total_len = psinfo->bufsize; 370 } else 371 memcpy(psinfo->buf, big_oops_buf, total_len); 372 373 return total_len; 374} 375 376void pstore_record_init(struct pstore_record *record, 377 struct pstore_info *psinfo) 378{ 379 memset(record, 0, sizeof(*record)); 380 381 record->psi = psinfo; 382 383 /* Report zeroed timestamp if called before timekeeping has resumed. */ 384 record->time = ns_to_timespec64(ktime_get_real_fast_ns()); 385} 386 387/* 388 * Store the customised fault log 389 */ 390#ifdef CONFIG_PSTORE_BLACKBOX 391#define PSTORE_FLAG "PSTORE" 392#define CALLSTACK_MAX_ENTRIES 20 393static void dump_stacktrace(char *pbuf, size_t buf_size, bool is_panic) 394{ 395 int i; 396 size_t stack_len = 0; 397 size_t com_len = 0; 398 unsigned long entries[CALLSTACK_MAX_ENTRIES]; 399 unsigned int nr_entries; 400 char tmp_buf[ERROR_DESC_MAX_LEN]; 401 bool find_panic = false; 402 403 if (unlikely(!pbuf || !buf_size)) 404 return; 405 406 memset(pbuf, 0, buf_size); 407 memset(tmp_buf, 0, sizeof(tmp_buf)); 408 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); 409 com_len = scnprintf(pbuf, buf_size, "Comm:%s,CPU:%d,Stack:", 410 current->comm, raw_smp_processor_id()); 411 for (i = 0; i < nr_entries; i++) { 412 if (stack_len >= sizeof(tmp_buf)) { 413 tmp_buf[sizeof(tmp_buf) - 1] = '\0'; 414 break; 415 } 416 stack_len += scnprintf(tmp_buf + stack_len, sizeof(tmp_buf) - stack_len, 417 "%pS-", (void *)entries[i]); 418 if (!find_panic && is_panic) { 419 if (strncmp(tmp_buf, "panic", strlen("panic")) == 0) 420 find_panic = true; 421 else 422 (void)memset(tmp_buf, 0, sizeof(tmp_buf)); 423 } 424 } 425 if (com_len >= buf_size) 426 return; 427 stack_len = min(buf_size - com_len, strlen(tmp_buf)); 428 memcpy(pbuf + com_len, tmp_buf, stack_len); 429 *(pbuf + buf_size - 1) = '\0'; 430} 431 432void pstore_blackbox_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) 433{ 434 struct fault_log_info *pfault_log_info; 435 struct pstore_record record; 436 size_t dst_size; 437 const char *why; 438 char *dst; 439 unsigned long flags = 0; 440 int ret; 441 442#if defined(CONFIG_PSTORE_BLK) || defined(CONFIG_PSTORE_RAM) 443 if (!pstore_ready) 444 return; 445#endif 446 447 why = kmsg_dump_reason_str(reason); 448 449 if (pstore_cannot_block_path(reason)) { 450 if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) { 451 pr_err("dump skipped in %s path because of concurrent dump\n", 452 in_nmi() ? "NMI" : why); 453 return; 454 } 455 } else { 456 spin_lock_irqsave(&psinfo->buf_lock, flags); 457 } 458 459 pfault_log_info = (struct fault_log_info *)psinfo->buf; 460 461 memset(pfault_log_info, 0, sizeof(*pfault_log_info)); 462 463 pstore_record_init(&record, psinfo); 464 465 record.type = PSTORE_TYPE_BLACKBOX; 466 record.reason = reason; 467 468 memcpy(pfault_log_info->flag, LOG_FLAG, strlen(LOG_FLAG)); 469 strncpy(pfault_log_info->info.event, why, 470 min(strlen(why), sizeof(pfault_log_info->info.event) - 1)); 471 strncpy(pfault_log_info->info.module, PSTORE_FLAG, 472 min(strlen(PSTORE_FLAG), sizeof(pfault_log_info->info.module) - 1)); 473 get_timestamp(pfault_log_info->info.error_time, TIMESTAMP_MAX_LEN); 474 dump_stacktrace(pfault_log_info->info.error_desc, sizeof(pfault_log_info->info.error_desc), false); 475 476 record.buf = psinfo->buf; 477 478 dst = psinfo->buf; 479 dst_size = psinfo->bufsize; 480 481 dst_size -= sizeof(struct fault_log_info); 482 483 (void)kmsg_dump_get_buffer(dumper, true, dst + sizeof(struct fault_log_info), dst_size, 484 &(pfault_log_info->len)); 485 486 record.size = sizeof(struct fault_log_info) + pfault_log_info->len; 487 ret = psinfo->write(&record); 488 489 spin_unlock_irqrestore(&psinfo->buf_lock, flags); 490} 491EXPORT_SYMBOL_GPL(pstore_blackbox_dump); 492#endif 493 494/* 495 * callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from the 496 * end of the buffer. 497 */ 498static void pstore_dump(struct kmsg_dumper *dumper, 499 enum kmsg_dump_reason reason) 500{ 501 unsigned long total = 0; 502 const char *why; 503 unsigned int part = 1; 504 unsigned long flags = 0; 505 int ret; 506 507 why = kmsg_dump_reason_str(reason); 508 509 if (pstore_cannot_block_path(reason)) { 510 if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) { 511 pr_err("dump skipped in %s path because of concurrent dump\n", 512 in_nmi() ? "NMI" : why); 513 return; 514 } 515 } else { 516 spin_lock_irqsave(&psinfo->buf_lock, flags); 517 } 518 519 oopscount++; 520 while (total < kmsg_bytes) { 521 char *dst; 522 size_t dst_size; 523 int header_size; 524 int zipped_len = -1; 525 size_t dump_size; 526 struct pstore_record record; 527 528 pstore_record_init(&record, psinfo); 529 record.type = PSTORE_TYPE_DMESG; 530 record.count = oopscount; 531 record.reason = reason; 532 record.part = part; 533 record.buf = psinfo->buf; 534 535 if (big_oops_buf) { 536 dst = big_oops_buf; 537 dst_size = big_oops_buf_sz; 538 } else { 539 dst = psinfo->buf; 540 dst_size = psinfo->bufsize; 541 } 542 543 /* Write dump header. */ 544 header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why, 545 oopscount, part); 546 dst_size -= header_size; 547 548 /* Write dump contents. */ 549 if (!kmsg_dump_get_buffer(dumper, true, dst + header_size, 550 dst_size, &dump_size)) 551 break; 552 553 if (big_oops_buf) { 554 zipped_len = pstore_compress(dst, psinfo->buf, 555 header_size + dump_size, 556 psinfo->bufsize); 557 558 if (zipped_len > 0) { 559 record.compressed = true; 560 record.size = zipped_len; 561 } else { 562 record.size = copy_kmsg_to_buffer(header_size, 563 dump_size); 564 } 565 } else { 566 record.size = header_size + dump_size; 567 } 568 569 ret = psinfo->write(&record); 570 if (ret == 0 && reason == KMSG_DUMP_OOPS) { 571 pstore_new_entry = 1; 572 pstore_timer_kick(); 573 } 574 575 total += record.size; 576 part++; 577 } 578 579 spin_unlock_irqrestore(&psinfo->buf_lock, flags); 580} 581 582static struct kmsg_dumper pstore_dumper = { 583 .dump = pstore_dump, 584}; 585 586/* 587 * Register with kmsg_dump to save last part of console log on panic. 588 */ 589static void pstore_register_kmsg(void) 590{ 591 kmsg_dump_register(&pstore_dumper); 592} 593 594static void pstore_unregister_kmsg(void) 595{ 596 kmsg_dump_unregister(&pstore_dumper); 597} 598 599#ifdef CONFIG_PSTORE_CONSOLE 600static void pstore_console_write(struct console *con, const char *s, unsigned c) 601{ 602 struct pstore_record record; 603 604 if (!c) 605 return; 606 607 pstore_record_init(&record, psinfo); 608 record.type = PSTORE_TYPE_CONSOLE; 609 610 record.buf = (char *)s; 611 record.size = c; 612 psinfo->write(&record); 613} 614 615static struct console pstore_console = { 616 .write = pstore_console_write, 617 .index = -1, 618}; 619 620static void pstore_register_console(void) 621{ 622 /* Show which backend is going to get console writes. */ 623 strscpy(pstore_console.name, psinfo->name, 624 sizeof(pstore_console.name)); 625 /* 626 * Always initialize flags here since prior unregister_console() 627 * calls may have changed settings (specifically CON_ENABLED). 628 */ 629 pstore_console.flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME; 630 register_console(&pstore_console); 631} 632 633static void pstore_unregister_console(void) 634{ 635 unregister_console(&pstore_console); 636} 637#else 638static void pstore_register_console(void) {} 639static void pstore_unregister_console(void) {} 640#endif 641 642static int pstore_write_user_compat(struct pstore_record *record, 643 const char __user *buf) 644{ 645 int ret = 0; 646 647 if (record->buf) 648 return -EINVAL; 649 650 record->buf = memdup_user(buf, record->size); 651 if (IS_ERR(record->buf)) { 652 ret = PTR_ERR(record->buf); 653 goto out; 654 } 655 656 ret = record->psi->write(record); 657 658 kfree(record->buf); 659out: 660 record->buf = NULL; 661 662 return unlikely(ret < 0) ? ret : record->size; 663} 664 665/* 666 * platform specific persistent storage driver registers with 667 * us here. If pstore is already mounted, call the platform 668 * read function right away to populate the file system. If not 669 * then the pstore mount code will call us later to fill out 670 * the file system. 671 */ 672int pstore_register(struct pstore_info *psi) 673{ 674 char *new_backend; 675 676 if (backend && strcmp(backend, psi->name)) { 677 pr_warn("ignoring unexpected backend '%s'\n", psi->name); 678 return -EPERM; 679 } 680 681 /* Sanity check flags. */ 682 if (!psi->flags) { 683 pr_warn("backend '%s' must support at least one frontend\n", 684 psi->name); 685 return -EINVAL; 686 } 687 688 /* Check for required functions. */ 689 if (!psi->read || !psi->write) { 690 pr_warn("backend '%s' must implement read() and write()\n", 691 psi->name); 692 return -EINVAL; 693 } 694 695 new_backend = kstrdup(psi->name, GFP_KERNEL); 696 if (!new_backend) 697 return -ENOMEM; 698 699 mutex_lock(&psinfo_lock); 700 if (psinfo) { 701 pr_warn("backend '%s' already loaded: ignoring '%s'\n", 702 psinfo->name, psi->name); 703 mutex_unlock(&psinfo_lock); 704 kfree(new_backend); 705 return -EBUSY; 706 } 707 708 if (!psi->write_user) 709 psi->write_user = pstore_write_user_compat; 710 psinfo = psi; 711 mutex_init(&psinfo->read_mutex); 712 spin_lock_init(&psinfo->buf_lock); 713 714 if (psi->flags & PSTORE_FLAGS_DMESG) 715 allocate_buf_for_compression(); 716 717 pstore_get_records(0); 718 719 if (psi->flags & PSTORE_FLAGS_DMESG) { 720 pstore_dumper.max_reason = psinfo->max_reason; 721 pstore_register_kmsg(); 722 } 723 if (psi->flags & PSTORE_FLAGS_CONSOLE) 724 pstore_register_console(); 725 if (psi->flags & PSTORE_FLAGS_FTRACE) 726 pstore_register_ftrace(); 727 if (psi->flags & PSTORE_FLAGS_PMSG) 728 pstore_register_pmsg(); 729 730 /* Start watching for new records, if desired. */ 731 pstore_timer_kick(); 732 733 /* 734 * Update the module parameter backend, so it is visible 735 * through /sys/module/pstore/parameters/backend 736 */ 737 backend = new_backend; 738 739 pr_info("Registered %s as persistent store backend\n", psi->name); 740 741 mutex_unlock(&psinfo_lock); 742 return 0; 743} 744EXPORT_SYMBOL_GPL(pstore_register); 745 746void pstore_unregister(struct pstore_info *psi) 747{ 748 /* It's okay to unregister nothing. */ 749 if (!psi) 750 return; 751 752 mutex_lock(&psinfo_lock); 753 754 /* Only one backend can be registered at a time. */ 755 if (WARN_ON(psi != psinfo)) { 756 mutex_unlock(&psinfo_lock); 757 return; 758 } 759 760 /* Unregister all callbacks. */ 761 if (psi->flags & PSTORE_FLAGS_PMSG) 762 pstore_unregister_pmsg(); 763 if (psi->flags & PSTORE_FLAGS_FTRACE) 764 pstore_unregister_ftrace(); 765 if (psi->flags & PSTORE_FLAGS_CONSOLE) 766 pstore_unregister_console(); 767 if (psi->flags & PSTORE_FLAGS_DMESG) 768 pstore_unregister_kmsg(); 769 770 /* Stop timer and make sure all work has finished. */ 771 del_timer_sync(&pstore_timer); 772 flush_work(&pstore_work); 773 774 /* Remove all backend records from filesystem tree. */ 775 pstore_put_backend_records(psi); 776 777 free_buf_for_compression(); 778 779 psinfo = NULL; 780 kfree(backend); 781 backend = NULL; 782 mutex_unlock(&psinfo_lock); 783} 784EXPORT_SYMBOL_GPL(pstore_unregister); 785 786static void decompress_record(struct pstore_record *record) 787{ 788 int ret; 789 int unzipped_len; 790 char *unzipped, *workspace; 791 792 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed) 793 return; 794 795 /* Only PSTORE_TYPE_DMESG support compression. */ 796 if (record->type != PSTORE_TYPE_DMESG) { 797 pr_warn("ignored compressed record type %d\n", record->type); 798 return; 799 } 800 801 /* Missing compression buffer means compression was not initialized. */ 802 if (!big_oops_buf) { 803 pr_warn("no decompression method initialized!\n"); 804 return; 805 } 806 807 /* Allocate enough space to hold max decompression and ECC. */ 808 unzipped_len = big_oops_buf_sz; 809 workspace = kmalloc(unzipped_len + record->ecc_notice_size, 810 GFP_KERNEL); 811 if (!workspace) 812 return; 813 814 /* After decompression "unzipped_len" is almost certainly smaller. */ 815 ret = crypto_comp_decompress(tfm, record->buf, record->size, 816 workspace, &unzipped_len); 817 if (ret) { 818 pr_err("crypto_comp_decompress failed, ret = %d!\n", ret); 819 kfree(workspace); 820 return; 821 } 822 823 /* Append ECC notice to decompressed buffer. */ 824 memcpy(workspace + unzipped_len, record->buf + record->size, 825 record->ecc_notice_size); 826 827 /* Copy decompressed contents into an minimum-sized allocation. */ 828 unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size, 829 GFP_KERNEL); 830 kfree(workspace); 831 if (!unzipped) 832 return; 833 834 /* Swap out compressed contents with decompressed contents. */ 835 kfree(record->buf); 836 record->buf = unzipped; 837 record->size = unzipped_len; 838 record->compressed = false; 839} 840 841/* 842 * Read all the records from one persistent store backend. Create 843 * files in our filesystem. Don't warn about -EEXIST errors 844 * when we are re-scanning the backing store looking to add new 845 * error records. 846 */ 847void pstore_get_backend_records(struct pstore_info *psi, 848 struct dentry *root, int quiet) 849{ 850 int failed = 0; 851 unsigned int stop_loop = 65536; 852 853 if (!psi || !root) 854 return; 855 856 mutex_lock(&psi->read_mutex); 857 if (psi->open && psi->open(psi)) 858 goto out; 859 860 /* 861 * Backend callback read() allocates record.buf. decompress_record() 862 * may reallocate record.buf. On success, pstore_mkfile() will keep 863 * the record.buf, so free it only on failure. 864 */ 865 for (; stop_loop; stop_loop--) { 866 struct pstore_record *record; 867 int rc; 868 869 record = kzalloc(sizeof(*record), GFP_KERNEL); 870 if (!record) { 871 pr_err("out of memory creating record\n"); 872 break; 873 } 874 pstore_record_init(record, psi); 875 876 record->size = psi->read(record); 877 878 /* No more records left in backend? */ 879 if (record->size <= 0) { 880 kfree(record); 881 break; 882 } 883 884 decompress_record(record); 885 rc = pstore_mkfile(root, record); 886 if (rc) { 887 /* pstore_mkfile() did not take record, so free it. */ 888 kfree(record->buf); 889 kfree(record); 890 if (rc != -EEXIST || !quiet) 891 failed++; 892 } 893 } 894 if (psi->close) 895 psi->close(psi); 896out: 897 mutex_unlock(&psi->read_mutex); 898 899 if (failed) 900 pr_warn("failed to create %d record(s) from '%s'\n", 901 failed, psi->name); 902 if (!stop_loop) 903 pr_err("looping? Too many records seen from '%s'\n", 904 psi->name); 905} 906 907static void pstore_dowork(struct work_struct *work) 908{ 909 pstore_get_records(1); 910} 911 912static void pstore_timefunc(struct timer_list *unused) 913{ 914 if (pstore_new_entry) { 915 pstore_new_entry = 0; 916 schedule_work(&pstore_work); 917 } 918 919 pstore_timer_kick(); 920} 921 922static void __init pstore_choose_compression(void) 923{ 924 const struct pstore_zbackend *step; 925 926 if (!compress) 927 return; 928 929 for (step = zbackends; step->name; step++) { 930 if (!strcmp(compress, step->name)) { 931 zbackend = step; 932 return; 933 } 934 } 935} 936 937static int __init pstore_init(void) 938{ 939 int ret; 940 941 pstore_choose_compression(); 942 943 /* 944 * Check if any pstore backends registered earlier but did not 945 * initialize compression because crypto was not ready. If so, 946 * initialize compression now. 947 */ 948 allocate_buf_for_compression(); 949 950 ret = pstore_init_fs(); 951 if (ret) 952 free_buf_for_compression(); 953 954 return ret; 955} 956late_initcall(pstore_init); 957 958static void __exit pstore_exit(void) 959{ 960 pstore_exit_fs(); 961} 962module_exit(pstore_exit) 963 964MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>"); 965MODULE_LICENSE("GPL"); 966