// SPDX-License-Identifier: GPL-2.0-only
/*
 * RAM Oops/Panic logger
 *
 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
 * Copyright (C) 2011 Kees Cook <keescook@chromium.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/pstore.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/pstore_ram.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include "internal.h"

#define RAMOOPS_KERNMSG_HDR "===="
#define MIN_MEM_SIZE 4096UL

static ulong record_size = MIN_MEM_SIZE;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"size of each dump done on oops/panic");

static ulong ramoops_console_size = MIN_MEM_SIZE;
module_param_named(console_size, ramoops_console_size, ulong, 0400);
MODULE_PARM_DESC(console_size, "size of kernel console log");

static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
MODULE_PARM_DESC(ftrace_size, "size of ftrace log");

static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
MODULE_PARM_DESC(pmsg_size, "size of user space message log");

static ulong ramoops_blackbox_size = MIN_MEM_SIZE;
module_param_named(blackbox_size, ramoops_blackbox_size, ulong, 0400);
MODULE_PARM_DESC(blackbox_size, "size of blackbox log");

#if IS_ENABLED(CONFIG_PSTORE_BLACKBOX)
bool pstore_ready;
#endif

static unsigned long long mem_address;
module_param_hw(mem_address, ullong, other, 0400);
MODULE_PARM_DESC(mem_address,
		"start of reserved RAM used to store oops/panic logs");

static ulong mem_size;
module_param(mem_size, ulong, 0400);
MODULE_PARM_DESC(mem_size,
		"size of reserved RAM used to store oops/panic logs");

static unsigned int mem_type;
module_param(mem_type, uint, 0400);
MODULE_PARM_DESC(mem_type,
		"set to 1 to try to use unbuffered memory (default 0)");

static int ramoops_max_reason = -1;
module_param_named(max_reason, ramoops_max_reason, int, 0400);
MODULE_PARM_DESC(max_reason,
		"maximum reason for kmsg dump (default 2: Oops and Panic)");

static int ramoops_ecc;
module_param_named(ecc, ramoops_ecc, int, 0400);
MODULE_PARM_DESC(ecc,
		"if non-zero, the option enables ECC support and specifies "
		"ECC buffer size in bytes (1 is a special value, means 16 "
		"bytes ECC)");

static int ramoops_dump_oops = -1;
module_param_named(dump_oops, ramoops_dump_oops, int, 0400);
MODULE_PARM_DESC(dump_oops,
		"(deprecated: use max_reason instead) set to 1 to dump oopses & panics, 0 to only dump panics");
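/*
 * Example usage (illustrative values only, not defaults): the backing RAM
 * region and per-record sizes can be passed on the kernel command line,
 * e.g.:
 *
 *   ramoops.mem_address=0x8000000 ramoops.mem_size=0x100000
 *   ramoops.record_size=0x4000 ramoops.console_size=0x4000
 *
 * or via modprobe when built as a module:
 *
 *   modprobe ramoops mem_address=0x8000000 mem_size=0x100000
 */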
struct ramoops_context {
	struct persistent_ram_zone **dprzs;	/* Oops dump zones */
	struct persistent_ram_zone *cprz;	/* Console zone */
	struct persistent_ram_zone **fprzs;	/* Ftrace zones */
	struct persistent_ram_zone *mprz;	/* PMSG zone */
	struct persistent_ram_zone *bprz;	/* BLACKBOX zone */
	phys_addr_t phys_addr;
	unsigned long size;
	unsigned int memtype;
	size_t record_size;
	size_t console_size;
	size_t ftrace_size;
	size_t pmsg_size;
	size_t blackbox_size;
	u32 flags;
	struct persistent_ram_ecc_info ecc_info;
	unsigned int max_dump_cnt;
	unsigned int dump_write_cnt;
	/* _read_cnt fields must be cleared in ramoops_pstore_open() */
	unsigned int dump_read_cnt;
	unsigned int console_read_cnt;
	unsigned int max_ftrace_cnt;
	unsigned int ftrace_read_cnt;
	unsigned int pmsg_read_cnt;
	unsigned int blackbox_read_cnt;
	struct pstore_info pstore;
};

static struct platform_device *dummy;

static int ramoops_pstore_open(struct pstore_info *psi)
{
	struct ramoops_context *cxt = psi->data;

	cxt->dump_read_cnt = 0;
	cxt->console_read_cnt = 0;
	cxt->ftrace_read_cnt = 0;
	cxt->pmsg_read_cnt = 0;
	cxt->blackbox_read_cnt = 0;
	return 0;
}

static struct persistent_ram_zone *
ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
		     struct pstore_record *record)
{
	struct persistent_ram_zone *prz;

	/* Give up if we never existed or have hit the end. */
	if (!przs)
		return NULL;

	prz = przs[id];
	if (!prz)
		return NULL;

	/* Update old/shadowed buffer. */
	if (prz->type == PSTORE_TYPE_DMESG)
		persistent_ram_save_old(prz);

	if (!persistent_ram_old_size(prz))
		return NULL;

	record->type = prz->type;
	record->id = id;

	return prz;
}

static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
				 bool *compressed)
{
	char data_type;
	int header_length = 0;

	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
		   &header_length) == 3) {
		time->tv_nsec *= 1000;
		if (data_type == 'C')
			*compressed = true;
		else
			*compressed = false;
	} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
			  (time64_t *)&time->tv_sec, &time->tv_nsec,
			  &header_length) == 2) {
		time->tv_nsec *= 1000;
		*compressed = false;
	} else {
		time->tv_sec = 0;
		time->tv_nsec = 0;
		*compressed = false;
	}
	return header_length;
}

static bool prz_ok(struct persistent_ram_zone *prz)
{
	return !!prz && !!(persistent_ram_old_size(prz) +
			   persistent_ram_ecc_string(prz, NULL, 0));
}
static ssize_t ramoops_pstore_read(struct pstore_record *record)
{
	ssize_t size = 0;
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz = NULL;
	int header_length = 0;
	bool free_prz = false;

	/*
	 * Ramoops headers provide time stamps for PSTORE_TYPE_DMESG, but
	 * PSTORE_TYPE_CONSOLE and PSTORE_TYPE_FTRACE don't currently have
	 * valid time stamps, so the timestamp is initialized to zero.
	 */
	record->time.tv_sec = 0;
	record->time.tv_nsec = 0;
	record->compressed = false;

	/* Find the next valid persistent_ram_zone for DMESG */
	while (cxt->dump_read_cnt < cxt->max_dump_cnt && !prz) {
		prz = ramoops_get_next_prz(cxt->dprzs, cxt->dump_read_cnt++,
					   record);
		if (!prz_ok(prz))
			continue;
		header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz),
						      &record->time,
						      &record->compressed);
		/* Clear and skip this DMESG record if it has no valid header */
		if (!header_length) {
			persistent_ram_free_old(prz);
			persistent_ram_zap(prz);
			prz = NULL;
		}
	}

	if (!prz_ok(prz) && !cxt->console_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->cprz, 0 /* single */, record);

	if (!prz_ok(prz) && !cxt->pmsg_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->mprz, 0 /* single */, record);

	if (!prz_ok(prz) && !cxt->blackbox_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->bprz, 0 /* single */, record);

	/* ftrace is last since it may want to dynamically allocate memory. */
	if (!prz_ok(prz)) {
		if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU) &&
		    !cxt->ftrace_read_cnt++) {
			prz = ramoops_get_next_prz(cxt->fprzs, 0 /* single */,
						   record);
		} else {
			/*
			 * Build a new dummy record which combines all the
			 * per-cpu records including metadata and ecc info.
			 */
			struct persistent_ram_zone *tmp_prz, *prz_next;

			tmp_prz = kzalloc(sizeof(struct persistent_ram_zone),
					  GFP_KERNEL);
			if (!tmp_prz)
				return -ENOMEM;
			prz = tmp_prz;
			free_prz = true;

			while (cxt->ftrace_read_cnt < cxt->max_ftrace_cnt) {
				prz_next = ramoops_get_next_prz(cxt->fprzs,
						cxt->ftrace_read_cnt++, record);

				if (!prz_ok(prz_next))
					continue;

				tmp_prz->ecc_info = prz_next->ecc_info;
				tmp_prz->corrected_bytes +=
						prz_next->corrected_bytes;
				tmp_prz->bad_blocks += prz_next->bad_blocks;

				size = pstore_ftrace_combine_log(
						&tmp_prz->old_log,
						&tmp_prz->old_log_size,
						prz_next->old_log,
						prz_next->old_log_size);
				if (size)
					goto out;
			}
			record->id = 0;
		}
	}

	if (!prz_ok(prz)) {
		size = 0;
		goto out;
	}

	size = persistent_ram_old_size(prz) - header_length;

	/* ECC correction notice */
	record->ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);

	record->buf = kmalloc(size + record->ecc_notice_size + 1, GFP_KERNEL);
	if (record->buf == NULL) {
		size = -ENOMEM;
		goto out;
	}

	memcpy(record->buf, (char *)persistent_ram_old(prz) + header_length,
	       size);

	persistent_ram_ecc_string(prz, record->buf + size,
				  record->ecc_notice_size + 1);

out:
	if (free_prz) {
		kfree(prz->old_log);
		kfree(prz);
	}

	return size;
}

static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
				     struct pstore_record *record)
{
	char hdr[36]; /* "===="(4), %lld(20), "."(1), %06lu(6), "-%c\n"(3) */
	size_t len;

	len = scnprintf(hdr, sizeof(hdr),
			RAMOOPS_KERNMSG_HDR "%lld.%06lu-%c\n",
			(time64_t)record->time.tv_sec,
			record->time.tv_nsec / 1000,
			record->compressed ? 'C' : 'D');
	persistent_ram_write(prz, hdr, len);

	return len;
}
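/*
 * Illustrative header layout (values are hypothetical): a compressed dump
 * written at 1700000000.123456 seconds is prefixed with
 *
 *   ====1700000000.123456-C
 *
 * ramoops_read_kmsg_hdr() parses this back into record->time (nanoseconds
 * are reconstructed from the microsecond field) and record->compressed.
 */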
static int notrace ramoops_pstore_write(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;
	size_t size, hlen;

	if (record->type == PSTORE_TYPE_CONSOLE) {
		if (!cxt->cprz)
			return -ENOMEM;
		persistent_ram_write(cxt->cprz, record->buf, record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_FTRACE) {
		int zonenum;

		if (!cxt->fprzs)
			return -ENOMEM;
		/*
		 * Choose the zone based on whether we're using per-cpu
		 * buffers.
		 */
		if (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
			zonenum = smp_processor_id();
		else
			zonenum = 0;

		persistent_ram_write(cxt->fprzs[zonenum], record->buf,
				     record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_PMSG) {
		pr_warn_ratelimited("PMSG shouldn't call %s\n", __func__);
		return -EINVAL;
	} else if (record->type == PSTORE_TYPE_BLACKBOX) {
		if (!cxt->bprz)
			return -ENOMEM;
		persistent_ram_write(cxt->bprz, record->buf, record->size);
		return 0;
	}

	if (record->type != PSTORE_TYPE_DMESG)
		return -EINVAL;

	/*
	 * We could filter on record->reason here if we wanted to (which
	 * would duplicate what happened before the "max_reason" setting
	 * was added), but that would defeat the purpose of a system
	 * changing printk.always_kmsg_dump, so instead log everything that
	 * the kmsg dumper sends us, since it should be doing the filtering
	 * based on the combination of printk.always_kmsg_dump and our
	 * requested "max_reason".
	 */

	/*
	 * Explicitly only take the first part of any new crash.
	 * If our buffer is larger than kmsg_bytes, this can never happen,
	 * and if our buffer is smaller than kmsg_bytes, we don't want the
	 * report split across multiple records.
	 */
	if (record->part != 1)
		return -ENOSPC;

	if (!cxt->dprzs)
		return -ENOSPC;

	prz = cxt->dprzs[cxt->dump_write_cnt];

	/*
	 * Since this is a new crash dump, we need to reset the buffer in
	 * case it still has an old dump present. Without this, the new dump
	 * will get appended, which would seriously confuse anything trying
	 * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
	 * expects to find a dump header at the beginning of buffer data, so
	 * we must reset the buffer values, in order to ensure that the
	 * header will be written to the beginning of the buffer.
	 */
	persistent_ram_zap(prz);

	/* Build header and append record contents. */
	hlen = ramoops_write_kmsg_hdr(prz, record);
	if (!hlen)
		return -ENOMEM;

	size = record->size;
	if (size + hlen > prz->buffer_size)
		size = prz->buffer_size - hlen;
	persistent_ram_write(prz, record->buf, size);

	cxt->dump_write_cnt = (cxt->dump_write_cnt + 1) % cxt->max_dump_cnt;

	return 0;
}
static int notrace ramoops_pstore_write_user(struct pstore_record *record,
					     const char __user *buf)
{
	if (record->type == PSTORE_TYPE_PMSG) {
		struct ramoops_context *cxt = record->psi->data;

		if (!cxt->mprz)
			return -ENOMEM;
		return persistent_ram_write_user(cxt->mprz, buf, record->size);
	}

	return -EINVAL;
}

static int ramoops_pstore_erase(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;

	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		if (record->id >= cxt->max_dump_cnt)
			return -EINVAL;
		prz = cxt->dprzs[record->id];
		break;
	case PSTORE_TYPE_CONSOLE:
		prz = cxt->cprz;
		break;
	case PSTORE_TYPE_FTRACE:
		if (record->id >= cxt->max_ftrace_cnt)
			return -EINVAL;
		prz = cxt->fprzs[record->id];
		break;
	case PSTORE_TYPE_PMSG:
		prz = cxt->mprz;
		break;
	case PSTORE_TYPE_BLACKBOX:
		prz = cxt->bprz;
		break;
	default:
		return -EINVAL;
	}

	persistent_ram_free_old(prz);
	persistent_ram_zap(prz);

	return 0;
}

static struct ramoops_context oops_cxt = {
	.pstore = {
		.owner		= THIS_MODULE,
		.name		= "ramoops",
		.open		= ramoops_pstore_open,
		.read		= ramoops_pstore_read,
		.write		= ramoops_pstore_write,
		.write_user	= ramoops_pstore_write_user,
		.erase		= ramoops_pstore_erase,
	},
};

static void ramoops_free_przs(struct ramoops_context *cxt)
{
	int i;

	/* Free dump PRZs */
	if (cxt->dprzs) {
		for (i = 0; i < cxt->max_dump_cnt; i++)
			persistent_ram_free(cxt->dprzs[i]);

		kfree(cxt->dprzs);
		cxt->max_dump_cnt = 0;
	}

	/* Free ftrace PRZs */
	if (cxt->fprzs) {
		for (i = 0; i < cxt->max_ftrace_cnt; i++)
			persistent_ram_free(cxt->fprzs[i]);
		kfree(cxt->fprzs);
		cxt->max_ftrace_cnt = 0;
	}
}

static int ramoops_init_przs(const char *name,
			     struct device *dev, struct ramoops_context *cxt,
			     struct persistent_ram_zone ***przs,
			     phys_addr_t *paddr, size_t mem_sz,
			     ssize_t record_size,
			     unsigned int *cnt, u32 sig, u32 flags)
{
	int err = -ENOMEM;
	int i;
	size_t zone_sz;
	struct persistent_ram_zone **prz_ar;

	/* Allocate nothing for 0 mem_sz or 0 record_size. */
	if (mem_sz == 0 || record_size == 0) {
		*cnt = 0;
		return 0;
	}

	/*
	 * If we have a negative record size, calculate it based on
	 * mem_sz / *cnt. If we have a positive record size, calculate
	 * cnt from mem_sz / record_size.
	 */
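	/*
	 * Worked example (hypothetical numbers): with mem_sz = 64 KiB and
	 * record_size = -1, a caller-provided *cnt of 4 yields 16 KiB
	 * records; with record_size = 16 KiB instead, *cnt is derived as
	 * 64 KiB / 16 KiB = 4.
	 */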
	if (record_size < 0) {
		if (*cnt == 0)
			return 0;
		record_size = mem_sz / *cnt;
		if (record_size == 0) {
			dev_err(dev, "%s record size == 0 (%zu / %u)\n",
				name, mem_sz, *cnt);
			goto fail;
		}
	} else {
		*cnt = mem_sz / record_size;
		if (*cnt == 0) {
			dev_err(dev, "%s record count == 0 (%zu / %zu)\n",
				name, mem_sz, record_size);
			goto fail;
		}
	}

	if (*paddr + mem_sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name,
			mem_sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		goto fail;
	}

	zone_sz = mem_sz / *cnt;
	zone_sz = ALIGN_DOWN(zone_sz, 2);
	if (!zone_sz) {
		dev_err(dev, "%s zone size == 0\n", name);
		goto fail;
	}

	prz_ar = kcalloc(*cnt, sizeof(**przs), GFP_KERNEL);
	if (!prz_ar)
		goto fail;

	for (i = 0; i < *cnt; i++) {
		char *label;

		if (*cnt == 1)
			label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
		else
			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
					  name, i, *cnt - 1);
		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
					       &cxt->ecc_info,
					       cxt->memtype, flags, label);
		kfree(label);
		if (IS_ERR(prz_ar[i])) {
			err = PTR_ERR(prz_ar[i]);
			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
				name, record_size,
				(unsigned long long)*paddr, err);

			while (i > 0) {
				i--;
				persistent_ram_free(prz_ar[i]);
			}
			kfree(prz_ar);
			goto fail;
		}
		*paddr += zone_sz;
		prz_ar[i]->type = pstore_name_to_type(name);
	}

	*przs = prz_ar;
	return 0;

fail:
	*cnt = 0;
	return err;
}

static int ramoops_init_prz(const char *name,
			    struct device *dev, struct ramoops_context *cxt,
			    struct persistent_ram_zone **prz,
			    phys_addr_t *paddr, size_t sz, u32 sig)
{
	char *label;

	if (!sz)
		return 0;

	if (*paddr + sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name, sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		return -ENOMEM;
	}

	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
				  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
	kfree(label);
	if (IS_ERR(*prz)) {
		int err = PTR_ERR(*prz);

		dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
			name, sz, (unsigned long long)*paddr, err);
		return err;
	}

	*paddr += sz;
	(*prz)->type = pstore_name_to_type(name);

	return 0;
}

/* Read a u32 from a dt property and make sure it's safe for an int. */
static int ramoops_parse_dt_u32(struct platform_device *pdev,
				const char *propname,
				u32 default_value, u32 *value)
{
	u32 val32 = 0;
	int ret;

	ret = of_property_read_u32(pdev->dev.of_node, propname, &val32);
	if (ret == -EINVAL) {
		/* field is missing, use default value. */
		val32 = default_value;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse property %s: %d\n",
			propname, ret);
		return ret;
	}

	/* Sanity check our results. */
	if (val32 > INT_MAX) {
		dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32);
		return -EOVERFLOW;
	}

	*value = val32;
	return 0;
}
static int ramoops_parse_dt(struct platform_device *pdev,
			    struct ramoops_platform_data *pdata)
{
	struct device_node *of_node = pdev->dev.of_node;
	struct device_node *parent_node;
	struct resource *res;
	u32 value;
	int ret;

	dev_dbg(&pdev->dev, "using Device Tree\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"failed to locate DT /reserved-memory resource\n");
		return -EINVAL;
	}

	pdata->mem_size = resource_size(res);
	pdata->mem_address = res->start;
	pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
	/*
	 * Setting "no-dump-oops" is deprecated and will be ignored if
	 * "max_reason" is also specified.
	 */
	if (of_property_read_bool(of_node, "no-dump-oops"))
		pdata->max_reason = KMSG_DUMP_PANIC;
	else
		pdata->max_reason = KMSG_DUMP_OOPS;

#define parse_u32(name, field, default_value) {				\
		ret = ramoops_parse_dt_u32(pdev, name, default_value,	\
					   &value);			\
		if (ret < 0)						\
			return ret;					\
		field = value;						\
	}

	parse_u32("record-size", pdata->record_size, 0);
	parse_u32("console-size", pdata->console_size, 0);
	parse_u32("ftrace-size", pdata->ftrace_size, 0);
	parse_u32("pmsg-size", pdata->pmsg_size, 0);
	parse_u32("blackbox-size", pdata->blackbox_size, 0);
	parse_u32("ecc-size", pdata->ecc_info.ecc_size, 0);
	parse_u32("flags", pdata->flags, 0);
	parse_u32("max-reason", pdata->max_reason, pdata->max_reason);

#undef parse_u32

	/*
	 * Some old Chromebooks relied on the kernel setting the
	 * console_size and pmsg_size to the record size since that's
	 * what the downstream kernel did. These same Chromebooks had
	 * "ramoops" straight under the root node which isn't
	 * according to the current upstream bindings (though it was
	 * arguably acceptable under a prior version of the bindings).
	 * Let's make those old Chromebooks work by detecting that
	 * we're not a child of "reserved-memory" and mimicking the
	 * expected behavior.
	 */
	parent_node = of_get_parent(of_node);
	if (!of_node_name_eq(parent_node, "reserved-memory") &&
	    !pdata->console_size && !pdata->ftrace_size &&
	    !pdata->pmsg_size && !pdata->ecc_info.ecc_size &&
	    !pdata->blackbox_size) {
		pdata->console_size = pdata->record_size;
		pdata->pmsg_size = pdata->record_size;
		pdata->blackbox_size = pdata->record_size;
	}
	of_node_put(parent_node);

	return 0;
}
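/*
 * Illustrative device tree usage (addresses and sizes are hypothetical):
 * the node is expected as a child of /reserved-memory, carrying the
 * properties parsed above, e.g.:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		ramoops@8f000000 {
 *			compatible = "ramoops";
 *			reg = <0x8f000000 0x100000>;
 *			record-size = <0x4000>;
 *			console-size = <0x4000>;
 *			pmsg-size = <0x4000>;
 *		};
 *	};
 */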
static int ramoops_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ramoops_platform_data *pdata = dev->platform_data;
	struct ramoops_platform_data pdata_local;
	struct ramoops_context *cxt = &oops_cxt;
	size_t dump_mem_sz;
	phys_addr_t paddr;
	int err = -EINVAL;

	/*
	 * Only a single ramoops area allowed at a time, so fail extra
	 * probes.
	 */
	if (cxt->max_dump_cnt) {
		pr_err("already initialized\n");
		goto fail_out;
	}

	if (dev_of_node(dev) && !pdata) {
		pdata = &pdata_local;
		memset(pdata, 0, sizeof(*pdata));

		err = ramoops_parse_dt(pdev, pdata);
		if (err < 0)
			goto fail_out;
	}

	/* Make sure we didn't get a bogus platform data pointer. */
	if (!pdata) {
		pr_err("NULL platform data\n");
		err = -EINVAL;
		goto fail_out;
	}

	if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
			!pdata->ftrace_size && !pdata->pmsg_size &&
			!pdata->blackbox_size)) {
		pr_err("The memory size and at least one record size must be non-zero\n");
		err = -EINVAL;
		goto fail_out;
	}

	if (pdata->record_size && !is_power_of_2(pdata->record_size))
		pdata->record_size = rounddown_pow_of_two(pdata->record_size);
	if (pdata->console_size && !is_power_of_2(pdata->console_size))
		pdata->console_size = rounddown_pow_of_two(pdata->console_size);
	if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
		pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
	if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
		pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);
	if (pdata->blackbox_size && !is_power_of_2(pdata->blackbox_size))
		pdata->blackbox_size = rounddown_pow_of_two(pdata->blackbox_size);

	cxt->size = pdata->mem_size;
	cxt->phys_addr = pdata->mem_address;
	cxt->memtype = pdata->mem_type;
	cxt->record_size = pdata->record_size;
	cxt->console_size = pdata->console_size;
	cxt->ftrace_size = pdata->ftrace_size;
	cxt->pmsg_size = pdata->pmsg_size;
	cxt->blackbox_size = pdata->blackbox_size;
	cxt->flags = pdata->flags;
	cxt->ecc_info = pdata->ecc_info;

	paddr = cxt->phys_addr;

	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
			- cxt->pmsg_size - cxt->blackbox_size;

	err = ramoops_init_prz("blackbox", dev, cxt, &cxt->bprz, &paddr,
			       cxt->blackbox_size, 0);
	if (err)
		goto fail_init_bprz;
#if IS_ENABLED(CONFIG_PSTORE_BLACKBOX)
	pstore_ready = true;
#endif

	err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
				dump_mem_sz, cxt->record_size,
				&cxt->max_dump_cnt, 0, 0);
	if (err)
		goto fail_init_dprzs;

	err = ramoops_init_prz("console", dev, cxt, &cxt->cprz, &paddr,
			       cxt->console_size, 0);
	if (err)
		goto fail_init_cprz;

	cxt->max_ftrace_cnt = (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
				? nr_cpu_ids
				: 1;
	err = ramoops_init_przs("ftrace", dev, cxt, &cxt->fprzs, &paddr,
				cxt->ftrace_size, -1,
				&cxt->max_ftrace_cnt, LINUX_VERSION_CODE,
				(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
					? PRZ_FLAG_NO_LOCK : 0);
	if (err)
		goto fail_init_fprz;

	err = ramoops_init_prz("pmsg", dev, cxt, &cxt->mprz, &paddr,
			       cxt->pmsg_size, 0);
	if (err)
		goto fail_init_mprz;

	cxt->pstore.data = cxt;
	/*
	 * Prepare frontend flags based on which areas are initialized.
	 * For ramoops_init_przs() cases, the "max count" variable tells
	 * if there are regions present. For ramoops_init_prz() cases,
	 * the single region size is how to check.
	 */
	cxt->pstore.flags = 0;
	if (cxt->max_dump_cnt) {
		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
		cxt->pstore.max_reason = pdata->max_reason;
	}
	if (cxt->console_size)
		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
	if (cxt->max_ftrace_cnt)
		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
	if (cxt->pmsg_size)
		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
	if (cxt->blackbox_size)
		cxt->pstore.flags |= PSTORE_FLAGS_BLACKBOX;

	/*
	 * Since bufsize is only used for dmesg crash dumps, it
	 * must match the size of the dprz record (after PRZ header
	 * and ECC bytes have been accounted for).
	 */
	if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
		cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
		cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
		if (!cxt->pstore.buf) {
			pr_err("cannot allocate pstore crash dump buffer\n");
			err = -ENOMEM;
			goto fail_clear;
		}
	}

	err = pstore_register(&cxt->pstore);
	if (err) {
		pr_err("registering with pstore failed\n");
		goto fail_buf;
	}

	/*
	 * Update the module parameter variables as well so they are visible
	 * through /sys/module/ramoops/parameters/
	 */
	mem_size = pdata->mem_size;
	mem_address = pdata->mem_address;
	record_size = pdata->record_size;
	ramoops_max_reason = pdata->max_reason;
	ramoops_console_size = pdata->console_size;
	ramoops_pmsg_size = pdata->pmsg_size;
	ramoops_ftrace_size = pdata->ftrace_size;
	ramoops_blackbox_size = pdata->blackbox_size;

	pr_info("using 0x%lx@0x%llx, ecc: %d\n",
		cxt->size, (unsigned long long)cxt->phys_addr,
		cxt->ecc_info.ecc_size);

	return 0;

	/* Unwind in reverse order of the initializations above. */
fail_buf:
	kfree(cxt->pstore.buf);
fail_clear:
	cxt->pstore.bufsize = 0;
	persistent_ram_free(cxt->mprz);
fail_init_mprz:
fail_init_fprz:
	persistent_ram_free(cxt->cprz);
fail_init_cprz:
	ramoops_free_przs(cxt);
fail_init_dprzs:
	persistent_ram_free(cxt->bprz);
fail_init_bprz:
fail_out:
	return err;
}

static int ramoops_remove(struct platform_device *pdev)
{
	struct ramoops_context *cxt = &oops_cxt;

	pstore_unregister(&cxt->pstore);

	kfree(cxt->pstore.buf);
	cxt->pstore.bufsize = 0;

	persistent_ram_free(cxt->mprz);
	persistent_ram_free(cxt->cprz);
	persistent_ram_free(cxt->bprz);
	ramoops_free_przs(cxt);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "ramoops" },
	{}
};

static struct platform_driver ramoops_driver = {
	.probe		= ramoops_probe,
	.remove		= ramoops_remove,
	.driver		= {
		.name		= "ramoops",
		.of_match_table	= dt_match,
	},
};

static inline void ramoops_unregister_dummy(void)
{
	platform_device_unregister(dummy);
	dummy = NULL;
}
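/*
 * Illustrative parameter precedence (hypothetical command lines): booting
 * with "ramoops.max_reason=2" records both oopses and panics regardless of
 * "dump_oops"; with only the deprecated "ramoops.dump_oops=0", max_reason
 * falls back to KMSG_DUMP_PANIC (panics only); with neither set, it
 * defaults to KMSG_DUMP_OOPS.
 */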
static void __init ramoops_register_dummy(void)
{
	struct ramoops_platform_data pdata;

	/*
	 * Prepare a dummy platform data structure to carry the module
	 * parameters. If mem_size isn't set, then there are no module
	 * parameters, and we can skip this.
	 */
	if (!mem_size)
		return;

	pr_info("using module parameters\n");

	memset(&pdata, 0, sizeof(pdata));
	pdata.mem_size = mem_size;
	pdata.mem_address = mem_address;
	pdata.mem_type = mem_type;
	pdata.record_size = record_size;
	pdata.console_size = ramoops_console_size;
	pdata.ftrace_size = ramoops_ftrace_size;
	pdata.pmsg_size = ramoops_pmsg_size;
	pdata.blackbox_size = ramoops_blackbox_size;
	/* If "max_reason" is set, its value has priority over "dump_oops". */
	if (ramoops_max_reason >= 0)
		pdata.max_reason = ramoops_max_reason;
	/* Otherwise, if "dump_oops" is set, parse it into "max_reason". */
	else if (ramoops_dump_oops != -1)
		pdata.max_reason = ramoops_dump_oops ? KMSG_DUMP_OOPS
						     : KMSG_DUMP_PANIC;
	/* And if neither are explicitly set, use the default. */
	else
		pdata.max_reason = KMSG_DUMP_OOPS;
	pdata.flags = RAMOOPS_FLAG_FTRACE_PER_CPU;

	/*
	 * For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
	 * (using 1 byte for ECC isn't much of use anyway).
	 */
	pdata.ecc_info.ecc_size = ramoops_ecc == 1 ? 16 : ramoops_ecc;

	dummy = platform_device_register_data(NULL, "ramoops", -1,
			&pdata, sizeof(pdata));
	if (IS_ERR(dummy)) {
		pr_info("could not create platform device: %ld\n",
			PTR_ERR(dummy));
		dummy = NULL;
	}
}

static int __init ramoops_init(void)
{
	int ret;

	ramoops_register_dummy();
	ret = platform_driver_register(&ramoops_driver);
	if (ret != 0)
		ramoops_unregister_dummy();

	return ret;
}
postcore_initcall(ramoops_init);

static void __exit ramoops_exit(void)
{
	platform_driver_unregister(&ramoops_driver);
	ramoops_unregister_dummy();
}
module_exit(ramoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");