// SPDX-License-Identifier: GPL-2.0-only
/*
 * scan.c - support for transforming the ACPI namespace into individual objects
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/signal.h>
#include <linux/kthread.h>
#include <linux/dmi.h>
#include <linux/nls.h>
#include <linux/dma-map-ops.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h>
#include <linux/dma-direct.h>

#include "internal.h"

#define _COMPONENT		ACPI_BUS_COMPONENT
ACPI_MODULE_NAME("scan");
extern struct acpi_device *acpi_root;

#define ACPI_BUS_CLASS			"system_bus"
#define ACPI_BUS_HID			"LNXSYBUS"
#define ACPI_BUS_DEVICE_NAME		"System Bus"

#define ACPI_IS_ROOT_DEVICE(device)	(!(device)->parent)

#define INVALID_ACPI_HANDLE	((acpi_handle)empty_zero_page)

static const char *dummy_hid = "device";

static LIST_HEAD(acpi_dep_list);
static DEFINE_MUTEX(acpi_dep_list_lock);
LIST_HEAD(acpi_bus_id_list);
static DEFINE_MUTEX(acpi_scan_lock);
static LIST_HEAD(acpi_scan_handlers_list);
DEFINE_MUTEX(acpi_device_lock);
LIST_HEAD(acpi_wakeup_device_list);
static DEFINE_MUTEX(acpi_hp_context_lock);

/*
 * The UART device described by the SPCR table is the only object which needs
 * special-casing. Everything else is covered by ACPI namespace paths in the
 * STAO table.
 */
static u64 spcr_uart_addr;

struct acpi_dep_data {
	struct list_head node;
	acpi_handle master;
	acpi_handle slave;
};

void acpi_scan_lock_acquire(void)
{
	mutex_lock(&acpi_scan_lock);
}
EXPORT_SYMBOL_GPL(acpi_scan_lock_acquire);

void acpi_scan_lock_release(void)
{
	mutex_unlock(&acpi_scan_lock);
}
EXPORT_SYMBOL_GPL(acpi_scan_lock_release);

void acpi_lock_hp_context(void)
{
	mutex_lock(&acpi_hp_context_lock);
}

void acpi_unlock_hp_context(void)
{
	mutex_unlock(&acpi_hp_context_lock);
}

void acpi_initialize_hp_context(struct acpi_device *adev,
				struct acpi_hotplug_context *hp,
				int (*notify)(struct acpi_device *, u32),
				void (*uevent)(struct acpi_device *, u32))
{
	acpi_lock_hp_context();
	hp->notify = notify;
	hp->uevent = uevent;
	acpi_set_hp_context(adev, hp);
	acpi_unlock_hp_context();
}
EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);

int acpi_scan_add_handler(struct acpi_scan_handler *handler)
{
	if (!handler)
		return -EINVAL;

	list_add_tail(&handler->list_node, &acpi_scan_handlers_list);
	return 0;
}

int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
				       const char *hotplug_profile_name)
{
	int error;

	error = acpi_scan_add_handler(handler);
	if (error)
		return error;

	acpi_sysfs_add_hotplug_profile(&handler->hotplug, hotplug_profile_name);
	return 0;
}

bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
{
	struct acpi_device_physical_node *pn;
	bool offline = true;
	char *envp[] = { "EVENT=offline", NULL };

	/*
	 * acpi_container_offline() calls this for all of the container's
	 * children under the container's physical_node_lock lock.
125 */ 126 mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING); 127 128 list_for_each_entry(pn, &adev->physical_node_list, node) 129 if (device_supports_offline(pn->dev) && !pn->dev->offline) { 130 if (uevent) 131 kobject_uevent_env(&pn->dev->kobj, KOBJ_CHANGE, envp); 132 133 offline = false; 134 break; 135 } 136 137 mutex_unlock(&adev->physical_node_lock); 138 return offline; 139} 140 141static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data, 142 void **ret_p) 143{ 144 struct acpi_device *device = NULL; 145 struct acpi_device_physical_node *pn; 146 bool second_pass = (bool)data; 147 acpi_status status = AE_OK; 148 149 if (acpi_bus_get_device(handle, &device)) 150 return AE_OK; 151 152 if (device->handler && !device->handler->hotplug.enabled) { 153 *ret_p = &device->dev; 154 return AE_SUPPORT; 155 } 156 157 mutex_lock(&device->physical_node_lock); 158 159 list_for_each_entry(pn, &device->physical_node_list, node) { 160 int ret; 161 162 if (second_pass) { 163 /* Skip devices offlined by the first pass. */ 164 if (pn->put_online) 165 continue; 166 } else { 167 pn->put_online = false; 168 } 169 ret = device_offline(pn->dev); 170 if (ret >= 0) { 171 pn->put_online = !ret; 172 } else { 173 *ret_p = pn->dev; 174 if (second_pass) { 175 status = AE_ERROR; 176 break; 177 } 178 } 179 } 180 181 mutex_unlock(&device->physical_node_lock); 182 183 return status; 184} 185 186static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data, 187 void **ret_p) 188{ 189 struct acpi_device *device = NULL; 190 struct acpi_device_physical_node *pn; 191 192 if (acpi_bus_get_device(handle, &device)) 193 return AE_OK; 194 195 mutex_lock(&device->physical_node_lock); 196 197 list_for_each_entry(pn, &device->physical_node_list, node) 198 if (pn->put_online) { 199 device_online(pn->dev); 200 pn->put_online = false; 201 } 202 203 mutex_unlock(&device->physical_node_lock); 204 205 return AE_OK; 206} 207 208static int acpi_scan_try_to_offline(struct acpi_device *device) 209{ 210 acpi_handle handle = device->handle; 211 struct device *errdev = NULL; 212 acpi_status status; 213 214 /* 215 * Carry out two passes here and ignore errors in the first pass, 216 * because if the devices in question are memory blocks and 217 * CONFIG_MEMCG is set, one of the blocks may hold data structures 218 * that the other blocks depend on, but it is not known in advance which 219 * block holds them. 220 * 221 * If the first pass is successful, the second one isn't needed, though. 
222 */ 223 status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, 224 NULL, acpi_bus_offline, (void *)false, 225 (void **)&errdev); 226 if (status == AE_SUPPORT) { 227 dev_warn(errdev, "Offline disabled.\n"); 228 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, 229 acpi_bus_online, NULL, NULL, NULL); 230 return -EPERM; 231 } 232 acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev); 233 if (errdev) { 234 errdev = NULL; 235 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, 236 NULL, acpi_bus_offline, (void *)true, 237 (void **)&errdev); 238 if (!errdev) 239 acpi_bus_offline(handle, 0, (void *)true, 240 (void **)&errdev); 241 242 if (errdev) { 243 dev_warn(errdev, "Offline failed.\n"); 244 acpi_bus_online(handle, 0, NULL, NULL); 245 acpi_walk_namespace(ACPI_TYPE_ANY, handle, 246 ACPI_UINT32_MAX, acpi_bus_online, 247 NULL, NULL, NULL); 248 return -EBUSY; 249 } 250 } 251 return 0; 252} 253 254static int acpi_scan_hot_remove(struct acpi_device *device) 255{ 256 acpi_handle handle = device->handle; 257 unsigned long long sta; 258 acpi_status status; 259 260 if (device->handler && device->handler->hotplug.demand_offline) { 261 if (!acpi_scan_is_offline(device, true)) 262 return -EBUSY; 263 } else { 264 int error = acpi_scan_try_to_offline(device); 265 if (error) 266 return error; 267 } 268 269 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 270 "Hot-removing device %s...\n", dev_name(&device->dev))); 271 272 acpi_bus_trim(device); 273 274 acpi_evaluate_lck(handle, 0); 275 /* 276 * TBD: _EJD support. 277 */ 278 status = acpi_evaluate_ej0(handle); 279 if (status == AE_NOT_FOUND) 280 return -ENODEV; 281 else if (ACPI_FAILURE(status)) 282 return -EIO; 283 284 /* 285 * Verify if eject was indeed successful. If not, log an error 286 * message. No need to call _OST since _EJ0 call was made OK. 287 */ 288 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); 289 if (ACPI_FAILURE(status)) { 290 acpi_handle_warn(handle, 291 "Status check after eject failed (0x%x)\n", status); 292 } else if (sta & ACPI_STA_DEVICE_ENABLED) { 293 acpi_handle_warn(handle, 294 "Eject incomplete - status 0x%llx\n", sta); 295 } 296 297 return 0; 298} 299 300static int acpi_scan_device_not_present(struct acpi_device *adev) 301{ 302 if (!acpi_device_enumerated(adev)) { 303 dev_warn(&adev->dev, "Still not present\n"); 304 return -EALREADY; 305 } 306 acpi_bus_trim(adev); 307 return 0; 308} 309 310static int acpi_scan_device_check(struct acpi_device *adev) 311{ 312 int error; 313 314 acpi_bus_get_status(adev); 315 if (adev->status.present || adev->status.functional) { 316 /* 317 * This function is only called for device objects for which 318 * matching scan handlers exist. The only situation in which 319 * the scan handler is not attached to this device object yet 320 * is when the device has just appeared (either it wasn't 321 * present at all before or it was removed and then added 322 * again). 
323 */ 324 if (adev->handler) { 325 dev_warn(&adev->dev, "Already enumerated\n"); 326 return -EALREADY; 327 } 328 error = acpi_bus_scan(adev->handle); 329 if (error) { 330 dev_warn(&adev->dev, "Namespace scan failure\n"); 331 return error; 332 } 333 if (!adev->handler) { 334 dev_warn(&adev->dev, "Enumeration failure\n"); 335 error = -ENODEV; 336 } 337 } else { 338 error = acpi_scan_device_not_present(adev); 339 } 340 return error; 341} 342 343static int acpi_scan_bus_check(struct acpi_device *adev) 344{ 345 struct acpi_scan_handler *handler = adev->handler; 346 struct acpi_device *child; 347 int error; 348 349 acpi_bus_get_status(adev); 350 if (!(adev->status.present || adev->status.functional)) { 351 acpi_scan_device_not_present(adev); 352 return 0; 353 } 354 if (handler && handler->hotplug.scan_dependent) 355 return handler->hotplug.scan_dependent(adev); 356 357 error = acpi_bus_scan(adev->handle); 358 if (error) { 359 dev_warn(&adev->dev, "Namespace scan failure\n"); 360 return error; 361 } 362 list_for_each_entry(child, &adev->children, node) { 363 error = acpi_scan_bus_check(child); 364 if (error) 365 return error; 366 } 367 return 0; 368} 369 370static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type) 371{ 372 switch (type) { 373 case ACPI_NOTIFY_BUS_CHECK: 374 return acpi_scan_bus_check(adev); 375 case ACPI_NOTIFY_DEVICE_CHECK: 376 return acpi_scan_device_check(adev); 377 case ACPI_NOTIFY_EJECT_REQUEST: 378 case ACPI_OST_EC_OSPM_EJECT: 379 if (adev->handler && !adev->handler->hotplug.enabled) { 380 dev_info(&adev->dev, "Eject disabled\n"); 381 return -EPERM; 382 } 383 acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_EJECT_REQUEST, 384 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); 385 return acpi_scan_hot_remove(adev); 386 } 387 return -EINVAL; 388} 389 390void acpi_device_hotplug(struct acpi_device *adev, u32 src) 391{ 392 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; 393 int error = -ENODEV; 394 395 lock_device_hotplug(); 396 mutex_lock(&acpi_scan_lock); 397 398 /* 399 * The device object's ACPI handle cannot become invalid as long as we 400 * are holding acpi_scan_lock, but it might have become invalid before 401 * that lock was acquired. 402 */ 403 if (adev->handle == INVALID_ACPI_HANDLE) 404 goto err_out; 405 406 if (adev->flags.is_dock_station) { 407 error = dock_notify(adev, src); 408 } else if (adev->flags.hotplug_notify) { 409 error = acpi_generic_hotplug_event(adev, src); 410 } else { 411 int (*notify)(struct acpi_device *, u32); 412 413 acpi_lock_hp_context(); 414 notify = adev->hp ? adev->hp->notify : NULL; 415 acpi_unlock_hp_context(); 416 /* 417 * There may be additional notify handlers for device objects 418 * without the .event() callback, so ignore them here. 
419 */ 420 if (notify) 421 error = notify(adev, src); 422 else 423 goto out; 424 } 425 switch (error) { 426 case 0: 427 ost_code = ACPI_OST_SC_SUCCESS; 428 break; 429 case -EPERM: 430 ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; 431 break; 432 case -EBUSY: 433 ost_code = ACPI_OST_SC_DEVICE_BUSY; 434 break; 435 default: 436 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; 437 break; 438 } 439 440 err_out: 441 acpi_evaluate_ost(adev->handle, src, ost_code, NULL); 442 443 out: 444 acpi_bus_put_acpi_device(adev); 445 mutex_unlock(&acpi_scan_lock); 446 unlock_device_hotplug(); 447} 448 449static void acpi_free_power_resources_lists(struct acpi_device *device) 450{ 451 int i; 452 453 if (device->wakeup.flags.valid) 454 acpi_power_resources_list_free(&device->wakeup.resources); 455 456 if (!device->power.flags.power_resources) 457 return; 458 459 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { 460 struct acpi_device_power_state *ps = &device->power.states[i]; 461 acpi_power_resources_list_free(&ps->resources); 462 } 463} 464 465static void acpi_device_release(struct device *dev) 466{ 467 struct acpi_device *acpi_dev = to_acpi_device(dev); 468 469 acpi_free_properties(acpi_dev); 470 acpi_free_pnp_ids(&acpi_dev->pnp); 471 acpi_free_power_resources_lists(acpi_dev); 472 kfree(acpi_dev); 473} 474 475static void acpi_device_del(struct acpi_device *device) 476{ 477 struct acpi_device_bus_id *acpi_device_bus_id; 478 479 mutex_lock(&acpi_device_lock); 480 if (device->parent) 481 list_del(&device->node); 482 483 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) 484 if (!strcmp(acpi_device_bus_id->bus_id, 485 acpi_device_hid(device))) { 486 ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no); 487 if (ida_is_empty(&acpi_device_bus_id->instance_ida)) { 488 list_del(&acpi_device_bus_id->node); 489 kfree_const(acpi_device_bus_id->bus_id); 490 kfree(acpi_device_bus_id); 491 } 492 break; 493 } 494 495 list_del(&device->wakeup_list); 496 mutex_unlock(&acpi_device_lock); 497 498 acpi_power_add_remove_device(device, false); 499 acpi_device_remove_files(device); 500 if (device->remove) 501 device->remove(device); 502 503 device_del(&device->dev); 504} 505 506static BLOCKING_NOTIFIER_HEAD(acpi_reconfig_chain); 507 508static LIST_HEAD(acpi_device_del_list); 509static DEFINE_MUTEX(acpi_device_del_lock); 510 511static void acpi_device_del_work_fn(struct work_struct *work_not_used) 512{ 513 for (;;) { 514 struct acpi_device *adev; 515 516 mutex_lock(&acpi_device_del_lock); 517 518 if (list_empty(&acpi_device_del_list)) { 519 mutex_unlock(&acpi_device_del_lock); 520 break; 521 } 522 adev = list_first_entry(&acpi_device_del_list, 523 struct acpi_device, del_list); 524 list_del(&adev->del_list); 525 526 mutex_unlock(&acpi_device_del_lock); 527 528 blocking_notifier_call_chain(&acpi_reconfig_chain, 529 ACPI_RECONFIG_DEVICE_REMOVE, adev); 530 531 acpi_device_del(adev); 532 /* 533 * Drop references to all power resources that might have been 534 * used by the device. 535 */ 536 acpi_power_transition(adev, ACPI_STATE_D3_COLD); 537 put_device(&adev->dev); 538 } 539} 540 541/** 542 * acpi_scan_drop_device - Drop an ACPI device object. 543 * @handle: Handle of an ACPI namespace node, not used. 544 * @context: Address of the ACPI device object to drop. 545 * 546 * This is invoked by acpi_ns_delete_node() during the removal of the ACPI 547 * namespace node the device object pointed to by @context is attached to. 
548 * 549 * The unregistration is carried out asynchronously to avoid running 550 * acpi_device_del() under the ACPICA's namespace mutex and the list is used to 551 * ensure the correct ordering (the device objects must be unregistered in the 552 * same order in which the corresponding namespace nodes are deleted). 553 */ 554static void acpi_scan_drop_device(acpi_handle handle, void *context) 555{ 556 static DECLARE_WORK(work, acpi_device_del_work_fn); 557 struct acpi_device *adev = context; 558 559 mutex_lock(&acpi_device_del_lock); 560 561 /* 562 * Use the ACPI hotplug workqueue which is ordered, so this work item 563 * won't run after any hotplug work items submitted subsequently. That 564 * prevents attempts to register device objects identical to those being 565 * deleted from happening concurrently (such attempts result from 566 * hotplug events handled via the ACPI hotplug workqueue). It also will 567 * run after all of the work items submitted previosuly, which helps 568 * those work items to ensure that they are not accessing stale device 569 * objects. 570 */ 571 if (list_empty(&acpi_device_del_list)) 572 acpi_queue_hotplug_work(&work); 573 574 list_add_tail(&adev->del_list, &acpi_device_del_list); 575 /* Make acpi_ns_validate_handle() return NULL for this handle. */ 576 adev->handle = INVALID_ACPI_HANDLE; 577 578 mutex_unlock(&acpi_device_del_lock); 579} 580 581static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device, 582 void (*callback)(void *)) 583{ 584 acpi_status status; 585 586 if (!device) 587 return -EINVAL; 588 589 *device = NULL; 590 591 status = acpi_get_data_full(handle, acpi_scan_drop_device, 592 (void **)device, callback); 593 if (ACPI_FAILURE(status) || !*device) { 594 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n", 595 handle)); 596 return -ENODEV; 597 } 598 return 0; 599} 600 601int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device) 602{ 603 return acpi_get_device_data(handle, device, NULL); 604} 605EXPORT_SYMBOL(acpi_bus_get_device); 606 607static void get_acpi_device(void *dev) 608{ 609 if (dev) 610 get_device(&((struct acpi_device *)dev)->dev); 611} 612 613struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle) 614{ 615 struct acpi_device *adev = NULL; 616 617 acpi_get_device_data(handle, &adev, get_acpi_device); 618 return adev; 619} 620 621void acpi_bus_put_acpi_device(struct acpi_device *adev) 622{ 623 put_device(&adev->dev); 624} 625 626static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id) 627{ 628 struct acpi_device_bus_id *acpi_device_bus_id; 629 630 /* Find suitable bus_id and instance number in acpi_bus_id_list. 
*/ 631 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { 632 if (!strcmp(acpi_device_bus_id->bus_id, dev_id)) 633 return acpi_device_bus_id; 634 } 635 return NULL; 636} 637 638static int acpi_device_set_name(struct acpi_device *device, 639 struct acpi_device_bus_id *acpi_device_bus_id) 640{ 641 struct ida *instance_ida = &acpi_device_bus_id->instance_ida; 642 int result; 643 644 result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL); 645 if (result < 0) 646 return result; 647 648 device->pnp.instance_no = result; 649 dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result); 650 return 0; 651} 652 653int acpi_device_add(struct acpi_device *device, 654 void (*release)(struct device *)) 655{ 656 struct acpi_device_bus_id *acpi_device_bus_id; 657 int result; 658 659 if (device->handle) { 660 acpi_status status; 661 662 status = acpi_attach_data(device->handle, acpi_scan_drop_device, 663 device); 664 if (ACPI_FAILURE(status)) { 665 acpi_handle_err(device->handle, 666 "Unable to attach device data\n"); 667 return -ENODEV; 668 } 669 } 670 671 /* 672 * Linkage 673 * ------- 674 * Link this device to its parent and siblings. 675 */ 676 INIT_LIST_HEAD(&device->children); 677 INIT_LIST_HEAD(&device->node); 678 INIT_LIST_HEAD(&device->wakeup_list); 679 INIT_LIST_HEAD(&device->physical_node_list); 680 INIT_LIST_HEAD(&device->del_list); 681 mutex_init(&device->physical_node_lock); 682 683 mutex_lock(&acpi_device_lock); 684 685 acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device)); 686 if (acpi_device_bus_id) { 687 result = acpi_device_set_name(device, acpi_device_bus_id); 688 if (result) 689 goto err_unlock; 690 } else { 691 acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id), 692 GFP_KERNEL); 693 if (!acpi_device_bus_id) { 694 result = -ENOMEM; 695 goto err_unlock; 696 } 697 acpi_device_bus_id->bus_id = 698 kstrdup_const(acpi_device_hid(device), GFP_KERNEL); 699 if (!acpi_device_bus_id->bus_id) { 700 kfree(acpi_device_bus_id); 701 result = -ENOMEM; 702 goto err_unlock; 703 } 704 705 ida_init(&acpi_device_bus_id->instance_ida); 706 707 result = acpi_device_set_name(device, acpi_device_bus_id); 708 if (result) { 709 kfree_const(acpi_device_bus_id->bus_id); 710 kfree(acpi_device_bus_id); 711 goto err_unlock; 712 } 713 714 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); 715 } 716 717 if (device->parent) 718 list_add_tail(&device->node, &device->parent->children); 719 720 if (device->wakeup.flags.valid) 721 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list); 722 mutex_unlock(&acpi_device_lock); 723 724 if (device->parent) 725 device->dev.parent = &device->parent->dev; 726 device->dev.bus = &acpi_bus_type; 727 device->dev.release = release; 728 result = device_add(&device->dev); 729 if (result) { 730 dev_err(&device->dev, "Error registering device\n"); 731 goto err; 732 } 733 734 result = acpi_device_setup_files(device); 735 if (result) 736 printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n", 737 dev_name(&device->dev)); 738 739 return 0; 740 741 err: 742 mutex_lock(&acpi_device_lock); 743 if (device->parent) 744 list_del(&device->node); 745 list_del(&device->wakeup_list); 746 747 err_unlock: 748 mutex_unlock(&acpi_device_lock); 749 750 acpi_detach_data(device->handle, acpi_scan_drop_device); 751 return result; 752} 753 754/* -------------------------------------------------------------------------- 755 Device Enumeration 756 
-------------------------------------------------------------------------- */ 757static struct acpi_device *acpi_bus_get_parent(acpi_handle handle) 758{ 759 struct acpi_device *device = NULL; 760 acpi_status status; 761 762 /* 763 * Fixed hardware devices do not appear in the namespace and do not 764 * have handles, but we fabricate acpi_devices for them, so we have 765 * to deal with them specially. 766 */ 767 if (!handle) 768 return acpi_root; 769 770 do { 771 status = acpi_get_parent(handle, &handle); 772 if (ACPI_FAILURE(status)) 773 return status == AE_NULL_ENTRY ? NULL : acpi_root; 774 } while (acpi_bus_get_device(handle, &device)); 775 return device; 776} 777 778acpi_status 779acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd) 780{ 781 acpi_status status; 782 acpi_handle tmp; 783 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 784 union acpi_object *obj; 785 786 status = acpi_get_handle(handle, "_EJD", &tmp); 787 if (ACPI_FAILURE(status)) 788 return status; 789 790 status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer); 791 if (ACPI_SUCCESS(status)) { 792 obj = buffer.pointer; 793 status = acpi_get_handle(ACPI_ROOT_OBJECT, obj->string.pointer, 794 ejd); 795 kfree(buffer.pointer); 796 } 797 return status; 798} 799EXPORT_SYMBOL_GPL(acpi_bus_get_ejd); 800 801static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev) 802{ 803 acpi_handle handle = dev->handle; 804 struct acpi_device_wakeup *wakeup = &dev->wakeup; 805 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 806 union acpi_object *package = NULL; 807 union acpi_object *element = NULL; 808 acpi_status status; 809 int err = -ENODATA; 810 811 INIT_LIST_HEAD(&wakeup->resources); 812 813 /* _PRW */ 814 status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer); 815 if (ACPI_FAILURE(status)) { 816 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW")); 817 return err; 818 } 819 820 package = (union acpi_object *)buffer.pointer; 821 822 if (!package || package->package.count < 2) 823 goto out; 824 825 element = &(package->package.elements[0]); 826 if (!element) 827 goto out; 828 829 if (element->type == ACPI_TYPE_PACKAGE) { 830 if ((element->package.count < 2) || 831 (element->package.elements[0].type != 832 ACPI_TYPE_LOCAL_REFERENCE) 833 || (element->package.elements[1].type != ACPI_TYPE_INTEGER)) 834 goto out; 835 836 wakeup->gpe_device = 837 element->package.elements[0].reference.handle; 838 wakeup->gpe_number = 839 (u32) element->package.elements[1].integer.value; 840 } else if (element->type == ACPI_TYPE_INTEGER) { 841 wakeup->gpe_device = NULL; 842 wakeup->gpe_number = element->integer.value; 843 } else { 844 goto out; 845 } 846 847 element = &(package->package.elements[1]); 848 if (element->type != ACPI_TYPE_INTEGER) 849 goto out; 850 851 wakeup->sleep_state = element->integer.value; 852 853 err = acpi_extract_power_resources(package, 2, &wakeup->resources); 854 if (err) 855 goto out; 856 857 if (!list_empty(&wakeup->resources)) { 858 int sleep_state; 859 860 err = acpi_power_wakeup_list_init(&wakeup->resources, 861 &sleep_state); 862 if (err) { 863 acpi_handle_warn(handle, "Retrieving current states " 864 "of wakeup power resources failed\n"); 865 acpi_power_resources_list_free(&wakeup->resources); 866 goto out; 867 } 868 if (sleep_state < wakeup->sleep_state) { 869 acpi_handle_warn(handle, "Overriding _PRW sleep state " 870 "(S%d) by S%d from power resources\n", 871 (int)wakeup->sleep_state, sleep_state); 872 wakeup->sleep_state = sleep_state; 873 } 874 } 875 876 out: 877 
	kfree(buffer.pointer);
	return err;
}

static bool acpi_wakeup_gpe_init(struct acpi_device *device)
{
	static const struct acpi_device_id button_device_ids[] = {
		{"PNP0C0C", 0},		/* Power button */
		{"PNP0C0D", 0},		/* Lid */
		{"PNP0C0E", 0},		/* Sleep button */
		{"", 0},
	};
	struct acpi_device_wakeup *wakeup = &device->wakeup;
	acpi_status status;

	wakeup->flags.notifier_present = 0;

	/* Power button, Lid switch always enable wakeup */
	if (!acpi_match_device_ids(device, button_device_ids)) {
		if (!acpi_match_device_ids(device, &button_device_ids[1])) {
			/* Do not use Lid/sleep button for S5 wakeup */
			if (wakeup->sleep_state == ACPI_STATE_S5)
				wakeup->sleep_state = ACPI_STATE_S4;
		}
		acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number);
		device_set_wakeup_capable(&device->dev, true);
		return true;
	}

	status = acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device,
					 wakeup->gpe_number);
	return ACPI_SUCCESS(status);
}

static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
{
	int err;

	/* Presence of _PRW indicates wake capable */
	if (!acpi_has_method(device->handle, "_PRW"))
		return;

	err = acpi_bus_extract_wakeup_device_power_package(device);
	if (err) {
		dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
		return;
	}

	device->wakeup.flags.valid = acpi_wakeup_gpe_init(device);
	device->wakeup.prepare_count = 0;
	/*
	 * Call _PSW/_DSW object to disable its ability to wake the sleeping
	 * system for the ACPI device with the _PRW object.
	 * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
	 * So it is necessary to call the _DSW object first. Only when it is
	 * not present will the _PSW object be used.
	 */
	err = acpi_device_sleep_wake(device, 0, 0, 0);
	if (err)
		pr_debug("error in _DSW or _PSW evaluation\n");
}

static void acpi_bus_init_power_state(struct acpi_device *device, int state)
{
	struct acpi_device_power_state *ps = &device->power.states[state];
	char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	INIT_LIST_HEAD(&ps->resources);

	/* Evaluate "_PRx" to get referenced power resources */
	status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer);
	if (ACPI_SUCCESS(status)) {
		union acpi_object *package = buffer.pointer;

		if (buffer.length && package
		    && package->type == ACPI_TYPE_PACKAGE
		    && package->package.count)
			acpi_extract_power_resources(package, 0, &ps->resources);

		ACPI_FREE(buffer.pointer);
	}

	/* Evaluate "_PSx" to see if we can do explicit sets */
	pathname[2] = 'S';
	if (acpi_has_method(device->handle, pathname))
		ps->flags.explicit_set = 1;

	/* State is valid if there are means to put the device into it.
*/ 967 if (!list_empty(&ps->resources) || ps->flags.explicit_set) 968 ps->flags.valid = 1; 969 970 ps->power = -1; /* Unknown - driver assigned */ 971 ps->latency = -1; /* Unknown - driver assigned */ 972} 973 974static void acpi_bus_get_power_flags(struct acpi_device *device) 975{ 976 u32 i; 977 978 /* Presence of _PS0|_PR0 indicates 'power manageable' */ 979 if (!acpi_has_method(device->handle, "_PS0") && 980 !acpi_has_method(device->handle, "_PR0")) 981 return; 982 983 device->flags.power_manageable = 1; 984 985 /* 986 * Power Management Flags 987 */ 988 if (acpi_has_method(device->handle, "_PSC")) 989 device->power.flags.explicit_get = 1; 990 991 if (acpi_has_method(device->handle, "_IRC")) 992 device->power.flags.inrush_current = 1; 993 994 if (acpi_has_method(device->handle, "_DSW")) 995 device->power.flags.dsw_present = 1; 996 997 /* 998 * Enumerate supported power management states 999 */ 1000 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) 1001 acpi_bus_init_power_state(device, i); 1002 1003 INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); 1004 1005 /* Set the defaults for D0 and D3hot (always supported). */ 1006 device->power.states[ACPI_STATE_D0].flags.valid = 1; 1007 device->power.states[ACPI_STATE_D0].power = 100; 1008 device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; 1009 1010 /* 1011 * Use power resources only if the D0 list of them is populated, because 1012 * some platforms may provide _PR3 only to indicate D3cold support and 1013 * in those cases the power resources list returned by it may be bogus. 1014 */ 1015 if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { 1016 device->power.flags.power_resources = 1; 1017 /* 1018 * D3cold is supported if the D3hot list of power resources is 1019 * not empty. 1020 */ 1021 if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) 1022 device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; 1023 } 1024 1025 if (acpi_bus_init_power(device)) 1026 device->flags.power_manageable = 0; 1027} 1028 1029static void acpi_bus_get_flags(struct acpi_device *device) 1030{ 1031 /* Presence of _STA indicates 'dynamic_status' */ 1032 if (acpi_has_method(device->handle, "_STA")) 1033 device->flags.dynamic_status = 1; 1034 1035 /* Presence of _RMV indicates 'removable' */ 1036 if (acpi_has_method(device->handle, "_RMV")) 1037 device->flags.removable = 1; 1038 1039 /* Presence of _EJD|_EJ0 indicates 'ejectable' */ 1040 if (acpi_has_method(device->handle, "_EJD") || 1041 acpi_has_method(device->handle, "_EJ0")) 1042 device->flags.ejectable = 1; 1043} 1044 1045static void acpi_device_get_busid(struct acpi_device *device) 1046{ 1047 char bus_id[5] = { '?', 0 }; 1048 struct acpi_buffer buffer = { sizeof(bus_id), bus_id }; 1049 int i = 0; 1050 1051 /* 1052 * Bus ID 1053 * ------ 1054 * The device's Bus ID is simply the object name. 1055 * TBD: Shouldn't this value be unique (within the ACPI namespace)? 
1056 */ 1057 if (ACPI_IS_ROOT_DEVICE(device)) { 1058 strcpy(device->pnp.bus_id, "ACPI"); 1059 return; 1060 } 1061 1062 switch (device->device_type) { 1063 case ACPI_BUS_TYPE_POWER_BUTTON: 1064 strcpy(device->pnp.bus_id, "PWRF"); 1065 break; 1066 case ACPI_BUS_TYPE_SLEEP_BUTTON: 1067 strcpy(device->pnp.bus_id, "SLPF"); 1068 break; 1069 case ACPI_BUS_TYPE_ECDT_EC: 1070 strcpy(device->pnp.bus_id, "ECDT"); 1071 break; 1072 default: 1073 acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer); 1074 /* Clean up trailing underscores (if any) */ 1075 for (i = 3; i > 1; i--) { 1076 if (bus_id[i] == '_') 1077 bus_id[i] = '\0'; 1078 else 1079 break; 1080 } 1081 strcpy(device->pnp.bus_id, bus_id); 1082 break; 1083 } 1084} 1085 1086/* 1087 * acpi_ata_match - see if an acpi object is an ATA device 1088 * 1089 * If an acpi object has one of the ACPI ATA methods defined, 1090 * then we can safely call it an ATA device. 1091 */ 1092bool acpi_ata_match(acpi_handle handle) 1093{ 1094 return acpi_has_method(handle, "_GTF") || 1095 acpi_has_method(handle, "_GTM") || 1096 acpi_has_method(handle, "_STM") || 1097 acpi_has_method(handle, "_SDD"); 1098} 1099 1100/* 1101 * acpi_bay_match - see if an acpi object is an ejectable driver bay 1102 * 1103 * If an acpi object is ejectable and has one of the ACPI ATA methods defined, 1104 * then we can safely call it an ejectable drive bay 1105 */ 1106bool acpi_bay_match(acpi_handle handle) 1107{ 1108 acpi_handle phandle; 1109 1110 if (!acpi_has_method(handle, "_EJ0")) 1111 return false; 1112 if (acpi_ata_match(handle)) 1113 return true; 1114 if (ACPI_FAILURE(acpi_get_parent(handle, &phandle))) 1115 return false; 1116 1117 return acpi_ata_match(phandle); 1118} 1119 1120bool acpi_device_is_battery(struct acpi_device *adev) 1121{ 1122 struct acpi_hardware_id *hwid; 1123 1124 list_for_each_entry(hwid, &adev->pnp.ids, list) 1125 if (!strcmp("PNP0C0A", hwid->id)) 1126 return true; 1127 1128 return false; 1129} 1130 1131static bool is_ejectable_bay(struct acpi_device *adev) 1132{ 1133 acpi_handle handle = adev->handle; 1134 1135 if (acpi_has_method(handle, "_EJ0") && acpi_device_is_battery(adev)) 1136 return true; 1137 1138 return acpi_bay_match(handle); 1139} 1140 1141/* 1142 * acpi_dock_match - see if an acpi object has a _DCK method 1143 */ 1144bool acpi_dock_match(acpi_handle handle) 1145{ 1146 return acpi_has_method(handle, "_DCK"); 1147} 1148 1149static acpi_status 1150acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context, 1151 void **return_value) 1152{ 1153 long *cap = context; 1154 1155 if (acpi_has_method(handle, "_BCM") && 1156 acpi_has_method(handle, "_BCL")) { 1157 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight " 1158 "support\n")); 1159 *cap |= ACPI_VIDEO_BACKLIGHT; 1160 /* We have backlight support, no need to scan further */ 1161 return AE_CTRL_TERMINATE; 1162 } 1163 return 0; 1164} 1165 1166/* Returns true if the ACPI object is a video device which can be 1167 * handled by video.ko. 1168 * The device will get a Linux specific CID added in scan.c to 1169 * identify the device as an ACPI graphics device 1170 * Be aware that the graphics device may not be physically present 1171 * Use acpi_video_get_capabilities() to detect general ACPI video 1172 * capabilities of present cards 1173 */ 1174long acpi_is_video_device(acpi_handle handle) 1175{ 1176 long video_caps = 0; 1177 1178 /* Is this device able to support video switching ? 
*/ 1179 if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS")) 1180 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; 1181 1182 /* Is this device able to retrieve a video ROM ? */ 1183 if (acpi_has_method(handle, "_ROM")) 1184 video_caps |= ACPI_VIDEO_ROM_AVAILABLE; 1185 1186 /* Is this device able to configure which video head to be POSTed ? */ 1187 if (acpi_has_method(handle, "_VPO") && 1188 acpi_has_method(handle, "_GPD") && 1189 acpi_has_method(handle, "_SPD")) 1190 video_caps |= ACPI_VIDEO_DEVICE_POSTING; 1191 1192 /* Only check for backlight functionality if one of the above hit. */ 1193 if (video_caps) 1194 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1195 ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL, 1196 &video_caps, NULL); 1197 1198 return video_caps; 1199} 1200EXPORT_SYMBOL(acpi_is_video_device); 1201 1202const char *acpi_device_hid(struct acpi_device *device) 1203{ 1204 struct acpi_hardware_id *hid; 1205 1206 if (list_empty(&device->pnp.ids)) 1207 return dummy_hid; 1208 1209 hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list); 1210 return hid->id; 1211} 1212EXPORT_SYMBOL(acpi_device_hid); 1213 1214static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id) 1215{ 1216 struct acpi_hardware_id *id; 1217 1218 id = kmalloc(sizeof(*id), GFP_KERNEL); 1219 if (!id) 1220 return; 1221 1222 id->id = kstrdup_const(dev_id, GFP_KERNEL); 1223 if (!id->id) { 1224 kfree(id); 1225 return; 1226 } 1227 1228 list_add_tail(&id->list, &pnp->ids); 1229 pnp->type.hardware_id = 1; 1230} 1231 1232/* 1233 * Old IBM workstations have a DSDT bug wherein the SMBus object 1234 * lacks the SMBUS01 HID and the methods do not have the necessary "_" 1235 * prefix. Work around this. 1236 */ 1237static bool acpi_ibm_smbus_match(acpi_handle handle) 1238{ 1239 char node_name[ACPI_PATH_SEGMENT_LENGTH]; 1240 struct acpi_buffer path = { sizeof(node_name), node_name }; 1241 1242 if (!dmi_name_in_vendors("IBM")) 1243 return false; 1244 1245 /* Look for SMBS object */ 1246 if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &path)) || 1247 strcmp("SMBS", path.pointer)) 1248 return false; 1249 1250 /* Does it have the necessary (but misnamed) methods? 
*/ 1251 if (acpi_has_method(handle, "SBI") && 1252 acpi_has_method(handle, "SBR") && 1253 acpi_has_method(handle, "SBW")) 1254 return true; 1255 1256 return false; 1257} 1258 1259static bool acpi_object_is_system_bus(acpi_handle handle) 1260{ 1261 acpi_handle tmp; 1262 1263 if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB", &tmp)) && 1264 tmp == handle) 1265 return true; 1266 if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_TZ", &tmp)) && 1267 tmp == handle) 1268 return true; 1269 1270 return false; 1271} 1272 1273static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, 1274 int device_type) 1275{ 1276 acpi_status status; 1277 struct acpi_device_info *info; 1278 struct acpi_pnp_device_id_list *cid_list; 1279 int i; 1280 1281 switch (device_type) { 1282 case ACPI_BUS_TYPE_DEVICE: 1283 if (handle == ACPI_ROOT_OBJECT) { 1284 acpi_add_id(pnp, ACPI_SYSTEM_HID); 1285 break; 1286 } 1287 1288 status = acpi_get_object_info(handle, &info); 1289 if (ACPI_FAILURE(status)) { 1290 pr_err(PREFIX "%s: Error reading device info\n", 1291 __func__); 1292 return; 1293 } 1294 1295 if (info->valid & ACPI_VALID_HID) { 1296 acpi_add_id(pnp, info->hardware_id.string); 1297 pnp->type.platform_id = 1; 1298 } 1299 if (info->valid & ACPI_VALID_CID) { 1300 cid_list = &info->compatible_id_list; 1301 for (i = 0; i < cid_list->count; i++) 1302 acpi_add_id(pnp, cid_list->ids[i].string); 1303 } 1304 if (info->valid & ACPI_VALID_ADR) { 1305 pnp->bus_address = info->address; 1306 pnp->type.bus_address = 1; 1307 } 1308 if (info->valid & ACPI_VALID_UID) 1309 pnp->unique_id = kstrdup(info->unique_id.string, 1310 GFP_KERNEL); 1311 if (info->valid & ACPI_VALID_CLS) 1312 acpi_add_id(pnp, info->class_code.string); 1313 1314 kfree(info); 1315 1316 /* 1317 * Some devices don't reliably have _HIDs & _CIDs, so add 1318 * synthetic HIDs to make sure drivers can find them. 1319 */ 1320 if (acpi_is_video_device(handle)) 1321 acpi_add_id(pnp, ACPI_VIDEO_HID); 1322 else if (acpi_bay_match(handle)) 1323 acpi_add_id(pnp, ACPI_BAY_HID); 1324 else if (acpi_dock_match(handle)) 1325 acpi_add_id(pnp, ACPI_DOCK_HID); 1326 else if (acpi_ibm_smbus_match(handle)) 1327 acpi_add_id(pnp, ACPI_SMBUS_IBM_HID); 1328 else if (list_empty(&pnp->ids) && 1329 acpi_object_is_system_bus(handle)) { 1330 /* \_SB, \_TZ, LNXSYBUS */ 1331 acpi_add_id(pnp, ACPI_BUS_HID); 1332 strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME); 1333 strcpy(pnp->device_class, ACPI_BUS_CLASS); 1334 } 1335 1336 break; 1337 case ACPI_BUS_TYPE_POWER: 1338 acpi_add_id(pnp, ACPI_POWER_HID); 1339 break; 1340 case ACPI_BUS_TYPE_PROCESSOR: 1341 acpi_add_id(pnp, ACPI_PROCESSOR_OBJECT_HID); 1342 break; 1343 case ACPI_BUS_TYPE_THERMAL: 1344 acpi_add_id(pnp, ACPI_THERMAL_HID); 1345 break; 1346 case ACPI_BUS_TYPE_POWER_BUTTON: 1347 acpi_add_id(pnp, ACPI_BUTTON_HID_POWERF); 1348 break; 1349 case ACPI_BUS_TYPE_SLEEP_BUTTON: 1350 acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF); 1351 break; 1352 case ACPI_BUS_TYPE_ECDT_EC: 1353 acpi_add_id(pnp, ACPI_ECDT_HID); 1354 break; 1355 } 1356} 1357 1358void acpi_free_pnp_ids(struct acpi_device_pnp *pnp) 1359{ 1360 struct acpi_hardware_id *id, *tmp; 1361 1362 list_for_each_entry_safe(id, tmp, &pnp->ids, list) { 1363 kfree_const(id->id); 1364 kfree(id); 1365 } 1366 kfree(pnp->unique_id); 1367} 1368 1369/** 1370 * acpi_dma_supported - Check DMA support for the specified device. 1371 * @adev: The pointer to acpi device 1372 * 1373 * Return false if DMA is not supported. 
Otherwise, return true.
 */
bool acpi_dma_supported(struct acpi_device *adev)
{
	if (!adev)
		return false;

	if (adev->flags.cca_seen)
		return true;

	/*
	 * Per ACPI 6.0 sec 6.2.17, assume devices can do cache-coherent
	 * DMA on "Intel platforms". Presumably that includes all x86 and
	 * ia64, and other arches will set CONFIG_ACPI_CCA_REQUIRED=y.
	 */
	if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
		return true;

	return false;
}

/**
 * acpi_get_dma_attr - Check the supported DMA attr for the specified device.
 * @adev: The pointer to acpi device
 *
 * Return enum dev_dma_attr.
 */
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
{
	if (!acpi_dma_supported(adev))
		return DEV_DMA_NOT_SUPPORTED;

	if (adev->flags.coherent_dma)
		return DEV_DMA_COHERENT;
	else
		return DEV_DMA_NON_COHERENT;
}

/**
 * acpi_dma_get_range() - Get device DMA parameters.
 *
 * @dev: device to configure
 * @map: pointer to DMA ranges result
 *
 * Evaluate DMA regions and return a pointer to the DMA regions on
 * parsing success; it does not update the passed-in values on failure.
 *
 * Return 0 on success, < 0 on failure.
 */
int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
{
	struct acpi_device *adev;
	LIST_HEAD(list);
	struct resource_entry *rentry;
	int ret;
	struct device *dma_dev = dev;
	struct bus_dma_region *r, *regions;

	/*
	 * Walk the device tree chasing an ACPI companion with a _DMA
	 * object while we go. Stop if we find a device with an ACPI
	 * companion containing a _DMA method.
	 */
	do {
		adev = ACPI_COMPANION(dma_dev);
		if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA))
			break;

		dma_dev = dma_dev->parent;
	} while (dma_dev);

	if (!dma_dev)
		return -ENODEV;

	if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) {
		acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n");
		return -EINVAL;
	}

	ret = acpi_dev_get_dma_resources(adev, &list);
	if (ret > 0) {
		regions = kcalloc(ret + 1, sizeof(*regions), GFP_KERNEL);
		if (!regions) {
			ret = -ENOMEM;
			goto out;
		}

		/* Fill the entries while keeping 'regions' at the first one. */
		r = regions;
		list_for_each_entry(rentry, &list, node) {
			if (rentry->res->start >= rentry->res->end) {
				kfree(regions);
				ret = -EINVAL;
				dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
				goto out;
			}

			r->cpu_start = rentry->res->start;
			r->dma_start = rentry->res->start - rentry->offset;
			r->size = resource_size(rentry->res);
			r->offset = rentry->offset;
			r++;
		}

		*map = regions;
	}
 out:
	acpi_dev_free_resource_list(&list);

	return ret >= 0 ? 0 : ret;
}
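/*
 * Usage sketch (illustrative only, not part of this file): an ACPI-aware bus
 * or architecture helper could install the firmware-described DMA offsets
 * roughly as follows.
 *
 *	const struct bus_dma_region *map = NULL;
 *
 *	if (!acpi_dma_get_range(dev, &map) && map)
 *		dev->dma_range_map = map;
 *
 * A -ENODEV return simply means that no ancestor with a _DMA object was
 * found, which callers normally do not treat as an error.
 */

/**
 * acpi_dma_configure_id - Set-up DMA configuration for the device.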
 * @dev: The pointer to the device
 * @attr: device dma attributes
 * @input_id: input device id const value pointer
 */
int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
			  const u32 *input_id)
{
	const struct iommu_ops *iommu;

	if (attr == DEV_DMA_NOT_SUPPORTED) {
		set_dma_ops(dev, &dma_dummy_ops);
		return 0;
	}

	acpi_arch_dma_setup(dev);

	iommu = iort_iommu_configure_id(dev, input_id);
	if (PTR_ERR(iommu) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	arch_setup_dma_ops(dev, 0, U64_MAX,
			   iommu, attr == DEV_DMA_COHERENT);

	return 0;
}
EXPORT_SYMBOL_GPL(acpi_dma_configure_id);

static void acpi_init_coherency(struct acpi_device *adev)
{
	unsigned long long cca = 0;
	acpi_status status;
	struct acpi_device *parent = adev->parent;

	if (parent && parent->flags.cca_seen) {
		/*
		 * From ACPI spec, OSPM will ignore _CCA if an ancestor
		 * already saw one.
		 */
		adev->flags.cca_seen = 1;
		cca = parent->flags.coherent_dma;
	} else {
		status = acpi_evaluate_integer(adev->handle, "_CCA",
					       NULL, &cca);
		if (ACPI_SUCCESS(status))
			adev->flags.cca_seen = 1;
		else if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
			/*
			 * If architecture does not specify that _CCA is
			 * required for DMA-able devices (e.g. x86),
			 * we default to _CCA=1.
			 */
			cca = 1;
		else
			acpi_handle_debug(adev->handle,
					  "ACPI device is missing _CCA.\n");
	}

	adev->flags.coherent_dma = cca;
}

static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
{
	bool *is_serial_bus_slave_p = data;

	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
		return 1;

	*is_serial_bus_slave_p = true;

	/* no need to do more checking */
	return -1;
}

static bool acpi_is_indirect_io_slave(struct acpi_device *device)
{
	struct acpi_device *parent = device->parent;
	static const struct acpi_device_id indirect_io_hosts[] = {
		{"HISI0191", 0},
		{}
	};

	return parent && !acpi_match_device_ids(parent, indirect_io_hosts);
}

static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{
	struct list_head resource_list;
	bool is_serial_bus_slave = false;
	static const struct acpi_device_id ignore_serial_bus_ids[] = {
		/*
		 * These devices have multiple I2cSerialBus resources and an i2c-client
		 * must be instantiated for each, each with its own i2c_device_id.
		 * Normally we only instantiate an i2c-client for the first resource,
		 * using the ACPI HID as id. These special cases are handled by the
		 * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows
		 * which i2c_device_id to use for each resource.
		 */
		{"BSG1160", },
		{"BSG2150", },
		{"INT33FE", },
		{"INT3515", },
		/*
		 * HIDs of devices with a UartSerialBusV2 resource for which userspace
		 * expects a regular tty cdev to be created (instead of the in-kernel
		 * serdev) and which have a kernel driver which expects a platform_dev
		 * such as the rfkill-gpio driver.
1591 */ 1592 {"BCM4752", }, 1593 {"LNV4752", }, 1594 {} 1595 }; 1596 1597 if (acpi_is_indirect_io_slave(device)) 1598 return true; 1599 1600 /* Macs use device properties in lieu of _CRS resources */ 1601 if (x86_apple_machine && 1602 (fwnode_property_present(&device->fwnode, "spiSclkPeriod") || 1603 fwnode_property_present(&device->fwnode, "i2cAddress") || 1604 fwnode_property_present(&device->fwnode, "baud"))) 1605 return true; 1606 1607 if (!acpi_match_device_ids(device, ignore_serial_bus_ids)) 1608 return false; 1609 1610 INIT_LIST_HEAD(&resource_list); 1611 acpi_dev_get_resources(device, &resource_list, 1612 acpi_check_serial_bus_slave, 1613 &is_serial_bus_slave); 1614 acpi_dev_free_resource_list(&resource_list); 1615 1616 return is_serial_bus_slave; 1617} 1618 1619void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, 1620 int type, unsigned long long sta) 1621{ 1622 INIT_LIST_HEAD(&device->pnp.ids); 1623 device->device_type = type; 1624 device->handle = handle; 1625 device->parent = acpi_bus_get_parent(handle); 1626 device->fwnode.ops = &acpi_device_fwnode_ops; 1627 acpi_set_device_status(device, sta); 1628 acpi_device_get_busid(device); 1629 acpi_set_pnp_ids(handle, &device->pnp, type); 1630 acpi_init_properties(device); 1631 acpi_bus_get_flags(device); 1632 device->flags.match_driver = false; 1633 device->flags.initialized = true; 1634 device->flags.enumeration_by_parent = 1635 acpi_device_enumeration_by_parent(device); 1636 acpi_device_clear_enumerated(device); 1637 device_initialize(&device->dev); 1638 dev_set_uevent_suppress(&device->dev, true); 1639 acpi_init_coherency(device); 1640 /* Assume there are unmet deps until acpi_device_dep_initialize() runs */ 1641 device->dep_unmet = 1; 1642} 1643 1644void acpi_device_add_finalize(struct acpi_device *device) 1645{ 1646 dev_set_uevent_suppress(&device->dev, false); 1647 kobject_uevent(&device->dev.kobj, KOBJ_ADD); 1648} 1649 1650static int acpi_add_single_object(struct acpi_device **child, 1651 acpi_handle handle, int type, 1652 unsigned long long sta) 1653{ 1654 int result; 1655 struct acpi_device *device; 1656 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 1657 1658 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL); 1659 if (!device) { 1660 printk(KERN_ERR PREFIX "Memory allocation error\n"); 1661 return -ENOMEM; 1662 } 1663 1664 acpi_init_device_object(device, handle, type, sta); 1665 /* 1666 * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so 1667 * that we can call acpi_bus_get_status() and use its quirk handling. 1668 * Note this must be done before the get power-/wakeup_dev-flags calls. 1669 */ 1670 if (type == ACPI_BUS_TYPE_DEVICE) 1671 if (acpi_bus_get_status(device) < 0) 1672 acpi_set_device_status(device, 0); 1673 1674 acpi_bus_get_power_flags(device); 1675 acpi_bus_get_wakeup_device_flags(device); 1676 1677 result = acpi_device_add(device, acpi_device_release); 1678 if (result) { 1679 acpi_device_release(&device->dev); 1680 return result; 1681 } 1682 1683 acpi_power_add_remove_device(device, true); 1684 acpi_device_add_finalize(device); 1685 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 1686 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Added %s [%s] parent %s\n", 1687 dev_name(&device->dev), (char *) buffer.pointer, 1688 device->parent ? 
			  dev_name(&device->parent->dev) : "(null)"));
	kfree(buffer.pointer);
	*child = device;
	return 0;
}

static acpi_status acpi_get_resource_memory(struct acpi_resource *ares,
					    void *context)
{
	struct resource *res = context;

	if (acpi_dev_resource_memory(ares, res))
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

static bool acpi_device_should_be_hidden(acpi_handle handle)
{
	acpi_status status;
	struct resource res;

	/* Check if it should ignore the UART device */
	if (!(spcr_uart_addr && acpi_has_method(handle, METHOD_NAME__CRS)))
		return false;

	/*
	 * The UART device described in the SPCR table is assumed to have only
	 * one memory resource present. So we only look for the first one here.
	 */
	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     acpi_get_resource_memory, &res);
	if (ACPI_FAILURE(status) || res.start != spcr_uart_addr)
		return false;

	acpi_handle_info(handle, "The UART device @%pa in SPCR table will be hidden\n",
			 &res.start);

	return true;
}

static int acpi_bus_type_and_status(acpi_handle handle, int *type,
				    unsigned long long *sta)
{
	acpi_status status;
	acpi_object_type acpi_type;

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	switch (acpi_type) {
	case ACPI_TYPE_ANY:		/* for ACPI_ROOT_OBJECT */
	case ACPI_TYPE_DEVICE:
		if (acpi_device_should_be_hidden(handle))
			return -ENODEV;

		*type = ACPI_BUS_TYPE_DEVICE;
		/*
		 * acpi_add_single_object updates this once we have an
		 * acpi_device, so that acpi_bus_get_status' quirk handling
		 * can be used.
		 */
		*sta = ACPI_STA_DEFAULT;
		break;
	case ACPI_TYPE_PROCESSOR:
		*type = ACPI_BUS_TYPE_PROCESSOR;
		status = acpi_bus_get_status_handle(handle, sta);
		if (ACPI_FAILURE(status))
			return -ENODEV;
		break;
	case ACPI_TYPE_THERMAL:
		*type = ACPI_BUS_TYPE_THERMAL;
		*sta = ACPI_STA_DEFAULT;
		break;
	case ACPI_TYPE_POWER:
		*type = ACPI_BUS_TYPE_POWER;
		*sta = ACPI_STA_DEFAULT;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

bool acpi_device_is_present(const struct acpi_device *adev)
{
	return adev->status.present || adev->status.functional;
}

static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
				       const char *idstr,
				       const struct acpi_device_id **matchid)
{
	const struct acpi_device_id *devid;

	if (handler->match)
		return handler->match(idstr, matchid);

	for (devid = handler->ids; devid->id[0]; devid++)
		if (!strcmp((char *)devid->id, idstr)) {
			if (matchid)
				*matchid = devid;

			return true;
		}

	return false;
}

static struct acpi_scan_handler *acpi_scan_match_handler(const char *idstr,
					const struct acpi_device_id **matchid)
{
	struct acpi_scan_handler *handler;

	list_for_each_entry(handler, &acpi_scan_handlers_list, list_node)
		if (acpi_scan_handler_matching(handler, idstr, matchid))
			return handler;

	return NULL;
}

void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
{
	if (!!hotplug->enabled == !!val)
		return;

	mutex_lock(&acpi_scan_lock);

	hotplug->enabled = val;

	mutex_unlock(&acpi_scan_lock);
}
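/*
 * Usage sketch (illustrative only, not part of this file): a scan handler
 * claims device objects by _HID/_CID before ordinary drivers are considered.
 * The ID and callbacks below are hypothetical.
 *
 *	static const struct acpi_device_id example_ids[] = {
 *		{"EXMP0001", 0},
 *		{}
 *	};
 *
 *	static int example_attach(struct acpi_device *adev,
 *				  const struct acpi_device_id *id)
 *	{
 *		return 1;	// a positive return claims the device
 *	}
 *
 *	static struct acpi_scan_handler example_handler = {
 *		.ids = example_ids,
 *		.attach = example_attach,
 *	};
 *
 * Such a handler would be registered early, e.g. via
 * acpi_scan_add_handler(&example_handler), before the namespace is scanned.
 */

static void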
acpi_scan_init_hotplug(struct acpi_device *adev) 1823{ 1824 struct acpi_hardware_id *hwid; 1825 1826 if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) { 1827 acpi_dock_add(adev); 1828 return; 1829 } 1830 list_for_each_entry(hwid, &adev->pnp.ids, list) { 1831 struct acpi_scan_handler *handler; 1832 1833 handler = acpi_scan_match_handler(hwid->id, NULL); 1834 if (handler) { 1835 adev->flags.hotplug_notify = true; 1836 break; 1837 } 1838 } 1839} 1840 1841static void acpi_device_dep_initialize(struct acpi_device *adev) 1842{ 1843 struct acpi_dep_data *dep; 1844 struct acpi_handle_list dep_devices; 1845 acpi_status status; 1846 int i; 1847 1848 adev->dep_unmet = 0; 1849 1850 if (!acpi_has_method(adev->handle, "_DEP")) 1851 return; 1852 1853 status = acpi_evaluate_reference(adev->handle, "_DEP", NULL, 1854 &dep_devices); 1855 if (ACPI_FAILURE(status)) { 1856 dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n"); 1857 return; 1858 } 1859 1860 for (i = 0; i < dep_devices.count; i++) { 1861 struct acpi_device_info *info; 1862 int skip; 1863 1864 status = acpi_get_object_info(dep_devices.handles[i], &info); 1865 if (ACPI_FAILURE(status)) { 1866 dev_dbg(&adev->dev, "Error reading _DEP device info\n"); 1867 continue; 1868 } 1869 1870 /* 1871 * Skip the dependency of Windows System Power 1872 * Management Controller 1873 */ 1874 skip = info->valid & ACPI_VALID_HID && 1875 !strcmp(info->hardware_id.string, "INT3396"); 1876 1877 kfree(info); 1878 1879 if (skip) 1880 continue; 1881 1882 dep = kzalloc(sizeof(struct acpi_dep_data), GFP_KERNEL); 1883 if (!dep) 1884 return; 1885 1886 dep->master = dep_devices.handles[i]; 1887 dep->slave = adev->handle; 1888 adev->dep_unmet++; 1889 1890 mutex_lock(&acpi_dep_list_lock); 1891 list_add_tail(&dep->node , &acpi_dep_list); 1892 mutex_unlock(&acpi_dep_list_lock); 1893 } 1894} 1895 1896static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used, 1897 void *not_used, void **return_value) 1898{ 1899 struct acpi_device *device = NULL; 1900 int type; 1901 unsigned long long sta; 1902 int result; 1903 1904 acpi_bus_get_device(handle, &device); 1905 if (device) 1906 goto out; 1907 1908 result = acpi_bus_type_and_status(handle, &type, &sta); 1909 if (result) 1910 return AE_OK; 1911 1912 if (type == ACPI_BUS_TYPE_POWER) { 1913 acpi_add_power_resource(handle); 1914 return AE_OK; 1915 } 1916 1917 acpi_add_single_object(&device, handle, type, sta); 1918 if (!device) 1919 return AE_CTRL_DEPTH; 1920 1921 acpi_scan_init_hotplug(device); 1922 acpi_device_dep_initialize(device); 1923 1924 out: 1925 if (!*return_value) 1926 *return_value = device; 1927 1928 return AE_OK; 1929} 1930 1931static void acpi_default_enumeration(struct acpi_device *device) 1932{ 1933 /* 1934 * Do not enumerate devices with enumeration_by_parent flag set as 1935 * they will be enumerated by their respective parents. 1936 */ 1937 if (!device->flags.enumeration_by_parent) { 1938 acpi_create_platform_device(device, NULL); 1939 acpi_device_set_enumerated(device); 1940 } else { 1941 blocking_notifier_call_chain(&acpi_reconfig_chain, 1942 ACPI_RECONFIG_DEVICE_ADD, device); 1943 } 1944} 1945 1946static const struct acpi_device_id generic_device_ids[] = { 1947 {ACPI_DT_NAMESPACE_HID, }, 1948 {"", }, 1949}; 1950 1951static int acpi_generic_device_attach(struct acpi_device *adev, 1952 const struct acpi_device_id *not_used) 1953{ 1954 /* 1955 * Since ACPI_DT_NAMESPACE_HID is the only ID handled here, the test 1956 * below can be unconditional. 
	 */
	if (adev->data.of_compatible)
		acpi_default_enumeration(adev);

	return 1;
}

static struct acpi_scan_handler generic_device_handler = {
	.ids = generic_device_ids,
	.attach = acpi_generic_device_attach,
};

static int acpi_scan_attach_handler(struct acpi_device *device)
{
	struct acpi_hardware_id *hwid;
	int ret = 0;

	list_for_each_entry(hwid, &device->pnp.ids, list) {
		const struct acpi_device_id *devid;
		struct acpi_scan_handler *handler;

		handler = acpi_scan_match_handler(hwid->id, &devid);
		if (handler) {
			if (!handler->attach) {
				device->pnp.type.platform_id = 0;
				continue;
			}
			device->handler = handler;
			ret = handler->attach(device, devid);
			if (ret > 0)
				break;

			device->handler = NULL;
			if (ret < 0)
				break;
		}
	}

	return ret;
}

static void acpi_bus_attach(struct acpi_device *device)
{
	struct acpi_device *child;
	acpi_handle ejd;
	int ret;

	if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
		register_dock_dependent_device(device, ejd);

	acpi_bus_get_status(device);
	/* Skip devices that are not present. */
	if (!acpi_device_is_present(device)) {
		device->flags.initialized = false;
		acpi_device_clear_enumerated(device);
		device->flags.power_manageable = 0;
		return;
	}
	if (device->handler)
		goto ok;

	if (!device->flags.initialized) {
		device->flags.power_manageable =
			device->power.states[ACPI_STATE_D0].flags.valid;
		if (acpi_bus_init_power(device))
			device->flags.power_manageable = 0;

		device->flags.initialized = true;
	} else if (device->flags.visited) {
		goto ok;
	}

	ret = acpi_scan_attach_handler(device);
	if (ret < 0)
		return;

	device->flags.match_driver = true;
	if (ret > 0 && !device->flags.enumeration_by_parent) {
		acpi_device_set_enumerated(device);
		goto ok;
	}

	ret = device_attach(&device->dev);
	if (ret < 0)
		return;

	if (device->pnp.type.platform_id || device->flags.enumeration_by_parent)
		acpi_default_enumeration(device);
	else
		acpi_device_set_enumerated(device);

 ok:
	list_for_each_entry(child, &device->children, node)
		acpi_bus_attach(child);

	if (device->handler && device->handler->hotplug.notify_online)
		device->handler->hotplug.notify_online(device);
}

void acpi_walk_dep_device_list(acpi_handle handle)
{
	struct acpi_dep_data *dep, *tmp;
	struct acpi_device *adev;

	mutex_lock(&acpi_dep_list_lock);
	list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
		if (dep->master == handle) {
			acpi_bus_get_device(dep->slave, &adev);
			if (!adev)
				continue;

			adev->dep_unmet--;
			if (!adev->dep_unmet)
				acpi_bus_attach(adev);
			list_del(&dep->node);
			kfree(dep);
		}
	}
	mutex_unlock(&acpi_dep_list_lock);
}
EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list);
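/*
 * Usage sketch (illustrative only, not part of this file): a driver for a
 * device that other objects name in their _DEP (an ACPI EC or an I2C
 * controller backing an operation region, for instance) calls
 * acpi_walk_dep_device_list() with its own handle once it is functional, so
 * that consumers whose dep_unmet count drops to zero get enumerated.
 * The supplier_handle variable below is hypothetical.
 *
 *	acpi_walk_dep_device_list(supplier_handle);
 */

/**
 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
 * @handle: Root of the namespace scope to scan.
 *
 * Scan a given ACPI tree (probably recently hot-plugged) and create and add
 * found devices.
 *
 * If no devices were found, -ENODEV is returned, but it does not mean that
 * there has been a real error.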
/**
 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
 * @handle: Root of the namespace scope to scan.
 *
 * Scan a given ACPI tree (probably recently hot-plugged) and create and add
 * found devices.
 *
 * If no devices were found, -ENODEV is returned, but that does not mean a
 * real error occurred.  It just means that no suitable ACPI objects were
 * found in the namespace trunk from which the kernel could create a device
 * and add an appropriate driver.
 *
 * Must be called under acpi_scan_lock.
 */
int acpi_bus_scan(acpi_handle handle)
{
	void *device = NULL;

	if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device)))
		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
				    acpi_bus_check_add, NULL, NULL, &device);

	if (device) {
		acpi_bus_attach(device);
		return 0;
	}
	return -ENODEV;
}
EXPORT_SYMBOL(acpi_bus_scan);

/**
 * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
 * @adev: Root of the ACPI namespace scope to walk.
 *
 * Must be called under acpi_scan_lock.
 */
void acpi_bus_trim(struct acpi_device *adev)
{
	struct acpi_scan_handler *handler = adev->handler;
	struct acpi_device *child;

	list_for_each_entry_reverse(child, &adev->children, node)
		acpi_bus_trim(child);

	adev->flags.match_driver = false;
	if (handler) {
		if (handler->detach)
			handler->detach(adev);

		adev->handler = NULL;
	} else {
		device_release_driver(&adev->dev);
	}
	/*
	 * Most likely, the device is going away, so put it into D3cold before
	 * that.
	 */
	acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
	adev->flags.initialized = false;
	acpi_device_clear_enumerated(adev);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
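/*
 * Illustrative sketch (not part of this file): both acpi_bus_scan() and
 * acpi_bus_trim() assume that the caller holds acpi_scan_lock, so an
 * external rescan of a hot-added namespace scope would look roughly like
 * this (acpi_rescan_subtree() is a hypothetical wrapper, not an existing
 * API):
 *
 *	static int acpi_rescan_subtree(acpi_handle handle)
 *	{
 *		int ret;
 *
 *		acpi_scan_lock_acquire();
 *		ret = acpi_bus_scan(handle);
 *		acpi_scan_lock_release();
 *
 *		return ret;
 *	}
 *
 * acpi_table_events_fn() below follows the same pattern for table loads.
 */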
int acpi_bus_register_early_device(int type)
{
	struct acpi_device *device = NULL;
	int result;

	result = acpi_add_single_object(&device, NULL,
					type, ACPI_STA_DEFAULT);
	if (result)
		return result;

	device->flags.match_driver = true;
	return device_attach(&device->dev);
}
EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);

static int acpi_bus_scan_fixed(void)
{
	int result = 0;

	/*
	 * Enumerate all fixed-feature devices.
	 */
	if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
		struct acpi_device *device = NULL;

		result = acpi_add_single_object(&device, NULL,
						ACPI_BUS_TYPE_POWER_BUTTON,
						ACPI_STA_DEFAULT);
		if (result)
			return result;

		device->flags.match_driver = true;
		result = device_attach(&device->dev);
		if (result < 0)
			return result;

		device_init_wakeup(&device->dev, true);
	}

	if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
		struct acpi_device *device = NULL;

		result = acpi_add_single_object(&device, NULL,
						ACPI_BUS_TYPE_SLEEP_BUTTON,
						ACPI_STA_DEFAULT);
		if (result)
			return result;

		device->flags.match_driver = true;
		result = device_attach(&device->dev);
	}

	return result < 0 ? result : 0;
}

static void __init acpi_get_spcr_uart_addr(void)
{
	acpi_status status;
	struct acpi_table_spcr *spcr_ptr;

	status = acpi_get_table(ACPI_SIG_SPCR, 0,
				(struct acpi_table_header **)&spcr_ptr);
	if (ACPI_FAILURE(status)) {
		pr_warn(PREFIX "STAO table present, but SPCR is missing\n");
		return;
	}

	spcr_uart_addr = spcr_ptr->serial_port.address;
	acpi_put_table((struct acpi_table_header *)spcr_ptr);
}

static bool acpi_scan_initialized;

int __init acpi_scan_init(void)
{
	int result;
	acpi_status status;
	struct acpi_table_stao *stao_ptr;

	acpi_pci_root_init();
	acpi_pci_link_init();
	acpi_processor_init();
	acpi_platform_init();
	acpi_lpss_init();
	acpi_apd_init();
	acpi_cmos_rtc_init();
	acpi_container_init();
	acpi_memory_hotplug_init();
	acpi_watchdog_init();
	acpi_pnp_init();
	acpi_int340x_thermal_init();
	acpi_amba_init();
	acpi_init_lpit();

	acpi_scan_add_handler(&generic_device_handler);

	/*
	 * If there is a STAO table, check whether it requests that the UART
	 * device described by the SPCR table be ignored.
	 */
	status = acpi_get_table(ACPI_SIG_STAO, 0,
				(struct acpi_table_header **)&stao_ptr);
	if (ACPI_SUCCESS(status)) {
		if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
			pr_info(PREFIX "STAO Name List not yet supported.\n");

		if (stao_ptr->ignore_uart)
			acpi_get_spcr_uart_addr();

		acpi_put_table((struct acpi_table_header *)stao_ptr);
	}

	acpi_gpe_apply_masked_gpes();
	acpi_update_all_gpes();

	/*
	 * Although this path can end up calling __add_memory(), which is
	 * documented to require the device_hotplug_lock, the lock is not
	 * necessary here, because this code runs early, before userspace or
	 * any other code path can trigger hotplug/hot-unplug operations.
	 */
	mutex_lock(&acpi_scan_lock);
	/*
	 * Enumerate devices in the ACPI namespace.
	 */
	result = acpi_bus_scan(ACPI_ROOT_OBJECT);
	if (result)
		goto out;

	result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
	if (result)
		goto out;

	/* Fixed feature devices do not exist on HW-reduced platforms. */
	if (!acpi_gbl_reduced_hardware) {
		result = acpi_bus_scan_fixed();
		if (result) {
			acpi_detach_data(acpi_root->handle,
					 acpi_scan_drop_device);
			acpi_device_del(acpi_root);
			put_device(&acpi_root->dev);
			goto out;
		}
	}

	acpi_scan_initialized = true;

 out:
	mutex_unlock(&acpi_scan_lock);
	return result;
}

static struct acpi_probe_entry *ape;
static int acpi_probe_count;
static DEFINE_MUTEX(acpi_probe_mutex);

static int __init acpi_match_madt(union acpi_subtable_headers *header,
				  const unsigned long end)
{
	if (!ape->subtable_valid || ape->subtable_valid(&header->common, ape))
		if (!ape->probe_subtbl(header, end))
			acpi_probe_count++;

	return 0;
}

int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
{
	int count = 0;

	if (acpi_disabled)
		return 0;

	mutex_lock(&acpi_probe_mutex);
	for (ape = ap_head; nr; ape++, nr--) {
		if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) {
			acpi_probe_count = 0;
			acpi_table_parse_madt(ape->type, acpi_match_madt, 0);
			count += acpi_probe_count;
		} else {
			int res;

			res = acpi_table_parse(ape->id, ape->probe_table);
			if (!res)
				count++;
		}
	}
	mutex_unlock(&acpi_probe_mutex);

	return count;
}

struct acpi_table_events_work {
	struct work_struct work;
	void *table;
	u32 event;
};

static void acpi_table_events_fn(struct work_struct *work)
{
	struct acpi_table_events_work *tew;

	tew = container_of(work, struct acpi_table_events_work, work);

	if (tew->event == ACPI_TABLE_EVENT_LOAD) {
		acpi_scan_lock_acquire();
		acpi_bus_scan(ACPI_ROOT_OBJECT);
		acpi_scan_lock_release();
	}

	kfree(tew);
}

void acpi_scan_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_events_work *tew;

	if (!acpi_scan_initialized)
		return;

	if (event != ACPI_TABLE_EVENT_LOAD)
		return;

	tew = kmalloc(sizeof(*tew), GFP_KERNEL);
	if (!tew)
		return;

	INIT_WORK(&tew->work, acpi_table_events_fn);
	tew->table = table;
	tew->event = event;

	schedule_work(&tew->work);
}

int acpi_reconfig_notifier_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&acpi_reconfig_chain, nb);
}
EXPORT_SYMBOL(acpi_reconfig_notifier_register);

int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&acpi_reconfig_chain, nb);
}
EXPORT_SYMBOL(acpi_reconfig_notifier_unregister);
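/*
 * Illustrative sketch (not part of this file): a bus core that enumerates
 * children of ACPI devices can watch for device nodes added by runtime
 * table loads through the reconfiguration chain.  foo_acpi_notify() and
 * foo_acpi_notifier are hypothetical names:
 *
 *	static int foo_acpi_notify(struct notifier_block *nb,
 *				   unsigned long value, void *arg)
 *	{
 *		struct acpi_device *adev = arg;
 *
 *		if (value != ACPI_RECONFIG_DEVICE_ADD)
 *			return NOTIFY_OK;
 *
 *		// Look at adev and enumerate any children it describes.
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_acpi_notifier = {
 *		.notifier_call = foo_acpi_notify,
 *	};
 *
 * The block is registered with
 * acpi_reconfig_notifier_register(&foo_acpi_notifier) and removed again
 * with acpi_reconfig_notifier_unregister().
 */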