// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2007 Novell Inc.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mempolicy.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/isolation.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/kexec.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include "pci.h"
#include "pcie/portdrv.h"

struct pci_dynid {
	struct list_head node;
	struct pci_device_id id;
};

/**
 * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
 * @drv: target pci driver
 * @vendor: PCI vendor ID
 * @device: PCI device ID
 * @subvendor: PCI subvendor ID
 * @subdevice: PCI subdevice ID
 * @class: PCI class
 * @class_mask: PCI class mask
 * @driver_data: private driver data
 *
 * Adds a new dynamic pci device ID to this driver and causes the
 * driver to probe for all devices again.  @drv must have been
 * registered prior to calling this function.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int pci_add_dynid(struct pci_driver *drv,
		  unsigned int vendor, unsigned int device,
		  unsigned int subvendor, unsigned int subdevice,
		  unsigned int class, unsigned int class_mask,
		  unsigned long driver_data)
{
	struct pci_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.vendor = vendor;
	dynid->id.device = device;
	dynid->id.subvendor = subvendor;
	dynid->id.subdevice = subdevice;
	dynid->id.class = class;
	dynid->id.class_mask = class_mask;
	dynid->id.driver_data = driver_data;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}
EXPORT_SYMBOL_GPL(pci_add_dynid);
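
/*
 * Illustrative sketch (not part of the original source): a driver that has
 * already registered a struct pci_driver, here called "example_pci_driver",
 * could accept an additional device ID at run time roughly like this.  The
 * driver name and IDs below are made up for the example.
 *
 *	err = pci_add_dynid(&example_pci_driver,
 *			    0x1234, 0x5678,		 (vendor, device)
 *			    PCI_ANY_ID, PCI_ANY_ID,	 (subvendor, subdevice)
 *			    0, 0,			 (class, class_mask)
 *			    0);				 (driver_data)
 *
 * pci_add_dynid() ends with driver_attach(), so unbound devices matching the
 * new ID are probed immediately.
 */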

static void pci_free_dynids(struct pci_driver *drv)
{
	struct pci_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/**
 * new_id_store - sysfs frontend to pci_add_dynid()
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Allow PCI IDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct pci_driver *pdrv = to_pci_driver(driver);
	const struct pci_device_id *ids = pdrv->id_table;
	u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	unsigned long driver_data = 0;
	int fields = 0;
	int retval = 0;

	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask, &driver_data);
	if (fields < 2)
		return -EINVAL;

	if (fields != 7) {
		struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
		if (!pdev)
			return -ENOMEM;

		pdev->vendor = vendor;
		pdev->device = device;
		pdev->subsystem_vendor = subvendor;
		pdev->subsystem_device = subdevice;
		pdev->class = class;

		if (pci_match_id(pdrv->id_table, pdev))
			retval = -EEXIST;

		kfree(pdev);

		if (retval)
			return retval;
	}

	/*
	 * Only accept driver_data values that match an existing id_table
	 * entry.
	 */
	if (ids) {
		retval = -EINVAL;
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (driver_data == ids->driver_data) {
				retval = 0;
				break;
			}
			ids++;
		}
		if (retval)	/* No match */
			return retval;
	}

	retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
			       class, class_mask, driver_data);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);

/**
 * remove_id_store - remove a PCI device ID from this driver
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Removes a dynamic pci device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct pci_dynid *dynid, *n;
	struct pci_driver *pdrv = to_pci_driver(driver);
	u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	int fields = 0;
	size_t retval = -ENODEV;

	fields = sscanf(buf, "%x %x %x %x %x %x",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask);
	if (fields < 2)
		return -EINVAL;

	spin_lock(&pdrv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
		struct pci_device_id *id = &dynid->id;
		if ((id->vendor == vendor) &&
		    (id->device == device) &&
		    (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
		    (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
		    !((id->class ^ class) & class_mask)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&pdrv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *pci_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(pci_drv);
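
/*
 * Illustrative usage note (not part of the original source): from user space
 * the two attributes above appear as
 *
 *	/sys/bus/pci/drivers/<driver>/new_id
 *	/sys/bus/pci/drivers/<driver>/remove_id
 *
 * and take "vendor device [subvendor [subdevice [class [class_mask
 * [driver_data]]]]]" in hex, matching the sscanf() formats above.  For a
 * made-up driver and device:
 *
 *	# echo "1234 5678" > /sys/bus/pci/drivers/example/new_id
 *	# echo "1234 5678" > /sys/bus/pci/drivers/example/remove_id
 *
 * new_id_store() hands the parsed values to pci_add_dynid(); remove_id_store()
 * only removes IDs that were added dynamically, not entries of the static
 * id_table.
 */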

/**
 * pci_match_id - See if a pci device matches a given pci_id table
 * @ids: array of PCI device id structures to search in
 * @dev: the PCI device structure to match against.
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 *
 * Deprecated, don't use this as it will not catch any dynamic ids
 * that a driver might want to check for.
 */
const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
					 struct pci_dev *dev)
{
	if (ids) {
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (pci_match_one_device(ids, dev))
				return ids;
			ids++;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_match_id);
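
/*
 * Illustrative sketch (not part of the original source): besides the core's
 * own use below, a driver can run a device against an auxiliary table of its
 * own, e.g. to detect quirky variants.  The table, IDs and flag are made up:
 *
 *	static const struct pci_device_id example_quirk_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678), .driver_data = EXAMPLE_QUIRK },
 *		{ }
 *	};
 *
 *	if (pci_match_id(example_quirk_ids, pdev))
 *		... apply the quirk ...
 *
 * Note the deprecation comment above: unlike pci_match_device() below, this
 * helper never sees IDs added at run time through new_id.
 */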

static const struct pci_device_id pci_device_id_any = {
	.vendor = PCI_ANY_ID,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
};

/**
 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
 * @drv: the PCI driver to match against
 * @dev: the PCI device structure to match against
 *
 * Used by the PCI core to check whether a PCI device is in @drv's list of
 * supported devices, including any dynamically added IDs, or has a matching
 * driver_override.  Returns the matching pci_device_id structure or %NULL
 * if there is no match.
 */
static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						    struct pci_dev *dev)
{
	struct pci_dynid *dynid;
	const struct pci_device_id *found_id = NULL;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (pci_match_one_device(&dynid->id, dev)) {
			found_id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	if (!found_id)
		found_id = pci_match_id(drv->id_table, dev);

	/* driver_override will always match, send a dummy id */
	if (!found_id && dev->driver_override)
		found_id = &pci_device_id_any;

	return found_id;
}

struct drv_dev_and_id {
	struct pci_driver *drv;
	struct pci_dev *dev;
	const struct pci_device_id *id;
};

static long local_pci_probe(void *_ddi)
{
	struct drv_dev_and_id *ddi = _ddi;
	struct pci_dev *pci_dev = ddi->dev;
	struct pci_driver *pci_drv = ddi->drv;
	struct device *dev = &pci_dev->dev;
	int rc;

	/*
	 * Unbound PCI devices are always put in D0, regardless of
	 * runtime PM status.  During probe, the device is set to
	 * active and the usage count is incremented.  If the driver
	 * supports runtime PM, it should call pm_runtime_put_noidle(),
	 * or any other runtime PM helper function decrementing the usage
	 * count, in its probe routine and pm_runtime_get_noresume() in
	 * its remove routine.
	 */
	pm_runtime_get_sync(dev);
	pci_dev->driver = pci_drv;
	rc = pci_drv->probe(pci_dev, ddi->id);
	if (!rc)
		return rc;
	if (rc < 0) {
		pci_dev->driver = NULL;
		pm_runtime_put_sync(dev);
		return rc;
	}
	/*
	 * Probe function should return < 0 for failure, 0 for success.
	 * Treat values > 0 as success, but warn.
	 */
	pci_warn(pci_dev, "Driver probe function unexpectedly returned %d\n",
		 rc);
	return 0;
}
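
/*
 * Illustrative sketch (not part of the original source), following the
 * comment in local_pci_probe() above: a driver that wants its device to be
 * runtime-suspendable while bound would typically drop the usage count taken
 * here in its probe and take it back in its remove.  Function names are made
 * up:
 *
 *	static int example_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *id)
 *	{
 *		... usual setup ...
 *		pm_runtime_put_noidle(&pdev->dev);
 *		pm_runtime_allow(&pdev->dev);
 *		return 0;
 *	}
 *
 *	static void example_remove(struct pci_dev *pdev)
 *	{
 *		pm_runtime_forbid(&pdev->dev);
 *		pm_runtime_get_noresume(&pdev->dev);
 *		... usual teardown ...
 *	}
 *
 * pm_runtime_allow()/pm_runtime_forbid() are optional policy knobs; the
 * mandatory part is balancing the pm_runtime_get_sync() done by the core in
 * local_pci_probe().
 */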

static bool pci_physfn_is_probed(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	return dev->is_virtfn && dev->physfn->is_probed;
#else
	return false;
#endif
}

static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	int error, node, cpu;
	int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
	struct drv_dev_and_id ddi = { drv, dev, id };

	/*
	 * Execute driver initialization on node where the device is
	 * attached.  This way the driver likely allocates its local memory
	 * on the right node.
	 */
	node = dev_to_node(&dev->dev);
	dev->is_probed = 1;

	cpu_hotplug_disable();

	/*
	 * Prevent nesting work_on_cpu() for the case where a Virtual Function
	 * device is probed from work_on_cpu() of the Physical device.
	 */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
	    pci_physfn_is_probed(dev))
		cpu = nr_cpu_ids;
	else
		cpu = cpumask_any_and(cpumask_of_node(node),
				      housekeeping_cpumask(hk_flags));

	if (cpu < nr_cpu_ids)
		error = work_on_cpu(cpu, local_pci_probe, &ddi);
	else
		error = local_pci_probe(&ddi);

	dev->is_probed = 0;
	cpu_hotplug_enable();
	return error;
}

/**
 * __pci_device_probe - check if a driver wants to claim a specific PCI device
 * @drv: driver to call to check if it wants the PCI device
 * @pci_dev: PCI device being probed
 *
 * returns 0 on success, else error.
 * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
 */
static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
	const struct pci_device_id *id;
	int error = 0;

	if (!pci_dev->driver && drv->probe) {
		error = -ENODEV;

		id = pci_match_device(drv, pci_dev);
		if (id)
			error = pci_call_probe(drv, pci_dev, id);
	}
	return error;
}

int __weak pcibios_alloc_irq(struct pci_dev *dev)
{
	return 0;
}

void __weak pcibios_free_irq(struct pci_dev *dev)
{
}

#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
		pdev->driver_override);
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return true;
}
#endif

static int pci_device_probe(struct device *dev)
{
	int error;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = to_pci_driver(dev->driver);

	if (!pci_device_can_probe(pci_dev))
		return -ENODEV;

	pci_assign_irq(pci_dev);

	error = pcibios_alloc_irq(pci_dev);
	if (error < 0)
		return error;

	pci_dev_get(pci_dev);
	error = __pci_device_probe(drv, pci_dev);
	if (error) {
		pcibios_free_irq(pci_dev);
		pci_dev_put(pci_dev);
	}

	return error;
}

static int pci_device_remove(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv->remove) {
		pm_runtime_get_sync(dev);
		/*
		 * If the driver provides a .runtime_idle() callback and it has
		 * started to run already, it may continue to run in parallel
		 * with the code below, so wait until all of the runtime PM
		 * activity has completed.
		 */
		pm_runtime_barrier(dev);
		drv->remove(pci_dev);
		pm_runtime_put_noidle(dev);
	}
	pcibios_free_irq(pci_dev);
	pci_dev->driver = NULL;
	pci_iov_remove(pci_dev);

	/* Undo the runtime PM settings in local_pci_probe() */
	pm_runtime_put_sync(dev);

	/*
	 * If the device is still on, set the power state as "unknown",
	 * since it might change by the next time we load the driver.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;

	/*
	 * We would love to complain here if pci_dev->is_enabled is set, that
	 * the driver should have called pci_disable_device(), but the
	 * unfortunate fact is there are too many odd BIOS and bridge setups
	 * that don't like drivers doing that all of the time.
	 * Oh well, we can dream of sane hardware when we sleep, no matter how
	 * horrible the crap we have to deal with is when we are awake...
	 */

	pci_dev_put(pci_dev);
	return 0;
}

static void pci_device_shutdown(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pm_runtime_resume(dev);

	if (drv && drv->shutdown)
		drv->shutdown(pci_dev);

	/*
	 * If this is a kexec reboot, turn off Bus Master bit on the
	 * device to tell it to not continue to do DMA.  Don't touch
	 * devices in D3cold or unknown states.
	 * If it is not a kexec reboot, firmware will hit the PCI
	 * devices with big hammer and stop their DMA any way.
	 */
	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
		pci_clear_master(pci_dev);
}

#ifdef CONFIG_PM

/* Auxiliary functions used for system resume and run-time resume. */

/**
 * pci_restore_standard_config - restore standard config registers of PCI device
 * @pci_dev: PCI device to handle
 */
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
	pci_update_current_state(pci_dev, PCI_UNKNOWN);

	if (pci_dev->current_state != PCI_D0) {
		int error = pci_set_power_state(pci_dev, PCI_D0);
		if (error)
			return error;
	}

	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
	return 0;
}

static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
	pci_fixup_device(pci_fixup_resume, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
}

#endif

#ifdef CONFIG_PM_SLEEP

static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
	pci_power_up(pci_dev);
	pci_update_current_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
}

/*
 * Default "suspend" method for devices that have no driver provided suspend,
 * or not even a driver at all (second part).
 */
static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
{
	/*
	 * mark its power state as "unknown", since we don't know if
	 * e.g. the BIOS will change its device state when we suspend.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;
}

/*
 * Default "resume" method for devices that have no driver provided resume,
 * or not even a driver at all (second part).
 */
static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
	int retval;

	/* if the device was enabled before suspend, reenable */
	retval = pci_reenable_device(pci_dev);
	/*
	 * if the device was busmaster before the suspend, make it busmaster
	 * again
	 */
	if (pci_dev->is_busmaster)
		pci_set_master(pci_dev);

	return retval;
}

static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend(pci_dev, state);
		suspend_report_result(drv->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
				      "PCI PM: Device state not saved by %pS\n",
				      drv->suspend);
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return 0;
}

static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	return 0;
}

static int pci_legacy_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pci_fixup_device(pci_fixup_resume, pci_dev);

	return drv && drv->resume ?
			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
}

/* Auxiliary functions used by the new power management framework */

static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
	/* Disable non-bridge devices without PM support */
	if (!pci_has_subordinate(pci_dev))
		pci_disable_enabled_device(pci_dev);
}

static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
	struct pci_driver *drv = pci_dev->driver;
	bool ret = drv && (drv->suspend || drv->resume);

	/*
	 * Legacy PM support is used by default, so warn if the new framework is
	 * supported as well.  Drivers are supposed to support either the
	 * former, or the latter, but not both at the same time.
	 */
	pci_WARN(pci_dev, ret && drv->driver.pm, "device %04x:%04x\n",
		 pci_dev->vendor, pci_dev->device);

	return ret;
}

/* New power management framework */

static int pci_pm_prepare(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm && pm->prepare) {
		int error = pm->prepare(dev);
		if (error < 0)
			return error;

		if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
			return 0;
	}
	if (pci_dev_need_resume(pci_dev))
		return 0;

	/*
	 * The PME setting needs to be adjusted here in case the direct-complete
	 * optimization is used with respect to this device.
	 */
	pci_dev_adjust_pme(pci_dev);
	return 1;
}
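
/*
 * Illustrative note (not part of the original source): the DPM_FLAG_* checks
 * in pci_pm_prepare() and pci_pm_suspend() only take effect if a driver opts
 * in, typically from its probe routine, e.g.:
 *
 *	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE |
 *					    DPM_FLAG_SMART_SUSPEND);
 *
 * Roughly: with DPM_FLAG_SMART_PREPARE the PCI core honours a driver
 * ->prepare() that returns 0 and does not opt the device into the
 * direct-complete optimization on the driver's behalf; with
 * DPM_FLAG_SMART_SUSPEND the driver declares that its system suspend
 * callbacks can cope with a runtime-suspended device, so pci_pm_suspend()
 * need not resume it first.
 */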

static void pci_pm_complete(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_dev_complete_resume(pci_dev);
	pm_generic_complete(dev);

	/* Resume device if platform firmware has put it in reset-power-on */
	if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
		pci_power_t pre_sleep_state = pci_dev->current_state;

		pci_refresh_power_state(pci_dev);
		/*
		 * On platforms with ACPI this check may also trigger for
		 * devices sharing power resources if one of those power
		 * resources has been activated as a result of a change of the
		 * power state of another device sharing it.  However, in that
		 * case it is also better to resume the device, in general.
		 */
		if (pci_dev->current_state < pre_sleep_state)
			pm_request_resume(dev);
	}
}

#else /* !CONFIG_PM_SLEEP */

#define pci_pm_prepare	NULL
#define pci_pm_complete	NULL

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND
static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev)
{
	/*
	 * Some BIOSes forget to clear Root PME Status bits after system
	 * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
	 * Clear those bits now just in case (shouldn't hurt).
	 */
	if (pci_is_pcie(pci_dev) &&
	    (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT ||
	     pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC))
		pcie_clear_root_pme_status(pci_dev);
}

static int pci_pm_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	pci_dev->skip_bus_pm = false;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * PCI devices suspended at run time may need to be resumed at this
	 * point, because in general it may be necessary to reconfigure them for
	 * system suspend.  Namely, if the device is expected to wake up the
	 * system from the sleep state, it may have to be reconfigured for this
	 * purpose, or if the device is not expected to wake up the system from
	 * the sleep state, it should be prevented from signaling wakeup events
	 * going forward.
	 *
	 * Also if the driver of the device does not indicate that its system
	 * suspend callbacks can cope with runtime-suspended devices, it is
	 * better to resume the device from runtime suspend here.
	 */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    pci_dev_need_resume(pci_dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	} else {
		pci_dev_adjust_pme(pci_dev);
	}

	if (pm->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend(dev);
		suspend_report_result(pm->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
				      "PCI PM: State of device not saved by %pS\n",
				      pm->suspend);
		}
	}

	return 0;
}

static int pci_pm_suspend_late(struct device *dev)
{
	if (dev_pm_skip_suspend(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_suspend_late(dev);
}

static int pci_pm_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_save_state(pci_dev);
		goto Fixup;
	}

	if (pm->suspend_noirq) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend_noirq(dev);
		suspend_report_result(pm->suspend_noirq, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
				      "PCI PM: State of device not saved by %pS\n",
				      pm->suspend_noirq);
			goto Fixup;
		}
	}

	if (pci_dev->skip_bus_pm) {
		/*
		 * Either the device is a bridge with a child in D0 below it, or
		 * the function is running for the second time in a row without
		 * going through full resume, which is possible only during
		 * suspend-to-idle in a spurious wakeup case.  The device should
		 * be in D0 at this point, but if it is a bridge, it may be
		 * necessary to save its state.
		 */
		if (!pci_dev->state_saved)
			pci_save_state(pci_dev);
	} else if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		if (pci_power_manageable(pci_dev))
			pci_prepare_to_sleep(pci_dev);
	}

	pci_dbg(pci_dev, "PCI PM: Suspend power state: %s\n",
		pci_power_name(pci_dev->current_state));

	if (pci_dev->current_state == PCI_D0) {
		pci_dev->skip_bus_pm = true;
		/*
		 * Per PCI PM r1.2, table 6-1, a bridge must be in D0 if any
		 * downstream device is in D0, so avoid changing the power state
		 * of the parent bridge by setting the skip_bus_pm flag for it.
		 */
		if (pci_dev->bus->self)
			pci_dev->bus->self->skip_bus_pm = true;
	}

	if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
		pci_dbg(pci_dev, "PCI PM: Skipped\n");
		goto Fixup;
	}

	pci_pm_set_unknown_state(pci_dev);

	/*
	 * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
	 * PCI COMMAND register isn't 0, the BIOS assumes that the controller
	 * hasn't been quiesced and tries to turn it off.  If the controller
	 * is already in D3, this can hang or cause memory corruption.
	 *
	 * Since the value of the COMMAND register doesn't matter once the
	 * device has been suspended, we can safely set it to 0 here.
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	/*
	 * If the target system sleep state is suspend-to-idle, it is sufficient
	 * to check whether or not the device's wakeup settings are good for
	 * runtime PM.  Otherwise, the pm_resume_via_firmware() check will cause
	 * pci_pm_complete() to take care of fixing up the device's state
	 * anyway, if need be.
	 */
	if (device_can_wakeup(dev) && !device_may_wakeup(dev))
		dev->power.may_skip_resume = false;

	return 0;
}

static int pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev_state = pci_dev->current_state;
	bool skip_bus_pm = pci_dev->skip_bus_pm;

	if (dev_pm_skip_resume(dev))
		return 0;

	/*
	 * In the suspend-to-idle case, devices left in D0 during suspend will
	 * stay in D0, so it is not necessary to restore or update their
	 * configuration here and attempting to put them into D0 again is
	 * pointless, so avoid doing that.
	 */
	if (!(skip_bus_pm && pm_suspend_no_platform()))
		pci_pm_default_resume_early(pci_dev);

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pcie_pme_root_status_cleanup(pci_dev);

	if (!skip_bus_pm && prev_state == PCI_D3cold)
		pci_bridge_wait_for_secondary_bus(pci_dev, "resume", PCI_RESET_WAIT);

	if (pci_has_legacy_pm_support(pci_dev))
		return 0;

	if (pm && pm->resume_noirq)
		return pm->resume_noirq(dev);

	return 0;
}

static int pci_pm_resume_early(struct device *dev)
{
	if (dev_pm_skip_resume(dev))
		return 0;

	return pm_generic_resume_early(dev);
}

static int pci_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * This is necessary for the suspend error path in which resume is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->resume)
			return pm->resume(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return 0;
}

#else /* !CONFIG_SUSPEND */

#define pci_pm_suspend		NULL
#define pci_pm_suspend_late	NULL
#define pci_pm_suspend_noirq	NULL
#define pci_pm_resume		NULL
#define pci_pm_resume_early	NULL
#define pci_pm_resume_noirq	NULL

#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

static int pci_pm_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_FREEZE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * Resume all runtime-suspended devices before creating a snapshot
	 * image of system memory, because the restore kernel generally cannot
	 * be expected to always handle them consistently and they need to be
	 * put into the runtime-active metastate during system resume anyway,
	 * so it is better to ensure that the state saved in the image will be
	 * always consistent with that.
	 */
	pm_runtime_resume(dev);
	pci_dev->state_saved = false;

	if (pm->freeze) {
		int error;

		error = pm->freeze(dev);
		suspend_report_result(pm->freeze, error);
		if (error)
			return error;
	}

	return 0;
}

static int pci_pm_freeze_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_FREEZE);

	if (pm && pm->freeze_noirq) {
		int error;

		error = pm->freeze_noirq(dev);
		suspend_report_result(pm->freeze_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	return 0;
}

static int pci_pm_thaw_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * The pm->thaw_noirq() callback assumes the device has been
	 * returned to D0 and its config state has been restored.
	 *
	 * In addition, pci_restore_state() restores MSI-X state in MMIO
	 * space, which requires the device to be in D0, so return it to D0
	 * in case the driver's "freeze" callbacks put it into a low-power
	 * state.
	 */
	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return 0;

	if (pm && pm->thaw_noirq)
		return pm->thaw_noirq(dev);

	return 0;
}

static int pci_pm_thaw(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	if (pm) {
		if (pm->thaw)
			error = pm->thaw(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	pci_dev->state_saved = false;

	return error;
}

static int pci_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/* The reason to do that is the same as in pci_pm_suspend(). */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    pci_dev_need_resume(pci_dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	} else {
		pci_dev_adjust_pme(pci_dev);
	}

	if (pm->poweroff) {
		int error;

		error = pm->poweroff(dev);
		suspend_report_result(pm->poweroff, error);
		if (error)
			return error;
	}

	return 0;
}

static int pci_pm_poweroff_late(struct device *dev)
{
	if (dev_pm_skip_suspend(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_poweroff_late(dev);
}

static int pci_pm_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_fixup_device(pci_fixup_suspend_late, pci_dev);
		return 0;
	}

	if (pm->poweroff_noirq) {
		int error;

		error = pm->poweroff_noirq(dev);
		suspend_report_result(pm->poweroff_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
		pci_prepare_to_sleep(pci_dev);

	/*
	 * The reason for doing this here is the same as for the analogous code
	 * in pci_pm_suspend_noirq().
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	return 0;
}

static int pci_pm_restore_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	pci_pm_default_resume_early(pci_dev);
	pci_fixup_device(pci_fixup_resume_early, pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return 0;

	if (pm && pm->restore_noirq)
		return pm->restore_noirq(dev);

	return 0;
}

static int pci_pm_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * This is necessary for the hibernation error path in which restore is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->restore)
			return pm->restore(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return 0;
}

#else /* !CONFIG_HIBERNATE_CALLBACKS */

#define pci_pm_freeze		NULL
#define pci_pm_freeze_noirq	NULL
#define pci_pm_thaw		NULL
#define pci_pm_thaw_noirq	NULL
#define pci_pm_poweroff		NULL
#define pci_pm_poweroff_late	NULL
#define pci_pm_poweroff_noirq	NULL
#define pci_pm_restore		NULL
#define pci_pm_restore_noirq	NULL

#endif /* !CONFIG_HIBERNATE_CALLBACKS */

#ifdef CONFIG_PM

static int pci_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev = pci_dev->current_state;
	int error;

	/*
	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
	 * but it may go to D3cold when the bridge above it runtime suspends.
	 * Save its config space in case that happens.
	 */
	if (!pci_dev->driver) {
		pci_save_state(pci_dev);
		return 0;
	}

	pci_dev->state_saved = false;
	if (pm && pm->runtime_suspend) {
		error = pm->runtime_suspend(dev);
		/*
		 * -EBUSY and -EAGAIN are used to request the runtime PM core
		 * to schedule a new suspend, so log the event only with debug
		 * log level.
		 */
		if (error == -EBUSY || error == -EAGAIN) {
			pci_dbg(pci_dev, "can't suspend now (%ps returned %d)\n",
				pm->runtime_suspend, error);
			return error;
		} else if (error) {
			pci_err(pci_dev, "can't suspend (%ps returned %d)\n",
				pm->runtime_suspend, error);
			return error;
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	if (pm && pm->runtime_suspend
	    && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
	    && pci_dev->current_state != PCI_UNKNOWN) {
		pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
			      "PCI PM: State of device not saved by %pS\n",
			      pm->runtime_suspend);
		return 0;
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		pci_finish_runtime_suspend(pci_dev);
	}

	return 0;
}

static int pci_pm_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev_state = pci_dev->current_state;
	int error = 0;

	/*
	 * Restoring config space is necessary even if the device is not bound
	 * to a driver because although we left it in D0, it may have gone to
	 * D3cold when the bridge above it runtime suspended.
	 */
	pci_restore_standard_config(pci_dev);

	if (!pci_dev->driver)
		return 0;

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pci_pm_default_resume(pci_dev);

	if (prev_state == PCI_D3cold)
		pci_bridge_wait_for_secondary_bus(pci_dev, "resume", PCI_RESET_WAIT);

	if (pm && pm->runtime_resume)
		error = pm->runtime_resume(dev);

	pci_dev->runtime_d3cold = false;

	return error;
}

static int pci_pm_runtime_idle(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * If pci_dev->driver is not set (unbound), the device should
	 * always remain in D0 regardless of the runtime PM status.
	 */
	if (!pci_dev->driver)
		return 0;

	if (!pm)
		return -ENOSYS;

	if (pm->runtime_idle)
		return pm->runtime_idle(dev);

	return 0;
}

static const struct dev_pm_ops pci_dev_pm_ops = {
	.prepare = pci_pm_prepare,
	.complete = pci_pm_complete,
	.suspend = pci_pm_suspend,
	.suspend_late = pci_pm_suspend_late,
	.resume = pci_pm_resume,
	.resume_early = pci_pm_resume_early,
	.freeze = pci_pm_freeze,
	.thaw = pci_pm_thaw,
	.poweroff = pci_pm_poweroff,
	.poweroff_late = pci_pm_poweroff_late,
	.restore = pci_pm_restore,
	.suspend_noirq = pci_pm_suspend_noirq,
	.resume_noirq = pci_pm_resume_noirq,
	.freeze_noirq = pci_pm_freeze_noirq,
	.thaw_noirq = pci_pm_thaw_noirq,
	.poweroff_noirq = pci_pm_poweroff_noirq,
	.restore_noirq = pci_pm_restore_noirq,
	.runtime_suspend = pci_pm_runtime_suspend,
	.runtime_resume = pci_pm_runtime_resume,
	.runtime_idle = pci_pm_runtime_idle,
};

#define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)

#else /* !CONFIG_PM */

#define pci_pm_runtime_suspend	NULL
#define pci_pm_runtime_resume	NULL
#define pci_pm_runtime_idle	NULL

#define PCI_PM_OPS_PTR	NULL

#endif /* !CONFIG_PM */

/**
 * __pci_register_driver - register a new pci driver
 * @drv: the driver structure to register
 * @owner: owner module of drv
 * @mod_name: module name string
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns a negative value on error, otherwise 0.
 * If no error occurred, the driver remains registered even if
 * no device was claimed during registration.
 */
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
			  const char *mod_name)
{
	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &pci_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	drv->driver.groups = drv->groups;

	spin_lock_init(&drv->dynids.lock);
	INIT_LIST_HEAD(&drv->dynids.list);

	/* register with core */
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(__pci_register_driver);

/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */
void pci_unregister_driver(struct pci_driver *drv)
{
	driver_unregister(&drv->driver);
	pci_free_dynids(drv);
}
EXPORT_SYMBOL(pci_unregister_driver);
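
/*
 * Illustrative sketch (not part of the original source): drivers normally
 * reach __pci_register_driver() through the pci_register_driver() wrapper or
 * the module_pci_driver() helper.  All names and IDs below are made up:
 *
 *	static const struct pci_device_id example_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, example_ids);
 *
 *	static struct pci_driver example_driver = {
 *		.name		= "example",
 *		.id_table	= example_ids,
 *		.probe		= example_probe,
 *		.remove		= example_remove,
 *	};
 *	module_pci_driver(example_driver);
 *
 * module_pci_driver() expands to module_init()/module_exit() stubs that call
 * pci_register_driver() and pci_unregister_driver().
 */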

static struct pci_driver pci_compat_driver = {
	.name = "compat"
};

/**
 * pci_dev_driver - get the pci_driver of a device
 * @dev: the device to query
 *
 * Returns the appropriate pci_driver structure or %NULL if there is no
 * registered driver for the device.
 */
struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
	if (dev->driver)
		return dev->driver;
	else {
		int i;
		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			if (dev->resource[i].flags & IORESOURCE_BUSY)
				return &pci_compat_driver;
	}
	return NULL;
}
EXPORT_SYMBOL(pci_dev_driver);

/**
 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
 * @dev: the PCI device structure to match against
 * @drv: the device driver to search for matching PCI device id structures
 *
 * Used by the PCI core to check whether a PCI device present in the
 * system should be bound to @drv.  Returns 1 if a matching device ID
 * (static or dynamic) is found, 0 otherwise.
 */
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *pci_drv;
	const struct pci_device_id *found_id;

	if (!pci_dev->match_driver)
		return 0;

	pci_drv = to_pci_driver(drv);
	found_id = pci_match_device(pci_drv, pci_dev);
	if (found_id)
		return 1;

	return 0;
}

/**
 * pci_dev_get - increments the reference count of the pci device structure
 * @dev: the device being referenced
 *
 * Each live reference to a device should be refcounted.
 *
 * Drivers for PCI devices should normally record such references in
 * their probe() methods, when they bind to a device, and release
 * them by calling pci_dev_put(), in their disconnect() methods.
 *
 * A pointer to the device with the incremented reference counter is returned.
 */
struct pci_dev *pci_dev_get(struct pci_dev *dev)
{
	if (dev)
		get_device(&dev->dev);
	return dev;
}
EXPORT_SYMBOL(pci_dev_get);

/**
 * pci_dev_put - release a use of the pci device structure
 * @dev: device that's been disconnected
 *
 * Must be called when a user of a device is finished with it.  When the last
 * user of the device calls this function, the memory of the device is freed.
 */
void pci_dev_put(struct pci_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(pci_dev_put);
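
/*
 * Illustrative sketch (not part of the original source): code that keeps a
 * struct pci_dev pointer beyond the scope in which it was obtained should
 * hold a reference and drop it when done.  The IDs below are made up:
 *
 *	struct pci_dev *pdev = pci_get_device(0x1234, 0x5678, NULL);
 *	if (pdev) {
 *		... use pdev ...
 *		pci_dev_put(pdev);
 *	}
 *
 * pci_get_device() already returns a referenced device, so only
 * pci_dev_put() is needed in that pattern; pci_dev_get() is for pointers
 * obtained without a reference, e.g. one cached from probe().
 */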

static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct pci_dev *pdev;

	if (!dev)
		return -ENODEV;

	pdev = to_pci_dev(dev);

	if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
			   pdev->subsystem_device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
			   pdev->vendor, pdev->device,
			   pdev->subsystem_vendor, pdev->subsystem_device,
			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
			   (u8)(pdev->class)))
		return -ENOMEM;

	return 0;
}

#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
/**
 * pci_uevent_ers - emit a uevent during recovery path of PCI device
 * @pdev: PCI device undergoing error recovery
 * @err_type: type of error event
 */
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
{
	int idx = 0;
	char *envp[3];

	switch (err_type) {
	case PCI_ERS_RESULT_NONE:
	case PCI_ERS_RESULT_CAN_RECOVER:
		envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	case PCI_ERS_RESULT_RECOVERED:
		envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=1";
		break;
	case PCI_ERS_RESULT_DISCONNECT:
		envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	default:
		break;
	}

	if (idx > 0) {
		envp[idx++] = NULL;
		kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
	}
}
#endif

static int pci_bus_num_vf(struct device *dev)
{
	return pci_num_vf(to_pci_dev(dev));
}

/**
 * pci_dma_configure - Setup DMA configuration
 * @dev: ptr to dev structure
 *
 * Function to update a PCI device's DMA configuration using the same
 * info from the OF node or ACPI node of the host bridge's parent (if any).
 */
static int pci_dma_configure(struct device *dev)
{
	struct device *bridge;
	int ret = 0;

	bridge = pci_get_host_bridge_device(to_pci_dev(dev));

	if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
	    bridge->parent->of_node) {
		ret = of_dma_configure(dev, bridge->parent->of_node, true);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);

		ret = acpi_dma_configure(dev, acpi_get_dma_attr(adev));
	}

	pci_put_host_bridge_device(bridge);
	return ret;
}

struct bus_type pci_bus_type = {
	.name = "pci",
	.match = pci_bus_match,
	.uevent = pci_uevent,
	.probe = pci_device_probe,
	.remove = pci_device_remove,
	.shutdown = pci_device_shutdown,
	.dev_groups = pci_dev_groups,
	.bus_groups = pci_bus_groups,
	.drv_groups = pci_drv_groups,
	.pm = PCI_PM_OPS_PTR,
	.num_vf = pci_bus_num_vf,
	.dma_configure = pci_dma_configure,
};
EXPORT_SYMBOL(pci_bus_type);

#ifdef CONFIG_PCIEPORTBUS
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pcie_device *pciedev;
	struct pcie_port_service_driver *driver;

	if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
		return 0;

	pciedev = to_pcie_device(dev);
	driver = to_service_driver(drv);

	if (driver->service != pciedev->service)
		return 0;

	if (driver->port_type != PCIE_ANY_PORT &&
	    driver->port_type != pci_pcie_type(pciedev->port))
		return 0;

	return 1;
}

struct bus_type pcie_port_bus_type = {
	.name = "pci_express",
	.match = pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
#endif

static int __init pci_driver_init(void)
{
	int ret;

	ret = bus_register(&pci_bus_type);
	if (ret)
		return ret;

#ifdef CONFIG_PCIEPORTBUS
	ret = bus_register(&pcie_port_bus_type);
	if (ret)
		return ret;
#endif
	dma_debug_add_bus(&pci_bus_type);
	return 0;
}
postcore_initcall(pci_driver_init);