// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;
	return cc ? -EIO : 0;
}
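
/*
 * Usage sketch: the DMA translation code (pci_dma.c) is expected to
 * register its root table through this interface when a function is
 * brought up, roughly
 *
 *	zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 *			   (u64) zdev->dma_table);
 *
 * and to drop the registration again with zpci_unregister_ioat(zdev, 0)
 * on teardown; DMA address space 0 is the only one used here.
 */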

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/*
 * PCI config space is little-endian; the PCI load/store instructions
 * transfer the value right-aligned in little-endian byte order in the
 * 64-bit data register. Converting to/from CPU (big-endian) order
 * leaves the value left-aligned, hence the shift by (8 - len) * 8 in
 * the two accessors below.
 */
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
{
	unsigned long offset, vaddr;
	struct vm_struct *area;
	phys_addr_t last_addr;

	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) addr;

	offset = addr & ~PAGE_MASK;
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	return __ioremap(addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);
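
/*
 * The ioremap() variants below differ only in the page protection used
 * for the MIO mapping: cached, write-combining or write-through. On
 * machines without MIO, __ioremap() returns the address unchanged: BAR
 * "addresses" are then ZPCI_ADDR() cookies that the PCI load/store
 * helpers decode instead of dereferencing them.
 */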

void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wt);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
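
/*
 * Typical driver-side use, sketched for a hypothetical device (the
 * register layout is assumed, not part of this file); the same code
 * works with and without MIO, only the cookie returned by pci_iomap()
 * differs:
 *
 *	void __iomem *regs;
 *	u32 id;
 *
 *	regs = pci_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	id = ioread32(regs);	// assumed: ID register at offset 0
 *	pci_iounmap(pdev, regs);
 */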

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset,
					    unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}
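
/*
 * Each iomap table entry describes one mapped BAR of one function:
 * ZPCI_ADDR(entry) turns a table index into the cookie handed out to
 * drivers, and ZPCI_IDX(addr) recovers the index so that the non-MIO
 * load/store helpers can look up the function handle and BAR of an
 * access.
 */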

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev,
			     struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}

int pcibios_add_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}
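
/*
 * Domain numbers: with UID checking active the firmware-supplied UID is
 * registered as the PCI domain number directly (__zpci_register_domain);
 * otherwise domains are auto-allocated from a bitmap
 * (__zpci_alloc_domain).
 */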

static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	if (clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES)) {
		rc = -EIO;
		goto out;
	}

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * The zPCI function may already be disabled by the platform, this is
	 * detected in clp_disable_fh() which becomes a no-op.
	 */
	return clp_disable_fh(zdev) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

/**
 * zpci_remove_device() - Removes the given zdev from the PCI core
 * @zdev: the zdev to be removed from the PCI core
 * @set_error: if true the device's error state is set to permanent failure
 *
 * Sets a zPCI device to a configured but offline state; the zPCI
 * device is still accessible through its hotplug slot and the zPCI
 * API but is removed from the common code PCI bus, making it
 * no longer available to drivers.
 */
void zpci_remove_device(struct zpci_dev *zdev, bool set_error)
{
	struct zpci_bus *zbus = zdev->zbus;
	struct pci_dev *pdev;

	if (!zdev->zbus->bus)
		return;

	pdev = pci_get_slot(zbus->bus, zdev->devfn);
	if (pdev) {
		if (set_error)
			pdev->error_state = pci_channel_io_perm_failure;
		if (pdev->is_virtfn) {
			zpci_iov_remove_virtfn(pdev, zdev->vfn);
			/* balance pci_get_slot */
			pci_dev_put(pdev);
			return;
		}
		pci_stop_and_remove_bus_device_locked(pdev);
		/* balance pci_get_slot */
		pci_dev_put(pdev);
	}
}

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation, either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: 0 on success, an error value otherwise
 */
int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto error_destroy_iommu;
	}

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return 0;

error_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return rc;
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
	       state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev cannot be found via
 * get_zdev_by_fid() anymore but may still be accessible via existing
 * references though it will not be functional anymore.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);
	/*
	 * Remove device from zpci_list as it is going away. This also
	 * makes sure we ignore subsequent zPCI events for this device.
	 */
	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	zpci_zdev_put(zdev);
}

void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);

	if (zdev->zbus->bus)
		zpci_remove_device(zdev, false);

	switch (zdev->state) {
	case ZPCI_FN_STATE_ONLINE:
	case ZPCI_FN_STATE_CONFIGURED:
		zpci_disable_device(zdev);
		fallthrough;
	case ZPCI_FN_STATE_STANDBY:
		if (zdev->has_hp_slot)
			zpci_exit_slot(zdev);
		spin_lock(&zpci_list_lock);
		list_del(&zdev->entry);
		spin_unlock(&zpci_list_lock);
		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
		fallthrough;
	case ZPCI_FN_STATE_RESERVED:
		zpci_cleanup_bus_resources(zdev);
		zpci_bus_device_unregister(zdev);
		zpci_destroy_iommu(zdev);
		fallthrough;
	default:
		break;
	}
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}
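
/*
 * The tokens above arrive from the kernel command line via the generic
 * "pci=" option, e.g. pci=nomio to avoid the MIO instruction variants
 * or pci=off to skip PCI initialization altogether.
 */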

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71))
		return 0;

	if (MACHINE_HAS_PCI_MIO) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);