Lines Matching defs:address
51 /* IO virtual address start page frame number */
392 * invalid address), we ignore the capability for the device so
550 u64 address, int flags)
561 pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
562 domain_id, address, flags);
564 pr_err("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
566 domain_id, address, flags);
579 u64 address;
588 address = (u64)(((u64)event[3]) << 32) | event[2];
601 amd_iommu_report_page_fault(devid, pasid, address, flags);
607 dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
609 pasid, address, flags);
614 "address=0x%llx flags=0x%04x]\n",
616 address, flags);
619 dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
621 pasid, address, flags);
624 dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
625 dump_command(address);
628 dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
629 address, flags);
632 dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
634 address);
637 dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
639 pasid, address, flags);
650 dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
652 pasid, address, flags, tag);
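The IO_PAGE_FAULT path above extracts the faulting address from the raw event-log entry (line 588), and the messages print the device as bus:device.function. Below is a minimal userspace sketch of that decoding; only the address extraction and the standard PCI devid split behind "%02x:%02x.%x" are taken from the matches, and the sample devid placement is an assumption for illustration.

    /* Sketch only: the event-log layout beyond "address lives in dwords 2
     * and 3" is assumed, not taken from the driver. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t event_address(const uint32_t event[4])
    {
        /* Mirrors line 588: address = (u64)(((u64)event[3]) << 32) | event[2]; */
        return ((uint64_t)event[3] << 32) | event[2];
    }

    static void print_bdf(uint16_t devid)
    {
        /* Standard PCI bus/device/function split used by the
         * "device=%02x:%02x.%x" messages above. */
        printf("device=%02x:%02x.%x\n",
               devid >> 8, (devid >> 3) & 0x1f, devid & 0x7);
    }

    int main(void)
    {
        uint32_t event[4] = { 0x0000a005, 0, 0xdead0000, 0x00000001 };

        print_bdf(event[0] & 0xffff);   /* assumed devid placement in dword 0 */
        printf("address=0x%llx\n", (unsigned long long)event_address(event));
        return 0;
    }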
686 fault.address = raw[1];
938 static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
944 pages = iommu_num_pages(address, size, PAGE_SIZE);
952 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
956 address &= PAGE_MASK;
960 cmd->data[2] = lower_32_bits(address);
961 cmd->data[3] = upper_32_bits(address);
970 u64 address, size_t size)
975 pages = iommu_num_pages(address, size, PAGE_SIZE);
983 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
987 address &= PAGE_MASK;
993 cmd->data[2] = lower_32_bits(address);
994 cmd->data[3] = upper_32_bits(address);
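build_inv_iommu_pages() and build_inv_iotlb_pages() follow the same shape: count the pages covered by the request, fall back to the "invalidate all pages" address if more than one page is affected, then mask to a page boundary and split the address across data[2]/data[3]. A standalone sketch of that decision, with SK_* placeholders (page size, mask, all-pages constant) standing in for the driver's macros:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SK_PAGE_SIZE 4096ULL
    #define SK_PAGE_MASK (~(SK_PAGE_SIZE - 1))
    #define SK_ALL_PAGES 0x7fffffffffffffffULL   /* assumed all-pages encoding */

    /* Rough equivalent of iommu_num_pages(): pages touched by [address, address + size). */
    static uint64_t sk_num_pages(uint64_t address, uint64_t size)
    {
        uint64_t first = address & SK_PAGE_MASK;
        uint64_t last  = (address + size - 1) & SK_PAGE_MASK;

        return (last - first) / SK_PAGE_SIZE + 1;
    }

    int main(void)
    {
        uint64_t address = 0x100ffc, size = 8;
        bool s = false;

        if (sk_num_pages(address, size) > 1) {
            /* The request spans several pages: flush everything instead. */
            address = SK_ALL_PAGES;
            s = true;
        }
        address &= SK_PAGE_MASK;

        /* data[2]/data[3] carry the low and high halves of the address. */
        printf("data[2]=0x%08x data[3]=0x%08x s=%d\n",
               (unsigned)(uint32_t)address, (unsigned)(address >> 32), s);
        return 0;
    }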
1001 u64 address, bool size)
1005 address &= ~(0xfffULL);
1009 cmd->data[2] = lower_32_bits(address);
1010 cmd->data[3] = upper_32_bits(address);
1019 int qdep, u64 address, bool size)
1023 address &= ~(0xfffULL);
1030 cmd->data[2] = lower_32_bits(address);
1032 cmd->data[3] = upper_32_bits(address);
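The PASID variants at lines 1001-1032 skip the page counting: the address is always truncated to a 4 KiB boundary and split into the low/high command dwords. A tiny sketch, with lo32()/hi32() as local stand-ins for lower_32_bits()/upper_32_bits():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lo32(uint64_t v) { return (uint32_t)(v & 0xffffffffULL); }
    static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
        uint64_t address = 0x0000123456789abcULL;

        address &= ~0xfffULL;   /* drop the page offset, as on lines 1005 and 1023 */
        printf("data[2]=0x%08x data[3]=0x%08x\n", lo32(address), hi32(address));
        return 0;
    }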
1250 u64 address, size_t size)
1259 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1309 u64 address, size_t size, int pde)
1315 build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
1333 ret |= device_flush_iotlb(dev_data, address, size);
1340 u64 address, size_t size)
1342 __domain_flush_pages(domain, address, size, 0);
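Lines 1250-1342 show how a domain flush fans out: __domain_flush_pages() queues the IOMMU-side invalidation for the domain, then walks the attached devices and ORs in the result of each device IOTLB flush. A self-contained sketch of that pattern; the structures and sk_* helpers are illustrative stand-ins, not the driver's types:

    #include <stdint.h>
    #include <stdio.h>

    struct sk_dev { int devid; int qdep; int has_iotlb; };

    static int sk_flush_iommu_pages(int domid, uint64_t address, uint64_t size)
    {
        printf("iommu pages flush: domid=%d addr=0x%llx size=%llu\n",
               domid, (unsigned long long)address, (unsigned long long)size);
        return 0;
    }

    static int sk_flush_device_iotlb(const struct sk_dev *dev, uint64_t address, uint64_t size)
    {
        (void)size;
        printf("device iotlb flush: devid=0x%x qdep=%d addr=0x%llx\n",
               dev->devid, dev->qdep, (unsigned long long)address);
        return 0;
    }

    static int sk_domain_flush_pages(int domid, const struct sk_dev *devs, int ndevs,
                                     uint64_t address, uint64_t size)
    {
        int i, ret;

        ret = sk_flush_iommu_pages(domid, address, size);
        for (i = 0; i < ndevs; i++) {
            if (!devs[i].has_iotlb)   /* illustrative: skip devices without a device-side IOTLB */
                continue;
            ret |= sk_flush_device_iotlb(&devs[i], address, size);
        }
        return ret;
    }

    int main(void)
    {
        struct sk_dev devs[] = { { 0xa005, 32, 1 }, { 0xa006, 0, 0 } };

        return sk_domain_flush_pages(1, devs, 2, 0x100000, 4096);
    }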
1501 * another level increases the size of the address space by 9 bits to a size up
1505 unsigned long address,
1521 if (address <= PM_LEVEL_SIZE(pgtable.mode))
1552 unsigned long address,
1566 while (address > PM_LEVEL_SIZE(pgtable.mode)) {
1571 if (!increase_address_space(domain, address, gfp))
1580 pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
1581 address = PAGE_SIZE_ALIGN(address, page_size);
1642 pte = &pte[PM_LEVEL_INDEX(level, address)];
1649 * This function checks if there is a PTE for a given dma address. If
1653 unsigned long address,
1664 if (address > PM_LEVEL_SIZE(pgtable.mode))
1668 pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
1690 pte = &pte[PM_LEVEL_INDEX(level, address)];
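The walkers at lines 1505-1690 grow the page table while the address exceeds PM_LEVEL_SIZE(mode) and pick a 9-bit index per level with PM_LEVEL_INDEX(), on top of the 12-bit page offset (the "9 bits per level" noted in the comment at line 1501). A worked sketch of that arithmetic; the SK_* macros are reconstructions for illustration, not copies of the driver's definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define SK_LEVEL_SHIFT(x)    (12 + 9 * (x))
    #define SK_LEVEL_SIZE(x)     ((1ULL << SK_LEVEL_SHIFT(x)) - 1)
    #define SK_LEVEL_INDEX(x, a) (((a) >> SK_LEVEL_SHIFT(x)) & 0x1ffULL)

    int main(void)
    {
        uint64_t address = 0x8040201000ULL;   /* just above the 39-bit boundary */
        int mode = 3, level;

        /* Grow the table until the address fits, as in the loop at line 1566. */
        while (address > SK_LEVEL_SIZE(mode))
            mode++;

        printf("address 0x%llx needs a %d-level table (%d address bits)\n",
               (unsigned long long)address, mode, SK_LEVEL_SHIFT(mode));

        /* Walk down, printing the 9-bit index consumed at each level. */
        for (level = mode - 1; level >= 0; level--)
            printf("level %d index = %llu\n",
                   level, (unsigned long long)SK_LEVEL_INDEX(level, address));
        return 0;
    }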
1724 * Generic mapping functions. It maps a physical address into a DMA
1725 * address space. It allocates the page table pages if necessary.
1728 * and full 64 bit address spaces.
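As a rough illustration of the mapping idea that comment describes (walk the table, allocate missing intermediate pages, store the physical address in the leaf entry), here is a toy two-level, 4 KiB-page mapper. It is a sketch of the concept only, not the driver's iommu_map_page():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRIES 512
    #define PRESENT 0x1ULL

    static uint64_t *root[ENTRIES];   /* level-1 table: pointers to level-0 tables */

    static int map_page(uint64_t iova, uint64_t paddr, uint64_t prot)
    {
        unsigned int i1 = (iova >> 21) & 0x1ff;   /* 9 bits per level, 12-bit page offset */
        unsigned int i0 = (iova >> 12) & 0x1ff;

        if (!root[i1]) {                  /* allocate the missing lower-level table */
            root[i1] = calloc(ENTRIES, sizeof(uint64_t));
            if (!root[i1])
                return -1;
        }
        if (root[i1][i0] & PRESENT)       /* refuse to overwrite an existing mapping */
            return -1;

        root[i1][i0] = (paddr & ~0xfffULL) | prot | PRESENT;
        return 0;
    }

    int main(void)
    {
        uint64_t iova = 0x40201000, paddr = 0x12345000;

        if (map_page(iova, paddr, 0x2))
            return 1;
        printf("leaf pte = 0x%llx\n",
               (unsigned long long)root[(iova >> 21) & 0x1ff][(iova >> 12) & 0x1ff]);
        return 0;
    }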
1837 * about domains is the page table mapping the DMA address space they
2880 u64 address, bool size)
2889 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
2923 qdep, address, size);
2941 u64 address)
2943 return __flush_pasid(domain, pasid, address, false);
2947 u64 address)
2954 ret = __amd_iommu_flush_page(domain, pasid, address);
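The flush entry points at lines 2941-2954 are thin wrappers: __amd_iommu_flush_page() just forwards to __flush_pasid() with size=false, i.e. a single 4 KiB page, and the common helper then issues the IOMMU and device IOTLB commands. A small sketch of that layering, with sk_* names as stand-ins:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static int sk_flush_pasid(int domid, uint32_t pasid, uint64_t address, bool size)
    {
        printf("flush: domid=%d pasid=0x%x addr=0x%llx size=%d\n",
               domid, pasid, (unsigned long long)address, size);
        return 0;
    }

    /* Mirrors __amd_iommu_flush_page(): one page, so size is false. */
    static int sk_flush_page(int domid, uint32_t pasid, uint64_t address)
    {
        return sk_flush_pasid(domid, pasid, address, false);
    }

    int main(void)
    {
        return sk_flush_page(1, 0x5, 0x40201000);
    }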