Lines Matching defs:dev (each entry: source line number in the Intel VT-d IOMMU driver, followed by the matching line)
331 static void dmar_remove_one_dev_info(struct device *dev);
334 struct device *dev);
368 struct device_domain_info *get_domain_info(struct device *dev)
372 if (!dev)
375 info = dev_iommu_priv_get(dev);
741 if (!info->dev)
750 nid = dev_to_node(info->dev);
819 static bool attach_deferred(struct device *dev)
821 return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
827 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
830 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
833 is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
837 if (!dev_is_pci(dev) || !dev_is_pci(bridge))
840 pdev = to_pci_dev(dev);
865 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
881 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
886 if (dev_is_pci(dev)) {
887 struct pci_dev *pdev = to_pci_dev(dev);
898 struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
907 if (!dev)
910 if (dev_is_pci(dev)) {
913 pdev = pci_real_dma_dev(to_pci_dev(dev));
918 dev = &pf_pdev->dev;
920 } else if (has_acpi_companion(dev))
921 dev = &ACPI_COMPANION(dev)->dev;
930 if (tmp == dev) {
945 if (is_downstream_to_pci_bridge(dev, tmp))
960 if (iommu_is_dummy(iommu, dev))
1503 if (info->ats_supported && info->dev)
1521 if (!info->dev || !dev_is_pci(info->dev))
1524 pdev = to_pci_dev(info->dev);
1540 if (!info || !dev_is_pci(info->dev))
1543 pdev = to_pci_dev(info->dev);
1587 if (!dev_is_pci(info->dev))
1590 pdev = to_pci_dev(info->dev);
1871 if (!info->dev || !info->domain)
2306 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2313 iommu = device_to_iommu(dev, &bus, &devfn);
2317 table = intel_pasid_get_table(dev);
2319 if (!dev_is_pci(dev))
2327 return pci_for_each_dma_alias(to_pci_dev(dev),
2339 static int domain_context_mapped(struct device *dev)
2344 iommu = device_to_iommu(dev, &bus, &devfn);
2348 if (!dev_is_pci(dev))
2351 return !pci_for_each_dma_alias(to_pci_dev(dev),
2591 if (info->dev)
2592 dev_iommu_priv_set(info->dev, NULL);
2606 struct dmar_domain *find_domain(struct device *dev)
2610 if (unlikely(!dev || !dev->iommu))
2613 if (unlikely(attach_deferred(dev)))
2617 info = get_domain_info(dev);
2624 static void do_deferred_attach(struct device *dev)
2628 dev_iommu_priv_set(dev, NULL);
2629 domain = iommu_get_domain_for_dev(dev);
2631 intel_iommu_attach_device(domain, dev);
2649 struct device *dev,
2678 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
2683 static bool dev_is_real_dma_subdevice(struct device *dev)
2685 return dev && dev_is_pci(dev) &&
2686 pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
2691 struct device *dev,
2703 if (!dev_is_real_dma_subdevice(dev)) {
2708 struct pci_dev *pdev = to_pci_dev(dev);
2718 info->dev = dev;
2725 if (dev && dev_is_pci(dev)) {
2726 struct pci_dev *pdev = to_pci_dev(info->dev);
2747 if (dev)
2748 found = find_domain(dev);
2756 info2->dev = dev;
2779 if (dev)
2780 dev_iommu_priv_set(dev, info);
2784 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2785 ret = intel_pasid_alloc_table(dev);
2787 dev_err(dev, "PASID table allocation failed\n");
2788 dmar_remove_one_dev_info(dev);
2796 dev, PASID_RID2PASID);
2798 ret = domain_setup_first_level(iommu, domain, dev,
2802 dev, PASID_RID2PASID);
2805 dev_err(dev, "Setup RID2PASID failed\n");
2806 dmar_remove_one_dev_info(dev);
2811 if (dev && domain_context_mapping(domain, dev)) {
2812 dev_err(dev, "Domain context map failed\n");
2813 dmar_remove_one_dev_info(dev);
2840 struct device *dev;
2875 i, dev) {
2894 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2900 iommu = device_to_iommu(dev, &bus, &devfn);
2904 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2911 static bool device_has_rmrr(struct device *dev)
2925 if (tmp == dev ||
2926 is_downstream_to_pci_bridge(dev, tmp)) {
2938 * @dev: device handle
2950 static bool device_rmrr_is_relaxable(struct device *dev)
2954 if (!dev_is_pci(dev))
2957 pdev = to_pci_dev(dev);
2981 static bool device_is_rmrr_locked(struct device *dev)
2983 if (!device_has_rmrr(dev))
2986 if (device_rmrr_is_relaxable(dev))
2995 * @dev: the device in query
3003 static int device_def_domain_type(struct device *dev)
3005 if (dev_is_pci(dev)) {
3006 struct pci_dev *pdev = to_pci_dev(dev);
3521 static unsigned long intel_alloc_iova(struct device *dev,
3559 dev_err_once(dev, "Allocating %ld-page iova failed\n",
3567 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3580 if (unlikely(attach_deferred(dev)))
3581 do_deferred_attach(dev);
3583 domain = find_domain(dev);
3590 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3617 trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
3624 dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
3629 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3634 return __intel_map_single(dev, page_to_phys(page) + offset,
3635 size, dir, *dev->dma_mask);
3638 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
3642 return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
3645 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3655 domain = find_domain(dev);
3666 if (dev_is_pci(dev))
3667 pdev = to_pci_dev(dev);
3686 trace_unmap_single(dev, dev_addr, size);
3689 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3693 intel_unmap(dev, dev_addr, size);
3696 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
3699 intel_unmap(dev, dev_addr, size);
3702 static void *intel_alloc_coherent(struct device *dev, size_t size,
3709 if (unlikely(attach_deferred(dev)))
3710 do_deferred_attach(dev);
3718 page = dma_alloc_from_contiguous(dev, count, order,
3728 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3730 dev->coherent_dma_mask);
3733 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3739 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3748 intel_unmap(dev, dma_handle, size);
3749 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3753 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3766 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3768 trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3771 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3786 if (unlikely(attach_deferred(dev)))
3787 do_deferred_attach(dev);
3789 domain = find_domain(dev);
3798 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3799 *dev->dma_mask);
3827 trace_map_sg(dev, i + 1, nelems, sg);
3832 static u64 intel_get_required_mask(struct device *dev)
3855 bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
3861 domain = find_domain(dev);
3867 swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
3871 bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
3884 if (unlikely(attach_deferred(dev)))
3885 do_deferred_attach(dev);
3887 domain = find_domain(dev);
3897 iova_pfn = intel_alloc_iova(dev, domain,
3917 tlb_addr = swiotlb_tbl_map_single(dev, paddr, size,
3944 trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
3950 swiotlb_tbl_unmap_single(dev, tlb_addr, size,
3954 dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
3961 bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
3968 domain = find_domain(dev);
3976 intel_unmap(dev, dev_addr, size);
3978 swiotlb_tbl_unmap_single(dev, tlb_addr, size,
3981 trace_bounce_unmap_single(dev, dev_addr, size);
3985 bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
3988 return bounce_map_single(dev, page_to_phys(page) + offset,
3989 size, dir, attrs, *dev->dma_mask);
3993 bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
3996 return bounce_map_single(dev, phys_addr, size,
3997 dir, attrs, *dev->dma_mask);
4001 bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
4004 bounce_unmap_single(dev, dev_addr, size, dir, attrs);
4008 bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
4011 bounce_unmap_single(dev, dev_addr, size, dir, attrs);
4015 bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
4022 bounce_unmap_page(dev, sg->dma_address,
4027 bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
4034 sg->dma_address = bounce_map_page(dev, sg_page(sg),
4043 trace_bounce_map_sg(dev, i + 1, nelems, sg);
4048 bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
4053 bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
4056 bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
4060 bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
4063 bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
4067 bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
4074 bounce_sync_single(dev, sg_dma_address(sg),
4079 bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
4086 bounce_sync_single(dev, sg_dma_address(sg),
4175 struct device *dev;
4181 drhd->devices_cnt, i, dev)
4194 drhd->devices_cnt, i, dev)
4195 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
4475 struct device *dev;
4486 i, dev)
4607 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4616 dev = pci_physfn(dev);
4617 for (bus = dev->bus; bus; bus = bus->parent) {
4634 if (atsr->segment != pci_domain_nr(dev->bus))
4638 if (tmp == &bridge->dev)
4807 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4809 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4814 static ssize_t intel_iommu_show_version(struct device *dev,
4818 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4825 static ssize_t intel_iommu_show_address(struct device *dev,
4829 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4834 static ssize_t intel_iommu_show_cap(struct device *dev,
4838 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4843 static ssize_t intel_iommu_show_ecap(struct device *dev,
4847 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4852 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4856 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4861 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4865 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4930 struct device *dev;
4935 drhd->devices_cnt, i, dev) {
4940 if (dev->bus != &acpi_bus_type)
4943 adev = to_acpi_device(dev);
4947 group = iommu_group_get(pn->dev);
4953 pn->dev->bus->iommu_ops = &intel_iommu_ops;
4954 ret = iommu_probe_device(pn->dev);
5122 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
5124 if (!iommu || !dev || !dev_is_pci(dev))
5127 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
5144 if (info->dev) {
5145 if (dev_is_pci(info->dev) && sm_supported(iommu))
5146 intel_pasid_tear_down_entry(iommu, info->dev,
5150 if (!dev_is_real_dma_subdevice(info->dev))
5151 domain_context_clear(iommu, info->dev);
5152 intel_pasid_free_table(info->dev);
5164 static void dmar_remove_one_dev_info(struct device *dev)
5170 info = get_domain_info(dev);
5254 * Check whether a @domain could be attached to the @dev through the
5258 is_aux_domain(struct device *dev, struct iommu_domain *domain)
5260 struct device_domain_info *info = get_domain_info(dev);
5267 struct device *dev)
5269 struct device_domain_info *info = get_domain_info(dev);
5280 struct device *dev)
5282 struct device_domain_info *info = get_domain_info(dev);
5296 struct device *dev)
5302 iommu = device_to_iommu(dev, NULL, NULL);
5311 pci_max_pasids(to_pci_dev(dev)) - 1,
5332 ret = domain_setup_first_level(iommu, domain, dev,
5335 ret = intel_pasid_setup_second_level(iommu, domain, dev,
5341 auxiliary_link_device(domain, dev);
5359 struct device *dev)
5365 if (!is_aux_domain(dev, &domain->domain))
5369 info = get_domain_info(dev);
5372 auxiliary_unlink_device(domain, dev);
5375 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
5383 struct device *dev)
5389 iommu = device_to_iommu(dev, NULL, NULL);
5399 dev_err(dev, "%s: iommu width (%d) is not "
5425 struct device *dev)
5430 device_is_rmrr_locked(dev)) {
5431 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
5435 if (is_aux_domain(dev, domain))
5438 /* normally dev is not mapped */
5439 if (unlikely(domain_context_mapped(dev))) {
5442 old_domain = find_domain(dev);
5444 dmar_remove_one_dev_info(dev);
5447 ret = prepare_domain_attach_device(domain, dev);
5451 return domain_add_dev_info(to_dmar_domain(domain), dev);
5455 struct device *dev)
5459 if (!is_aux_domain(dev, domain))
5462 ret = prepare_domain_attach_device(domain, dev);
5466 return aux_domain_add_dev(to_dmar_domain(domain), dev);
5470 struct device *dev)
5472 dmar_remove_one_dev_info(dev);
5476 struct device *dev)
5478 aux_domain_remove_dev(to_dmar_domain(domain), dev);
5506 /* PASID based dev TLBs */
5529 intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
5545 if (!dev || !dev_is_pci(dev))
5548 iommu = device_to_iommu(dev, &bus, &devfn);
5557 info = get_domain_info(dev);
5647 dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
5815 static struct iommu_device *intel_iommu_probe_device(struct device *dev)
5819 iommu = device_to_iommu(dev, NULL, NULL);
5824 dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
5829 static void intel_iommu_release_device(struct device *dev)
5833 iommu = device_to_iommu(dev, NULL, NULL);
5837 dmar_remove_one_dev_info(dev);
5839 set_dma_ops(dev, NULL);
5842 static void intel_iommu_probe_finalize(struct device *dev)
5846 domain = iommu_get_domain_for_dev(dev);
5847 if (device_needs_bounce(dev))
5848 set_dma_ops(dev, &bounce_dma_ops);
5850 set_dma_ops(dev, &intel_dma_ops);
5852 set_dma_ops(dev, NULL);
5912 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
5921 domain = find_domain(dev);
5929 info = get_domain_info(dev);
5963 static void intel_iommu_apply_resv_region(struct device *dev,
5976 static struct iommu_group *intel_iommu_device_group(struct device *dev)
5978 if (dev_is_pci(dev))
5979 return pci_device_group(dev);
5980 return generic_device_group(dev);
5983 static int intel_iommu_enable_auxd(struct device *dev)
5990 iommu = device_to_iommu(dev, NULL, NULL);
5997 ret = intel_iommu_enable_pasid(iommu, dev);
6002 info = get_domain_info(dev);
6009 static int intel_iommu_disable_auxd(struct device *dev)
6015 info = get_domain_info(dev);
6052 intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
6057 if (!dev_is_pci(dev) || dmar_disabled ||
6061 ret = pci_pasid_features(to_pci_dev(dev));
6065 return !!siov_find_pci_dvsec(to_pci_dev(dev));
6069 struct device_domain_info *info = get_domain_info(dev);
6080 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
6083 return intel_iommu_enable_auxd(dev);
6086 struct device_domain_info *info = get_domain_info(dev);
6099 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
6102 return intel_iommu_disable_auxd(dev);
6108 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
6110 struct device_domain_info *info = get_domain_info(dev);
6119 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
6128 struct device *dev)
6130 return attach_deferred(dev);
6173 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
6219 static void quirk_iommu_igfx(struct pci_dev *dev)
6221 if (risky_device(dev))
6224 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
6263 static void quirk_iommu_rwbf(struct pci_dev *dev)
6265 if (risky_device(dev))
6272 pci_info(dev, "Forcing write-buffer flush capability\n");
6294 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
6298 if (risky_device(dev))
6301 if (pci_read_config_word(dev, GGC, &ggc))
6305 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
6309 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
6318 static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
6322 if (!IS_GFX_DEVICE(dev))
6325 ver = (dev->device >> 8) & 0xff;
6331 if (risky_device(dev))
6334 pci_info(dev, "Skip IOMMU disabling for graphics\n");
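
For orientation, below is a minimal sketch of how three of the helpers listed above (attach_deferred(), get_domain_info() and find_domain(), around source lines 819, 368 and 2606) fit together. It is reconstructed from the fragments shown, not copied verbatim from the driver; in particular, the marker check inside get_domain_info() is an assumption, since only the dev_iommu_priv_get() call appears in the listing.

/*
 * Illustrative sketch only, reconstructed from the defs:dev fragments above;
 * not a verbatim copy of the Intel VT-d driver.
 */
static bool attach_deferred(struct device *dev)
{
	/* Probe stored a deferred-attach marker instead of a real info pointer. */
	return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
}

struct device_domain_info *get_domain_info(struct device *dev)
{
	struct device_domain_info *info;

	if (!dev)
		return NULL;

	/*
	 * The per-device IOMMU private data either points at the real
	 * device_domain_info or still holds the deferred-attach marker
	 * (assumed handling; the marker check is not shown in the listing).
	 */
	info = dev_iommu_priv_get(dev);
	if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
		return NULL;

	return info;
}

struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	if (unlikely(!dev || !dev->iommu))
		return NULL;

	/* Deferred devices are attached lazily via do_deferred_attach(). */
	if (unlikely(attach_deferred(dev)))
		return NULL;

	info = get_domain_info(dev);
	if (likely(info))
		return info->domain;

	return NULL;
}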