Lines matching defs:dev (drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c)

94 		if (of_property_read_bool(smmu->dev->of_node,
97 dev_notice(smmu->dev, "option %s\n",
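
A sketch of the devicetree option-parsing loop behind the two matches above, assuming a NULL-terminated option table; the struct and table shapes follow this driver's convention but should be treated as illustrative:

    #include <linux/of.h>
    #include <linux/device.h>

    #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)

    struct arm_smmu_option_prop {
        u32 opt;
        const char *prop;
    };

    static struct arm_smmu_option_prop arm_smmu_options[] = {
        { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
        { 0, NULL },    /* terminator */
    };

    static void parse_driver_options(struct arm_smmu_device *smmu)
    {
        int i = 0;

        /* Walk the table; each boolean DT property sets an option bit. */
        do {
            if (of_property_read_bool(smmu->dev->of_node,
                                      arm_smmu_options[i].prop)) {
                smmu->options |= arm_smmu_options[i].opt;
                dev_notice(smmu->dev, "option %s\n",
                           arm_smmu_options[i].prop);
            }
        } while (arm_smmu_options[++i].opt);
    }
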
390 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
395 dev_err(smmu->dev, "retrying command fetch\n");
417 dev_err(smmu->dev, "skipping command in error state:\n");
419 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
766 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
843 dev_err_ratelimited(smmu->dev,
871 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
910 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
924 static int arm_smmu_page_response(struct device *dev,
929 struct arm_smmu_master *master = dev_iommu_priv_get(dev);
1009 l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
1012 dev_warn(smmu->dev,
1157 cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
1166 cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
1169 dev_warn(smmu->dev, "failed to allocate context descriptor\n");
1178 devm_kfree(smmu->dev, cdcfg->l1_desc);
1198 dmam_free_coherent(smmu->dev, size,
1202 devm_kfree(smmu->dev, cdcfg->l1_desc);
1210 dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
1432 desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
1435 dev_err(smmu->dev,
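
The cluster above (1009-1435) is the device-managed allocation idiom: dmam_/devm_ buffers are released automatically when the SMMU device detaches, and are also freed explicitly (1198/1202/1210) where a table's lifetime, e.g. per-domain, is shorter than the device's. A minimal sketch of the pairing, with hypothetical names:

    #include <linux/dma-mapping.h>

    static u64 *alloc_l2_table(struct device *dev, size_t nents,
                               dma_addr_t *dma)
    {
        size_t size = nents * sizeof(u64);
        u64 *table;

        /* Coherent DMA memory tied to the device's lifetime. */
        table = dmam_alloc_coherent(dev, size, dma, GFP_KERNEL);
        if (!table)
            dev_warn(dev, "failed to allocate L2 table (%zu bytes)\n",
                     size);
        return table;
    }

    static void free_l2_table(struct device *dev, u64 *table, size_t nents,
                              dma_addr_t dma)
    {
        /* Explicit free for tables torn down before the device goes
         * away (e.g. per-domain context-descriptor tables). */
        dmam_free_coherent(dev, nents * sizeof(u64), table, dma);
    }
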
1547 ret = iommu_report_device_fault(master->dev, &fault_evt);
1555 arm_smmu_page_response(master->dev, &fault_evt, &resp);
1563 static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
1566 struct arm_smmu_device *smmu = dev;
1581 dev_info(smmu->dev, "event 0x%02x received:\n", id);
1583 dev_info(smmu->dev, "\t0x%016llx\n",
1594 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
1614 dev_info(smmu->dev, "unexpected PRI request received:\n");
1615 dev_info(smmu->dev,
1640 static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
1642 struct arm_smmu_device *smmu = dev;
1652 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
1662 static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
1665 struct arm_smmu_device *smmu = dev;
1674 dev_warn(smmu->dev,
1679 dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
1684 dev_warn(smmu->dev, "GERROR MSI write aborted\n");
1687 dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
1690 dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
1693 dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
1696 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
1699 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
1708 static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
1710 struct arm_smmu_device *smmu = dev;
1712 arm_smmu_evtq_thread(irq, dev);
1714 arm_smmu_priq_thread(irq, dev);
1719 static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
1721 arm_smmu_gerror_handler(irq, dev);
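
The handlers at 1563-1721 follow the kernel's split-IRQ shape: the hard handler does only non-sleeping work and wakes a thread that drains the queues; the combined variants simply delegate. A sketch, with hypothetical helpers standing in for the real bodies:

    #include <linux/interrupt.h>

    /* Hypothetical helpers standing in for the real queue/error work. */
    static void drain_queue(struct arm_smmu_device *smmu) { }
    static void error_work(void *dev) { }

    static irqreturn_t my_queue_thread(int irq, void *dev)
    {
        struct arm_smmu_device *smmu = dev;  /* dev_id from request time */

        drain_queue(smmu);                   /* threaded context: may sleep */
        return IRQ_HANDLED;
    }

    static irqreturn_t my_combined_handler(int irq, void *dev)
    {
        error_work(dev);            /* hard-IRQ context: must not sleep */
        return IRQ_WAKE_THREAD;     /* run the registered thread fn next */
    }
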
2018 static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
2020 struct arm_smmu_master *master = dev_iommu_priv_get(dev);
2226 .iommu_dev = smmu->dev,
2291 struct device *dev = master->dev;
2293 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2301 return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
2317 pdev = to_pci_dev(master->dev);
2322 dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
2332 pci_disable_ats(to_pci_dev(master->dev));
2349 if (!dev_is_pci(master->dev))
2352 pdev = to_pci_dev(master->dev);
2364 dev_err(&pdev->dev, "Failed to enable PASID\n");
2377 if (!dev_is_pci(master->dev))
2380 pdev = to_pci_dev(master->dev);
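
A sketch of the ATS enable/disable pairing at 2291-2380; stu (the smallest translation unit) is computed from the SMMU's page sizes in the real code and is just a parameter here:

    #include <linux/pci.h>
    #include <linux/pci-ats.h>

    static int enable_ats(struct device *dev, int stu)
    {
        struct pci_dev *pdev;
        int ret;

        /* ATS is PCI-only and must be advertised by the endpoint. */
        if (!dev_is_pci(dev) || !pci_ats_supported(to_pci_dev(dev)))
            return -ENODEV;

        pdev = to_pci_dev(dev);
        ret = pci_enable_ats(pdev, stu);
        if (ret)
            dev_err(dev, "Failed to enable ATS (STU %d)\n", stu);
        return ret;
    }

    static void disable_ats(struct device *dev)
    {
        if (dev_is_pci(dev))
            pci_disable_ats(to_pci_dev(dev));
    }
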
2408 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
2412 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2420 master = dev_iommu_priv_get(dev);
2429 dev_err(dev, "cannot attach - SVA enabled\n");
2544 struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
2546 put_device(dev);
2547 return dev ? dev_get_drvdata(dev) : NULL;
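
The lookup at 2544-2547 is the standard find-and-drop-the-reference idiom; dropping the reference immediately is safe because the drvdata stays valid for as long as the driver is bound. A sketch:

    static struct arm_smmu_device *get_by_fwnode(struct fwnode_handle *fwnode)
    {
        struct device *dev =
            driver_find_device_by_fwnode(&arm_smmu_driver.driver, fwnode);

        put_device(dev);    /* drop the ref taken by the lookup */
        return dev ? dev_get_drvdata(dev) : NULL;
    }
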
2580 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
2611 dev_warn(master->dev,
2639 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
2654 static struct iommu_device *arm_smmu_probe_device(struct device *dev)
2659 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2664 if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
2675 master->dev = dev;
2678 dev_iommu_priv_set(dev, master);
2684 device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits);
2702 device_property_read_bool(dev, "dma-can-stall")) ||
2710 dev_iommu_priv_set(dev, NULL);
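
A condensed sketch of the per-device state handling in probe (2654-2710); fwspec validation and feature probing are elided, attach_master() is a made-up name, and the driver's internal types are assumed:

    #include <linux/iommu.h>
    #include <linux/property.h>
    #include <linux/slab.h>

    static int attach_master(struct arm_smmu_device *smmu, struct device *dev)
    {
        struct arm_smmu_master *master;

        /* probe_device must not run twice for the same device. */
        if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
            return -EBUSY;

        master = kzalloc(sizeof(*master), GFP_KERNEL);
        if (!master)
            return -ENOMEM;

        master->dev = dev;
        master->smmu = smmu;
        dev_iommu_priv_set(dev, master);  /* paired with the many
                                           * dev_iommu_priv_get() calls
                                           * in this listing */

        /* Optional per-device properties, e.g. PASID width. */
        device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits);
        return 0;
    }
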
2714 static void arm_smmu_release_device(struct device *dev)
2716 struct arm_smmu_master *master = dev_iommu_priv_get(dev);
2719 iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
2726 static struct iommu_group *arm_smmu_device_group(struct device *dev)
2735 if (dev_is_pci(dev))
2736 group = pci_device_group(dev);
2738 group = generic_device_group(dev);
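
The grouping decision at 2726-2738 is the stock IOMMU-core pattern; a sketch:

    #include <linux/iommu.h>
    #include <linux/pci.h>

    static struct iommu_group *pick_group(struct device *dev)
    {
        /* PCI devices can alias one another (bridges, DMA quirks), so
         * defer to the PCI core's grouping; anything else gets its own
         * group. */
        if (dev_is_pci(dev))
            return pci_device_group(dev);
        return generic_device_group(dev);
    }
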
2758 static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
2760 return iommu_fwspec_add_ids(dev, args->args, 1);
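
of_xlate at 2758-2760 just appends the single stream ID carried by each phandle argument, matching the #iommu-cells = <1> check at 3739-3742; a sketch:

    #include <linux/iommu.h>
    #include <linux/of.h>

    static int xlate(struct device *dev, struct of_phandle_args *args)
    {
        /* args->args[0] is the stream ID from the DT "iommus" entry. */
        return iommu_fwspec_add_ids(dev, args->args, 1);
    }
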
2763 static void arm_smmu_get_resv_regions(struct device *dev,
2776 iommu_dma_get_resv_regions(dev, head);
2779 static int arm_smmu_dev_enable_feature(struct device *dev,
2782 struct arm_smmu_master *master = dev_iommu_priv_get(dev);
2806 static int arm_smmu_dev_disable_feature(struct device *dev,
2809 struct arm_smmu_master *master = dev_iommu_priv_get(dev);
2839 static int arm_smmu_def_domain_type(struct device *dev)
2841 if (dev_is_pci(dev)) {
2842 struct pci_dev *pdev = to_pci_dev(dev);
2851 static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
2855 domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
2859 arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
2901 q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
2910 dev_err(smmu->dev,
2917 dev_info(smmu->dev, "allocated %u entries for %s\n",
2941 cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
2973 smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev));
2993 cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
3020 dev_warn(smmu->dev,
3025 strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
3028 dev_err(smmu->dev,
3052 strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
3055 dev_err(smmu->dev,
3137 dev_err(smmu->dev, "GBPA not responding to update\n");
3143 struct device *dev = data;
3144 platform_msi_domain_free_irqs(dev);
3150 struct device *dev = msi_desc_to_dev(desc);
3151 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
3165 struct device *dev = smmu->dev;
3179 if (!dev->msi.domain) {
3180 dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
3185 ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
3187 dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
3191 smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX);
3192 smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX);
3193 smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
3196 devm_add_action(dev, arm_smmu_free_msis, dev);
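
A condensed sketch of the MSI wiring at 3143-3196, using the same platform-MSI API the listing shows; the doorbell-programming body is elided and helper names are made up:

    #include <linux/msi.h>

    static void write_msi_msg_cb(struct msi_desc *desc, struct msi_msg *msg)
    {
        /* Program the queue's MSI doorbell registers from msg (elided). */
    }

    static void free_msis_cb(void *data)
    {
        platform_msi_domain_free_irqs(data);
    }

    static void setup_msis(struct arm_smmu_device *smmu)
    {
        struct device *dev = smmu->dev;
        int nvec = 3;    /* evtq, gerror, priq */

        if (!dev->msi.domain) {
            dev_info(dev, "msi_domain absent - falling back to wired irqs\n");
            return;
        }

        if (platform_msi_domain_alloc_irqs(dev, nvec, write_msi_msg_cb)) {
            dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
            return;
        }

        smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX);
        smmu->gerr_irq   = msi_get_virq(dev, GERROR_MSI_INDEX);
        smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);

        /* Undo the allocation automatically when the device detaches. */
        devm_add_action(dev, free_msis_cb, dev);
    }
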
3208 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
3213 dev_warn(smmu->dev, "failed to enable evtq irq\n");
3215 dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
3220 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
3223 dev_warn(smmu->dev, "failed to enable gerror irq\n");
3225 dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
3231 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
3237 dev_warn(smmu->dev,
3240 dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
3254 dev_err(smmu->dev, "failed to disable irqs\n");
3264 ret = devm_request_threaded_irq(smmu->dev, irq,
3270 dev_warn(smmu->dev, "failed to enable combined irq\n");
3281 dev_warn(smmu->dev, "failed to enable irqs\n");
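
The IRQ requests at 3208-3281 tolerate failure: a missing or failed line degrades the feature (events, errors, PRI) rather than failing the probe. A sketch for the event queue, with the thread function name taken from the listing and the devname string illustrative:

    #include <linux/interrupt.h>

    static void setup_evtq_irq(struct arm_smmu_device *smmu, int irq)
    {
        int ret;

        if (irq <= 0) {
            dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
            return;
        }

        /* NULL hard handler: the core supplies one, and IRQF_ONESHOT
         * keeps the line masked until arm_smmu_evtq_thread() returns. */
        ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
                                        arm_smmu_evtq_thread, IRQF_ONESHOT,
                                        "arm-smmu-v3-evtq", smmu);
        if (ret)
            dev_warn(smmu->dev, "failed to enable evtq irq\n");
    }
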
3292 dev_err(smmu->dev, "failed to clear cr0\n");
3306 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
3347 dev_err(smmu->dev, "failed to enable command queue\n");
3373 dev_err(smmu->dev, "failed to enable event queue\n");
3390 dev_err(smmu->dev, "failed to enable PRI queue\n");
3400 dev_err(smmu->dev, "failed to enable ATS check\n");
3407 dev_err(smmu->dev, "failed to setup irqs\n");
3425 dev_err(smmu->dev, "failed to enable SMMU interface\n");
3504 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
3535 dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
3553 dev_err(smmu->dev, "no translation support!\n");
3565 dev_err(smmu->dev, "AArch64 table format not supported!\n");
3576 dev_err(smmu->dev, "embedded implementation not supported\n");
3590 dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
3657 dev_info(smmu->dev,
3670 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
3671 dev_warn(smmu->dev,
3685 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
3702 dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
3709 struct device *dev = smmu->dev;
3712 node = *(struct acpi_iort_node **)dev_get_platdata(dev);
3735 struct device *dev = &pdev->dev;
3739 if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
3740 dev_err(dev, "missing #iommu-cells property\n");
3742 dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
3748 if (of_dma_is_coherent(dev->of_node))
3762 static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
3767 return devm_ioremap_resource(dev, &res);
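
The helper at 3762-3767 wraps a raw address range in a struct resource so the devm core can claim and map it (used twice, at 3839/3844, to map the two 64K pages of the register file separately); a sketch of the same shape:

    #include <linux/io.h>
    #include <linux/ioport.h>

    static void __iomem *ioremap_range(struct device *dev,
                                       resource_size_t start,
                                       resource_size_t size)
    {
        struct resource res = DEFINE_RES_MEM(start, size);

        /* Claims the region and maps it; both are undone on detach. */
        return devm_ioremap_resource(dev, &res);
    }
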
3776 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
3787 dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
3797 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
3806 struct device *dev = &pdev->dev;
3809 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3812 smmu->dev = dev;
3814 if (dev->of_node) {
3830 dev_err(dev, "MMIO region too small (%pr)\n", res);
3839 smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
3844 smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
3892 ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
3897 ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
3899 dev_err(dev, "Failed to register iommu\n");
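
The probe tail at 3892-3899 is the standard two-step registration: sysfs node first, then core registration, with the sysfs node unwound on failure. A sketch (register_iommu() is a made-up wrapper; the "smmu3.%pa" name format follows this driver's convention):

    #include <linux/iommu.h>

    static int register_iommu(struct arm_smmu_device *smmu,
                              struct device *dev, resource_size_t ioaddr)
    {
        int ret;

        ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
                                     "smmu3.%pa", &ioaddr);
        if (ret)
            return ret;

        ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
        if (ret) {
            dev_err(dev, "Failed to register iommu\n");
            iommu_device_sysfs_remove(&smmu->iommu);   /* unwind step 1 */
        }
        return ret;
    }
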