Lines Matching refs:iommu
(Each entry below is a source line number followed by the matching line; the function and register names identify the file as the Intel VT-d interrupt-remapping driver, drivers/iommu/intel/irq_remapping.c.)
13 #include <linux/intel-iommu.h>
33 struct intel_iommu *iommu;
40 struct intel_iommu *iommu;
47 struct intel_iommu *iommu;
74 * ->iommu->register_lock
83 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
86 static bool ir_pre_enabled(struct intel_iommu *iommu)
88 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
91 static void clear_ir_pre_enabled(struct intel_iommu *iommu)
93 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
96 static void init_ir_status(struct intel_iommu *iommu)
100 gsts = readl(iommu->reg + DMAR_GSTS_REG);
102 iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
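The three helpers above track whether firmware or a previous kernel left remapping enabled; the listing omits the status check that connects lines 100 and 102. A minimal sketch of init_ir_status(), assuming the DMA_GSTS_IRES bit definition from <linux/intel-iommu.h>:

    /* Record whether IRQ remapping was already on when we took over */
    static void init_ir_status(struct intel_iommu *iommu)
    {
            u32 gsts;

            gsts = readl(iommu->reg + DMAR_GSTS_REG);
            if (gsts & DMA_GSTS_IRES)
                    iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
    }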
105 static int alloc_irte(struct intel_iommu *iommu,
108 struct ir_table *table = iommu->ir_table;
121 if (mask > ecap_max_handle_mask(iommu->ecap)) {
124 ecap_max_handle_mask(iommu->ecap));
132 pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
134 irq_iommu->iommu = iommu;
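alloc_irte() reserves a naturally aligned power-of-two region in the IR table bitmap; only fragments appear in the match list. A sketch of the likely surrounding logic, assuming this file's irq_2_ir_lock spinlock and INTR_REMAP_TABLE_ENTRIES constant (mainline also records an IRQ mode field, omitted here):

    static int alloc_irte(struct intel_iommu *iommu,
                          struct irq_2_iommu *irq_iommu, u16 count)
    {
            struct ir_table *table = iommu->ir_table;
            unsigned int mask = 0;
            unsigned long flags;
            int index;

            if (!count || !irq_iommu)
                    return -1;

            /* Multi-vector allocations are rounded up to a power of two */
            if (count > 1) {
                    count = __roundup_pow_of_two(count);
                    mask = ilog2(count);
            }

            if (mask > ecap_max_handle_mask(iommu->ecap))
                    return -1;

            raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
            index = bitmap_find_free_region(table->bitmap,
                                            INTR_REMAP_TABLE_ENTRIES, mask);
            if (index < 0) {
                    pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
            } else {
                    irq_iommu->iommu = iommu;
                    irq_iommu->irte_index = index;
                    irq_iommu->sub_handle = 0;
                    irq_iommu->irte_mask = mask;
            }
            raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

            return index;
    }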
145 static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
155 return qi_submit_sync(iommu, &desc, 1, 0);
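qi_flush_iec() (lines 145-155) pushes one Interrupt Entry Cache invalidation descriptor through the queued-invalidation interface; the descriptor construction is not in the match list. A sketch, assuming the QI_IEC_* field helpers from <linux/intel-iommu.h>:

    static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
    {
            struct qi_desc desc;

            /* Selective IEC invalidation covering index .. index + 2^mask - 1 */
            desc.qw0 = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                       | QI_IEC_SELECTIVE;
            desc.qw1 = 0;
            desc.qw2 = 0;
            desc.qw3 = 0;

            /* One descriptor, no special submission flags */
            return qi_submit_sync(iommu, &desc, 1, 0);
    }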
161 struct intel_iommu *iommu;
171 iommu = irq_iommu->iommu;
174 irte = &iommu->ir_table->base[index];
196 __iommu_flush_cache(iommu, irte, sizeof(*irte));
198 rc = qi_flush_iec(iommu, index, 0);
200 /* Update iommu mode according to the IRTE mode */
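Lines 161-200 belong to modify_irte(), which publishes a new IRTE, flushes it to memory, and invalidates the hardware's cached copy. A condensed sketch (the posted-interrupt cmpxchg_double path present in mainline is omitted):

    static int modify_irte(struct irq_2_iommu *irq_iommu,
                           struct irte *irte_modified)
    {
            struct intel_iommu *iommu;
            unsigned long flags;
            struct irte *irte;
            int rc, index;

            if (!irq_iommu)
                    return -1;

            raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

            iommu = irq_iommu->iommu;
            index = irq_iommu->irte_index + irq_iommu->sub_handle;
            irte = &iommu->ir_table->base[index];

            /* Publish each 64-bit half of the IRTE atomically */
            set_64bit(&irte->low, irte_modified->low);
            set_64bit(&irte->high, irte_modified->high);
            __iommu_flush_cache(iommu, irte, sizeof(*irte));

            /* Drop the stale cached copy of this single entry */
            rc = qi_flush_iec(iommu, index, 0);

            /* Update iommu mode according to the IRTE mode */
            irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
            raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

            return rc;
    }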
212 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
213 return ir_hpet[i].iommu->ir_domain;
223 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
224 return ir_ioapic[i].iommu;
231 struct intel_iommu *iommu = map_ioapic_to_iommu(apic);
233 return iommu ? iommu->ir_domain : NULL;
240 return drhd ? drhd->iommu->ir_msi_domain : NULL;
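Lines 212-240 are small lookup helpers over the static ir_hpet[]/ir_ioapic[] scope tables populated during DRHD parsing. For example, a sketch of map_ioapic_to_iommu(), assuming MAX_IO_APICS bounds the table:

    static struct intel_iommu *map_ioapic_to_iommu(int apic)
    {
            int i;

            for (i = 0; i < MAX_IO_APICS; i++)
                    if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
                            return ir_ioapic[i].iommu;

            return NULL;
    }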
246 struct intel_iommu *iommu;
252 iommu = irq_iommu->iommu;
255 start = iommu->ir_table->base + index;
262 bitmap_release_region(iommu->ir_table->bitmap, index,
265 return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
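Lines 246-265 are the release path (clear_entries() in mainline), the counterpart of alloc_irte(): zero every IRTE in the region, return it to the bitmap, then invalidate the interrupt entry cache. A sketch:

    static int clear_entries(struct irq_2_iommu *irq_iommu)
    {
            struct irte *start, *entry, *end;
            struct intel_iommu *iommu;
            int index;

            /* Only the head of a multi-vector block owns the region */
            if (irq_iommu->sub_handle)
                    return 0;

            iommu = irq_iommu->iommu;
            index = irq_iommu->irte_index;

            start = iommu->ir_table->base + index;
            end = start + (1 << irq_iommu->irte_mask);

            /* Clear both halves of each IRTE, dropping the present bit */
            for (entry = start; entry < end; entry++) {
                    set_64bit(&entry->low, 0);
                    set_64bit(&entry->high, 0);
            }
            bitmap_release_region(iommu->ir_table->bitmap, index,
                                  irq_iommu->irte_mask);

            /* Make the hardware forget the stale entries */
            return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
    }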
325 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
352 if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
438 static int iommu_load_old_irte(struct intel_iommu *iommu)
447 irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
461 memcpy(iommu->ir_table->base, old_ir_table, size);
463 __iommu_flush_cache(iommu, iommu->ir_table->base, size);
470 if (iommu->ir_table->base[i].present)
471 bitmap_set(iommu->ir_table->bitmap, i, 1);
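Lines 438-471 are the kexec/kdump path: if the previous kernel left remapping enabled, its IR table is copied over so in-flight interrupts keep resolving. A sketch, assuming the old table is reached via memremap() of the physical address programmed in DMAR_IRTA_REG:

    static int iommu_load_old_irte(struct intel_iommu *iommu)
    {
            struct irte *old_ir_table;
            phys_addr_t irt_phys;
            unsigned int i;
            size_t size;
            u64 irta;

            /* The old table must have the size we expect */
            irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
            if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
                 != INTR_REMAP_TABLE_REG_SIZE)
                    return -EINVAL;

            irt_phys = irta & VTD_PAGE_MASK;
            size     = INTR_REMAP_TABLE_ENTRIES * sizeof(struct irte);

            /* Map the table left behind by the previous kernel */
            old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
            if (!old_ir_table)
                    return -ENOMEM;

            memcpy(iommu->ir_table->base, old_ir_table, size);
            __iommu_flush_cache(iommu, iommu->ir_table->base, size);

            /* Mark still-live entries as allocated in our bitmap */
            for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
                    if (iommu->ir_table->base[i].present)
                            bitmap_set(iommu->ir_table->bitmap, i, 1);
            }

            memunmap(old_ir_table);
            return 0;
    }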
480 static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
486 addr = virt_to_phys((void *)iommu->ir_table->base);
488 raw_spin_lock_irqsave(&iommu->register_lock, flags);
490 dmar_writeq(iommu->reg + DMAR_IRTA_REG,
494 writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
496 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
498 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
504 qi_global_iec(iommu);
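Lines 480-504 program the table into hardware: write the base address and mode into DMAR_IRTA_REG, latch it with the SIRTP command, then globally invalidate the interrupt entry cache. A sketch, assuming the IR_X2APIC_MODE() and INTR_REMAP_TABLE_REG_SIZE encodings from <linux/intel-iommu.h>:

    static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
    {
            unsigned long flags;
            u64 addr;
            u32 sts;

            addr = virt_to_phys((void *)iommu->ir_table->base);

            raw_spin_lock_irqsave(&iommu->register_lock, flags);

            dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                        (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

            /* Tell hardware to latch the new table pointer (SIRTP) */
            writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

            IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                          readl, (sts & DMA_GSTS_IRTPS), sts);
            raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

            /* Flush the interrupt entry cache so hardware uses the new table */
            qi_global_iec(iommu);
    }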
507 static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
512 raw_spin_lock_irqsave(&iommu->register_lock, flags);
515 iommu->gcmd |= DMA_GCMD_IRE;
516 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
517 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
522 iommu->gcmd &= ~DMA_GCMD_CFI;
523 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
524 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
538 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
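Lines 507-538 enable remapping by setting DMA_GCMD_IRE, then, per lines 522-524, clear DMA_GCMD_CFI so compatibility-format interrupts are blocked. A sketch of the register sequence (mainline additionally warns if CFIS stays set):

    static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
    {
            unsigned long flags;
            u32 sts;

            raw_spin_lock_irqsave(&iommu->register_lock, flags);

            /* Enable interrupt remapping and wait for IRES */
            iommu->gcmd |= DMA_GCMD_IRE;
            writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
            IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                          readl, (sts & DMA_GSTS_IRES), sts);

            /* Block compatibility-format interrupts if still allowed */
            if (sts & DMA_GSTS_CFIS) {
                    iommu->gcmd &= ~DMA_GCMD_CFI;
                    writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
                    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                                  readl, !(sts & DMA_GSTS_CFIS), sts);
            }

            raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
    }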
541 static int intel_setup_irq_remapping(struct intel_iommu *iommu)
548 if (iommu->ir_table)
555 pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
559 iommu->seq_id, INTR_REMAP_PAGE_ORDER);
565 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
569 fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
573 iommu->ir_domain =
577 iommu);
578 if (!iommu->ir_domain) {
579 pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
582 iommu->ir_msi_domain =
583 arch_create_remap_msi_irq_domain(iommu->ir_domain,
585 iommu->seq_id);
589 iommu->ir_table = ir_table;
595 if (!iommu->qi) {
599 dmar_fault(-1, iommu);
600 dmar_disable_qi(iommu);
602 if (dmar_enable_qi(iommu)) {
608 init_ir_status(iommu);
610 if (ir_pre_enabled(iommu)) {
613 iommu->name);
614 clear_ir_pre_enabled(iommu);
615 iommu_disable_irq_remapping(iommu);
616 } else if (iommu_load_old_irte(iommu))
618 iommu->name);
621 iommu->name);
624 iommu_set_irq_remapping(iommu, eim_mode);
629 if (iommu->ir_msi_domain)
630 irq_domain_remove(iommu->ir_msi_domain);
631 iommu->ir_msi_domain = NULL;
632 irq_domain_remove(iommu->ir_domain);
633 iommu->ir_domain = NULL;
643 iommu->ir_table = NULL;
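Lines 541-643 are intel_setup_irq_remapping(); its core is allocating the IR table pages on the IOMMU's own NUMA node plus the allocation bitmap. A condensed sketch of that allocation only (sketch_alloc_ir_table is a hypothetical name; the irqdomain creation, QI setup, and pre-enabled handling shown above are omitted):

    static int sketch_alloc_ir_table(struct intel_iommu *iommu)
    {
            struct ir_table *ir_table;
            struct page *pages;
            unsigned long *bitmap;

            if (iommu->ir_table)
                    return 0;

            ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
            if (!ir_table)
                    return -ENOMEM;

            /* IR table pages come from the IOMMU's local NUMA node */
            pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
                                     INTR_REMAP_PAGE_ORDER);
            if (!pages) {
                    pr_err("IR%d: failed to allocate pages of order %d\n",
                           iommu->seq_id, INTR_REMAP_PAGE_ORDER);
                    goto out_free_table;
            }

            bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
            if (!bitmap) {
                    pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
                    goto out_free_pages;
            }

            ir_table->base = page_address(pages);
            ir_table->bitmap = bitmap;
            iommu->ir_table = ir_table;
            return 0;

    out_free_pages:
            __free_pages(pages, INTR_REMAP_PAGE_ORDER);
    out_free_table:
            kfree(ir_table);
            return -ENOMEM;
    }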
648 static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
652 if (iommu && iommu->ir_table) {
653 if (iommu->ir_msi_domain) {
654 fn = iommu->ir_msi_domain->fwnode;
656 irq_domain_remove(iommu->ir_msi_domain);
658 iommu->ir_msi_domain = NULL;
660 if (iommu->ir_domain) {
661 fn = iommu->ir_domain->fwnode;
663 irq_domain_remove(iommu->ir_domain);
665 iommu->ir_domain = NULL;
667 free_pages((unsigned long)iommu->ir_table->base,
669 bitmap_free(iommu->ir_table->bitmap);
670 kfree(iommu->ir_table);
671 iommu->ir_table = NULL;
678 static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
683 if (!ecap_ir_support(iommu->ecap))
690 qi_global_iec(iommu);
692 raw_spin_lock_irqsave(&iommu->register_lock, flags);
694 sts = readl(iommu->reg + DMAR_GSTS_REG);
698 iommu->gcmd &= ~DMA_GCMD_IRE;
699 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
701 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
705 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
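Lines 678-705 mirror the enable path: flush the interrupt entry cache first, then clear DMA_GCMD_IRE and wait for IRES to drop, skipping the register write if remapping is already off. A sketch:

    static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
    {
            unsigned long flags;
            u32 sts;

            if (!ecap_ir_support(iommu->ecap))
                    return;

            /* Invalidate the interrupt entry cache before disabling */
            qi_global_iec(iommu);

            raw_spin_lock_irqsave(&iommu->register_lock, flags);

            sts = readl(iommu->reg + DMAR_GSTS_REG);
            if (!(sts & DMA_GSTS_IRES))
                    goto end;

            iommu->gcmd &= ~DMA_GCMD_IRE;
            writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

            IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                          readl, !(sts & DMA_GSTS_IRES), sts);

    end:
            raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
    }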
720 struct intel_iommu *iommu;
722 for_each_iommu(iommu, drhd) {
723 if (ecap_ir_support(iommu->ecap)) {
724 iommu_disable_irq_remapping(iommu);
725 intel_teardown_irq_remapping(iommu);
736 struct intel_iommu *iommu;
761 for_each_iommu(iommu, drhd)
762 if (!ecap_ir_support(iommu->ecap))
774 for_each_iommu(iommu, drhd) {
775 if (eim && !ecap_eim_support(iommu->ecap)) {
776 pr_info("%s does not support EIM\n", iommu->name);
786 for_each_iommu(iommu, drhd) {
787 if (intel_setup_irq_remapping(iommu)) {
789 iommu->name);
807 struct intel_iommu *iommu;
821 for_each_iommu(iommu, drhd)
822 if (!cap_pi_support(iommu->cap)) {
833 struct intel_iommu *iommu;
839 for_each_iommu(iommu, drhd) {
840 if (!ir_pre_enabled(iommu))
841 iommu_enable_irq_remapping(iommu);
862 struct intel_iommu *iommu,
885 if (ir_hpet[count].iommu == iommu &&
888 else if (ir_hpet[count].iommu == NULL && free == -1)
896 ir_hpet[free].iommu = iommu;
907 struct intel_iommu *iommu,
930 if (ir_ioapic[count].iommu == iommu &&
933 else if (ir_ioapic[count].iommu == NULL && free == -1)
943 ir_ioapic[free].iommu = iommu;
946 scope->enumeration_id, drhd->address, iommu->seq_id);
952 struct intel_iommu *iommu)
966 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
968 ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
975 static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
980 if (ir_hpet[i].iommu == iommu)
981 ir_hpet[i].iommu = NULL;
984 if (ir_ioapic[i].iommu == iommu)
985 ir_ioapic[i].iommu = NULL;
995 struct intel_iommu *iommu;
999 for_each_iommu(iommu, drhd) {
1002 if (!ecap_ir_support(iommu->ecap))
1005 ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
1018 pr_err(FW_BUG "ioapic %d has no mapping iommu, "
1046 struct intel_iommu *iommu = NULL;
1051 for_each_iommu(iommu, drhd) {
1052 if (!ecap_ir_support(iommu->ecap))
1055 iommu_disable_irq_remapping(iommu);
1069 struct intel_iommu *iommu = NULL;
1071 for_each_iommu(iommu, drhd)
1072 if (iommu->qi)
1073 dmar_reenable_qi(iommu);
1078 for_each_iommu(iommu, drhd) {
1079 if (!ecap_ir_support(iommu->ecap))
1082 /* Set up interrupt remapping for iommu.*/
1083 iommu_set_irq_remapping(iommu, eim);
1084 iommu_enable_irq_remapping(iommu);
1358 struct intel_iommu *iommu = domain->host_data;
1365 if (!info || !iommu)
1388 index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
1466 static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
1471 if (eim && !ecap_eim_support(iommu->ecap)) {
1473 iommu->reg_phys, iommu->ecap);
1477 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
1479 iommu->reg_phys);
1486 ret = intel_setup_irq_remapping(iommu);
1489 iommu->name);
1490 intel_teardown_irq_remapping(iommu);
1491 ir_remove_ioapic_hpet_scope(iommu);
1493 iommu_enable_irq_remapping(iommu);
1502 struct intel_iommu *iommu = dmaru->iommu;
1506 if (iommu == NULL)
1508 if (!ecap_ir_support(iommu->ecap))
1511 !cap_pi_support(iommu->cap))
1515 if (!iommu->ir_table)
1516 ret = dmar_ir_add(dmaru, iommu);
1518 if (iommu->ir_table) {
1519 if (!bitmap_empty(iommu->ir_table->bitmap,
1523 iommu_disable_irq_remapping(iommu);
1524 intel_teardown_irq_remapping(iommu);
1525 ir_remove_ioapic_hpet_scope(iommu);
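Lines 1502-1525 are the DMAR hotplug hook: an IOMMU is only torn down once its IR table has no live allocations left. A sketch of dmar_ir_hotplug() as reconstructed from the matched lines:

    int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
    {
            struct intel_iommu *iommu = dmaru->iommu;
            int ret = 0;

            if (!irq_remapping_enabled)
                    return 0;
            if (iommu == NULL)
                    return -EINVAL;
            if (!ecap_ir_support(iommu->ecap))
                    return 0;
            /* Posted interrupts in use require PI support on the new unit */
            if (irq_remapping_cap(IRQ_POSTING_CAP) &&
                !cap_pi_support(iommu->cap))
                    return -EBUSY;

            if (insert) {
                    if (!iommu->ir_table)
                            ret = dmar_ir_add(dmaru, iommu);
            } else {
                    if (iommu->ir_table) {
                            /* Refuse removal while IRTEs are still allocated */
                            if (!bitmap_empty(iommu->ir_table->bitmap,
                                              INTR_REMAP_TABLE_ENTRIES)) {
                                    ret = -EBUSY;
                            } else {
                                    iommu_disable_irq_remapping(iommu);
                                    intel_teardown_irq_remapping(iommu);
                                    ir_remove_ioapic_hpet_scope(iommu);
                            }
                    }
            }

            return ret;
    }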