Lines Matching defs:smmu_domain
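For orientation, the member accesses in the matches below (->smmu, ->cfg, ->cb_lock, ->init_mutex, ->stage, ->flush_ops, ->pgtbl_ops, ->non_strict, ->domain) and the repeated to_smmu_domain() conversions imply a domain structure roughly like the following. This is a sketch reconstructed from this listing, not the verbatim definition in arm-smmu.h; the field order and the comments are assumptions.

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;		/* owning SMMU, NULL until first attach */
	struct io_pgtable_ops		*pgtbl_ops;	/* page-table ops from alloc_io_pgtable_ops() */
	const struct iommu_flush_ops	*flush_ops;	/* stage-1 or stage-2 TLB maintenance callbacks */
	struct arm_smmu_cfg		cfg;		/* context bank index, ASID/VMID, format */
	enum arm_smmu_domain_stage	stage;		/* S1, S2, NESTED or BYPASS */
	bool				non_strict;	/* lazy TLB invalidation for DMA domains */
	struct mutex			init_mutex;	/* serialises domain initialisation */
	spinlock_t			cb_lock;	/* serialises context-bank TLB sync / ATS ops */
	struct iommu_domain		domain;		/* embedded core IOMMU domain */
};

/* to_smmu_domain(), used throughout the matches, is the usual container_of() helper. */
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}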
245 static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
247 struct arm_smmu_device *smmu = smmu_domain->smmu;
250 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
251 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
253 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
258 struct arm_smmu_domain *smmu_domain = cookie;
264 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
265 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
266 arm_smmu_tlb_sync_context(smmu_domain);
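The matches at 245-253 and 258-266 fit together as the stage-1 per-context invalidation path: a TLBIASID write into the domain's context bank, followed by a TLBSYNC serialised on cb_lock. The sketch below shows how those lines sit in their functions; the lines absent from this listing (the barrier, the register constants on the sync call, the local declarations) are reconstructed from the driver's usual structure and should be treated as assumptions.

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	/* Poll the context bank's TLBSTATUS until the TLBSYNC completes */
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	/* Make PTE updates visible before the (relaxed) TLBI write */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}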
271 struct arm_smmu_domain *smmu_domain = cookie;
272 struct arm_smmu_device *smmu = smmu_domain->smmu;
276 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
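The stage-2 counterpart at 271-276 invalidates by VMID through the global register space rather than through the context bank. The surrounding lines below are again a reconstruction; the trailing global sync is an assumption based on the driver's usual pattern.

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* As above: order PTE updates before the relaxed TLBI write */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}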
283 struct arm_smmu_domain *smmu_domain = cookie;
284 struct arm_smmu_device *smmu = smmu_domain->smmu;
285 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
311 struct arm_smmu_domain *smmu_domain = cookie;
312 struct arm_smmu_device *smmu = smmu_domain->smmu;
313 int idx = smmu_domain->cfg.cbndx;
320 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
392 struct arm_smmu_domain *smmu_domain = cookie;
393 struct arm_smmu_device *smmu = smmu_domain->smmu;
398 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
427 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
428 struct arm_smmu_device *smmu = smmu_domain->smmu;
429 int idx = smmu_domain->cfg.cbndx;
480 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
483 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
484 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
623 static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
628 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
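Line 628 shows the implementation-specific override point for context-bank allocation; when no such quirk hook is present the driver falls back to its generic bitmap allocator. A sketch of the full helper, with the guard and the fallback call reconstructed as assumptions:

static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct arm_smmu_device *smmu,
				       struct device *dev, unsigned int start)
{
	if (smmu->impl && smmu->impl->alloc_context_bank)
		return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);

	/* Generic path: grab the first free context bank at or above 'start' */
	return __arm_smmu_alloc_bitmap(smmu->context_map, start,
				       smmu->num_context_banks);
}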
642 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
643 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
646 mutex_lock(&smmu_domain->init_mutex);
647 if (smmu_domain->smmu)
651 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
652 smmu_domain->smmu = smmu;
675 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
677 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
692 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
705 switch (smmu_domain->stage) {
722 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
742 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
744 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
751 ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
756 smmu_domain->smmu = smmu;
766 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
776 .tlb = smmu_domain->flush_ops,
781 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
786 if (smmu_domain->non_strict)
789 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
808 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
830 mutex_unlock(&smmu_domain->init_mutex);
833 smmu_domain->pgtbl_ops = pgtbl_ops;
838 smmu_domain->smmu = NULL;
840 mutex_unlock(&smmu_domain->init_mutex);
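The block of matches at 642-840 all belong to arm_smmu_init_domain_context(), the function that binds a domain to an SMMU instance on first attach. The full function is long; the condensed skeleton below only shows how the matched lines relate to each other, with the remaining steps compressed into comments, so it is an outline rather than the verbatim code (locals such as start, fmt and pgtbl_cfg come from the compressed steps).

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu,
					struct device *dev)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *pgtbl_ops;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)			/* already initialised (line 647) */
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;	/* lines 651-652 */
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/* Pick S1 vs S2 from the domain stage and the SMMU's capabilities
	 * (lines 675-705), then choose the page-table format and the matching
	 * flush_ops (lines 722-744). */

	ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);	/* line 751 */
	if (ret < 0)
		goto out_unlock;

	smmu_domain->smmu = smmu;						/* line 756 */

	/* Build io_pgtable_cfg (pgsize_bitmap, ias/oas, tlb = flush_ops), let
	 * impl->init_context() adjust it (line 781), then: */
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);		/* line 789 */
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Initialise and write the context bank, request the context IRQ */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);			/* line 808 */

	mutex_unlock(&smmu_domain->init_mutex);					/* line 830 */
	smmu_domain->pgtbl_ops = pgtbl_ops;					/* line 833 */
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;						/* line 838 */
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);					/* line 840 */
	return ret;
}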
846 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
847 struct arm_smmu_device *smmu = smmu_domain->smmu;
848 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
870 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
878 struct arm_smmu_domain *smmu_domain;
889 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
890 if (!smmu_domain)
894 iommu_get_dma_cookie(&smmu_domain->domain))) {
895 kfree(smmu_domain);
899 mutex_init(&smmu_domain->init_mutex);
900 spin_lock_init(&smmu_domain->cb_lock);
902 return &smmu_domain->domain;
907 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
915 kfree(smmu_domain);
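The matches at 878-902 and 907-915 are the allocation and teardown pair. The sketch below reconstructs the surrounding lines; the accepted domain types, the legacy-binding check and the teardown calls are assumptions based on the driver's usual structure for this kernel era.

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	/* DMA domains need a flush-queue cookie up front (lines 894-895) */
	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/* Assumes all masters have already been detached */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}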
1114 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1118 struct arm_smmu_device *smmu = smmu_domain->smmu;
1120 u8 cbndx = smmu_domain->cfg.cbndx;
1124 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1143 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1180 if (smmu_domain->smmu != smmu) {
1183 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1189 ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
1246 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1247 struct arm_smmu_device *smmu = smmu_domain->smmu;
1249 if (smmu_domain->flush_ops) {
1251 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
1259 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1260 struct arm_smmu_device *smmu = smmu_domain->smmu;
1267 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1268 arm_smmu_tlb_sync_context(smmu_domain);
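Lines 1246-1251 and 1259-1268 are the iommu_ops flush callbacks. The sketch below shows how they wrap the flush_ops table and the sync helpers; the runtime-PM bracketing and the V2/global fallback are reconstructed and should be treated as assumptions.

static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu_domain->flush_ops) {
		arm_smmu_rpm_get(smmu);
		smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
		arm_smmu_rpm_put(smmu);
	}
}

static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu)
		return;

	arm_smmu_rpm_get(smmu);
	if (smmu->version == ARM_SMMU_V2 ||
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		arm_smmu_tlb_sync_context(smmu_domain);
	else
		arm_smmu_tlb_sync_global(smmu);
	arm_smmu_rpm_put(smmu);
}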
1277 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1278 struct arm_smmu_device *smmu = smmu_domain->smmu;
1279 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1280 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1293 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1303 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1312 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1329 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1330 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1338 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1339 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
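Lines 1329-1339 choose between a hardware ATS1 translation (the arm_smmu_iova_to_phys_hard() path whose cb_lock usage shows up at 1293-1312) and a plain software page-table walk. A sketch of the selection logic, with the identity-domain and NULL-ops guards reconstructed as assumptions:

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	/* Stage-1 domains on SMMUs with ATS1* registers can ask the hardware */
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		return arm_smmu_iova_to_phys_hard(domain, iova);

	/* Otherwise walk the io-pgtable in software */
	return ops->iova_to_phys(ops, iova);
}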
1507 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1513 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1522 *(int *)data = smmu_domain->non_strict;
1537 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1539 mutex_lock(&smmu_domain->init_mutex);
1545 if (smmu_domain->smmu) {
1551 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1553 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1562 smmu_domain->non_strict = *(int *)data;
1572 mutex_unlock(&smmu_domain->init_mutex);
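Finally, lines 1537-1572 show the attribute handlers taking init_mutex so that the translation stage can only be changed before the domain has been bound to an SMMU. The fragment below sketches just the DOMAIN_ATTR_NESTING case around lines 1545-1553; the enclosing switch statement and the error label are assumptions.

	case DOMAIN_ATTR_NESTING:
		/* Too late once arm_smmu_init_domain_context() has run */
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;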