Lines matching defs:smmu_domain (each match is prefixed with its line number in the driver source; unmatched lines are elided)

226 static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
228 struct arm_smmu_device *smmu = smmu_domain->smmu;
231 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
232 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
234 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
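Lines 226-234 span the whole of arm_smmu_tlb_sync_context(). For context, the full function as it reads in the mainline driver; the unmatched lines below are reconstructed from upstream and may differ slightly by kernel version:

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	/* cb_lock serialises syncs against other users of this context bank */
	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}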
239 struct arm_smmu_domain *smmu_domain = cookie;
245 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
246 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
247 arm_smmu_tlb_sync_context(smmu_domain);
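Lines 239-247 fall in arm_smmu_tlb_inv_context_s1(), which invalidates the stage-1 TLB entries by ASID and then syncs. Reconstructed from upstream (approximate):

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}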
252 struct arm_smmu_domain *smmu_domain = cookie;
253 struct arm_smmu_device *smmu = smmu_domain->smmu;
257 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
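The stage-2 counterpart (lines 252-257) invalidates by VMID through the global GR0 register, so it needs a global sync rather than a context-bank sync. A reconstruction from upstream, again approximate:

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Same ordering requirement as the stage-1 path above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}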
264 struct arm_smmu_domain *smmu_domain = cookie;
265 struct arm_smmu_device *smmu = smmu_domain->smmu;
266 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
292 struct arm_smmu_domain *smmu_domain = cookie;
293 struct arm_smmu_device *smmu = smmu_domain->smmu;
294 int idx = smmu_domain->cfg.cbndx;
301 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
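Lines 292-301 sit in arm_smmu_tlb_inv_range_s2(). The AArch64-format check at line 301 decides between a 64-bit and a 32-bit TLBI register write; the surrounding loop, sketched from the upstream source (approximate), is:

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	/* TLBI registers take the address in units of 4K pages */
	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}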
312 struct arm_smmu_domain *smmu_domain = cookie;
313 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
364 struct arm_smmu_domain *smmu_domain = cookie;
365 struct arm_smmu_device *smmu = smmu_domain->smmu;
370 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
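Lines 364-370 are most likely arm_smmu_tlb_add_page_s2_v1(), where SMMUv1 stage-2 fires a TLBIVMID per page straight away rather than deferring everything to the sync. A hedged reconstruction from upstream:

static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	/* Get the first TLBIVMID in flight as far ahead of the sync as possible */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}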
396 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
397 struct arm_smmu_device *smmu = smmu_domain->smmu;
398 int idx = smmu_domain->cfg.cbndx;
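Lines 396-398 appear to be the declarations at the top of arm_smmu_context_fault(), the per-context-bank IRQ handler. A condensed sketch of its body, reconstructed from upstream and approximate (later kernels also report the fault via report_iommu_fault()):

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	/* Clear the fault by writing FSR back, then retire the IRQ */
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}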
454 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
457 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
458 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
600 static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
605 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
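The wrapper at lines 600-605 lets an implementation quirk layer override context-bank allocation; when no impl hook is present, the mainline fallback is a simple bitmap search. Reconstructed (approximate):

static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct arm_smmu_device *smmu,
				       struct device *dev, unsigned int start)
{
	if (smmu->impl && smmu->impl->alloc_context_bank)
		return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);

	return __arm_smmu_alloc_bitmap(smmu->context_map, start,
				       smmu->num_context_banks);
}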
619 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
620 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
623 mutex_lock(&smmu_domain->init_mutex);
624 if (smmu_domain->smmu)
628 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
629 smmu_domain->smmu = smmu;
652 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
654 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
669 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
682 switch (smmu_domain->stage) {
699 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
719 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
721 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
728 ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
733 smmu_domain->smmu = smmu;
743 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
753 .tlb = smmu_domain->flush_ops,
758 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
763 if (smmu_domain->pgtbl_quirks)
764 pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
766 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
785 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
807 mutex_unlock(&smmu_domain->init_mutex);
810 smmu_domain->pgtbl_ops = pgtbl_ops;
815 smmu_domain->smmu = NULL;
817 mutex_unlock(&smmu_domain->init_mutex);
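Matched lines 619-817 all belong to arm_smmu_init_domain_context(), which finalises a domain on first attach. The control flow around the matches, condensed from the mainline driver (elisions marked with "..."; exact ordering may differ by kernel version), is roughly:

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;			/* already finalised */

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}
	...					/* pick stage/fmt, ias/oas, flush_ops */
	ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
	if (ret < 0)
		goto out_unlock;
	smmu_domain->smmu = smmu;		/* claim the context bank */
	...					/* build pgtbl_cfg, impl->init_context(), quirks */
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	...					/* request the context IRQ */
	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page-table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;

Note how smmu_domain->smmu doubles as the "finalised" flag: it is set only after a context bank is claimed and cleared again on the error path, which is why so many entry points below simply test it under init_mutex.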
823 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
824 struct arm_smmu_device *smmu = smmu_domain->smmu;
825 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
847 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
855 struct arm_smmu_domain *smmu_domain;
866 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
867 if (!smmu_domain)
870 mutex_init(&smmu_domain->init_mutex);
871 spin_lock_init(&smmu_domain->cb_lock);
873 return &smmu_domain->domain;
878 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
885 kfree(smmu_domain);
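Lines 855-873 and 878-885 are the domain allocation and free paths. The pair, reconstructed from upstream (approximate):

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	/* Zeroed allocation: smmu_domain->smmu == NULL marks "not finalised" */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/* Callers must have detached all devices already */
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}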
1084 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1088 struct arm_smmu_device *smmu = smmu_domain->smmu;
1090 u8 cbndx = smmu_domain->cfg.cbndx;
1094 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
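Lines 1084-1094 open arm_smmu_domain_add_master(), which programs the stream-to-context (S2CR) entries for every stream mapping of the master. Sketched from the upstream source (approximate):

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	/* Bypass domains route transactions around translation entirely */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}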
1113 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1150 if (smmu_domain->smmu != smmu) {
1156 ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
1215 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1216 struct arm_smmu_device *smmu = smmu_domain->smmu;
1218 if (smmu_domain->flush_ops) {
1220 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
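Lines 1215-1220 are arm_smmu_flush_iotlb_all(), which simply delegates to the flush_ops chosen during domain finalisation, bracketed by runtime-PM gets/puts. Reconstructed (approximate):

static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu_domain->flush_ops) {
		arm_smmu_rpm_get(smmu);
		smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
		arm_smmu_rpm_put(smmu);
	}
}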
1228 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1229 struct arm_smmu_device *smmu = smmu_domain->smmu;
1236 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1237 arm_smmu_tlb_sync_context(smmu_domain);
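Lines 1228-1237 are arm_smmu_iotlb_sync(): SMMUv2, or any stage-1 domain, can sync on the context bank, while SMMUv1 stage-2 has to fall back to the global sync register. Reconstructed (approximate):

static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu)
		return;

	arm_smmu_rpm_get(smmu);
	if (smmu->version == ARM_SMMU_V2 ||
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		arm_smmu_tlb_sync_context(smmu_domain);
	else
		arm_smmu_tlb_sync_global(smmu);
	arm_smmu_rpm_put(smmu);
}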
1246 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1247 struct arm_smmu_device *smmu = smmu_domain->smmu;
1248 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1249 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1262 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1272 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1281 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);

1298 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1299 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1304 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1305 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
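Lines 1298-1305 are arm_smmu_iova_to_phys(): when the hardware supports translation ops and the domain is stage-1, the lookup at lines 1246-1281 (arm_smmu_iova_to_phys_hard(), which performs an ATS1PR translation under cb_lock) is preferred over a software page-table walk. Reconstructed (approximate):

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		return arm_smmu_iova_to_phys_hard(domain, iova);

	/* Otherwise walk the page tables in software */
	return ops->iova_to_phys(ops, iova);
}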
1488 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1491 mutex_lock(&smmu_domain->init_mutex);
1492 if (smmu_domain->smmu)
1495 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1496 mutex_unlock(&smmu_domain->init_mutex);
1504 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1507 mutex_lock(&smmu_domain->init_mutex);
1508 if (smmu_domain->smmu)
1511 smmu_domain->pgtbl_quirks = quirks;
1512 mutex_unlock(&smmu_domain->init_mutex);
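The final two groups (lines 1488-1496 and 1504-1512) share one pattern: take init_mutex, refuse with -EPERM once the domain is finalised (smmu_domain->smmu set), otherwise record the requested setting for later use by arm_smmu_init_domain_context(). Reconstructed for the nesting case (approximate):

static int arm_smmu_enable_nesting(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int ret = 0;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		ret = -EPERM;		/* too late: domain already finalised */
	else
		smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
	mutex_unlock(&smmu_domain->init_mutex);

	return ret;
}

arm_smmu_set_pgtable_quirks() follows the identical shape, assigning smmu_domain->pgtbl_quirks instead of the stage.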