Lines Matching defs:smmu_domain

974 static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
981 struct arm_smmu_device *smmu = smmu_domain->smmu;
992 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
993 list_for_each_entry(master, &smmu_domain->devices, domain_head) {
999 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
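The group above (974-999) is arm_smmu_sync_cd(), which broadcasts a CFGI_CD command to every master attached to the domain. A hedged reconstruction of the whole body; the parts the listing omits (the command setup and the per-stream loop, including the num_streams/streams fields) are assumptions, not verbatim driver code:

static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
                             int ssid, bool leaf)
{
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_master *master;
        struct arm_smmu_cmdq_batch cmds = {};
        unsigned long flags;
        int i;
        /* CFGI_CD: the context descriptor for this SSID has changed. */
        struct arm_smmu_cmdq_ent cmd = {
                .opcode = CMDQ_OP_CFGI_CD,
                .cfgi   = { .ssid = ssid, .leaf = leaf },
        };

        /* Queue one invalidation per stream ID of every attached master. */
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_for_each_entry(master, &smmu_domain->devices, domain_head) {
                for (i = 0; i < master->num_streams; i++) {
                        cmd.cfgi.sid = master->streams[i].id;
                        arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
                }
        }
        spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

        arm_smmu_cmdq_batch_submit(smmu, &cmds);
}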
1029 static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
1035 struct arm_smmu_device *smmu = smmu_domain->smmu;
1036 struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
1038 if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
1050 arm_smmu_sync_cd(smmu_domain, ssid, false);
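Lines 1029-1050 are arm_smmu_get_cd_ptr(), which returns a pointer to the context descriptor for a given SSID. The linear-format shortcut and the sync at 1050 are visible above; the two-level walk in between is a sketch, and the leaf-allocation helpers (arm_smmu_alloc_cd_leaf_table(), arm_smmu_write_cd_l1_desc()) are assumed names:

static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
                                   u32 ssid)
{
        unsigned int idx;
        struct arm_smmu_l1_ctx_desc *l1_desc;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;

        /* Linear tables are indexed directly by SSID. */
        if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
                return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;

        /* Two-level tables: allocate the leaf on first use. */
        idx = ssid >> CTXDESC_SPLIT;
        l1_desc = &cdcfg->l1_desc[idx];
        if (!l1_desc->l2ptr) {
                if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
                        return NULL;
                arm_smmu_write_cd_l1_desc(cdcfg->cdtab +
                                          idx * CTXDESC_L1_DESC_DWORDS,
                                          l1_desc);
                /* The SMMU may have cached the previously invalid L1 entry. */
                arm_smmu_sync_cd(smmu_domain, ssid, false);
        }
        idx = ssid & (CTXDESC_L2_ENTRIES - 1);
        return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
}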
1056 int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
1074 if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
1077 cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
1105 arm_smmu_sync_cd(smmu_domain, ssid, true);
1117 if (smmu_domain->stall_enabled)
1131 arm_smmu_sync_cd(smmu_domain, ssid, true);
1135 static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
1140 struct arm_smmu_device *smmu = smmu_domain->smmu;
1141 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1184 static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
1188 struct arm_smmu_device *smmu = smmu_domain->smmu;
1189 struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
1281 struct arm_smmu_domain *smmu_domain = NULL;
1290 smmu_domain = master->domain;
1294 if (smmu_domain) {
1295 switch (smmu_domain->stage) {
1297 s1_cfg = &smmu_domain->s1_cfg;
1301 s2_cfg = &smmu_domain->s2_cfg;
1328 if (!smmu_domain || !(s1_cfg || s2_cfg)) {
1329 if (!smmu_domain && disable_bypass)
1809 int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
1818 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
1835 if (!atomic_read(&smmu_domain->nr_ats_masters))
1842 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
1843 list_for_each_entry(master, &smmu_domain->devices, domain_head) {
1849 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
1852 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
1854 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
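Lines 1809-1854 are arm_smmu_atc_inv_domain(): ATC invalidation is skipped entirely when the SMMU lacks ATS support (1818) or no attached master currently has ATS enabled (1835); otherwise a command batch is built under devices_lock. The command construction below is a sketch; arm_smmu_atc_inv_to_cmd() and the per-master stream walk are assumptions:

int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
                            unsigned long iova, size_t size)
{
        struct arm_smmu_master *master;
        struct arm_smmu_cmdq_batch cmds = {};
        struct arm_smmu_cmdq_ent cmd;
        unsigned long flags;
        int i;

        if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
                return 0;

        /* Cheap check before taking the lock: nobody has ATS enabled. */
        if (!atomic_read(&smmu_domain->nr_ats_masters))
                return 0;

        arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);

        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_for_each_entry(master, &smmu_domain->devices, domain_head) {
                if (!master->ats_enabled)
                        continue;
                for (i = 0; i < master->num_streams; i++) {
                        cmd.atc.sid = master->streams[i].id;
                        arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
                }
        }
        spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

        return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
}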
1860 struct arm_smmu_domain *smmu_domain = cookie;
1861 struct arm_smmu_device *smmu = smmu_domain->smmu;
1871 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1872 arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
1875 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
1878 arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
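Lines 1860-1878 are arm_smmu_tlb_inv_context(): stage-1 domains invalidate by ASID, stage-2 domains by VMID, and in both cases the device ATCs are flushed afterwards. A sketch, with the S2 opcode and the command-issue helper as assumptions:

static void arm_smmu_tlb_inv_context(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cmdq_ent cmd;

        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                /* Stage 1: flush everything tagged with the domain's ASID. */
                arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
        } else {
                /* Stage 2: flush everything tagged with the domain's VMID. */
                cmd.opcode    = CMDQ_OP_TLBI_S12_VMALL;
                cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
                arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
        }
        /* Device-side ATCs hold translations too and must be invalidated. */
        arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
}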
1884 struct arm_smmu_domain *smmu_domain)
1886 struct arm_smmu_device *smmu = smmu_domain->smmu;
1896 tg = __ffs(smmu_domain->domain.pgsize_bitmap);
1954 struct arm_smmu_domain *smmu_domain)
1962 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1963 cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
1965 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
1968 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
1970 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
1976 arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size);
1981 struct arm_smmu_domain *smmu_domain)
1984 .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
1992 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
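Lines 1884-1896 belong to __arm_smmu_tlb_inv_range(), which derives the translation granule from domain.pgsize_bitmap, and lines 1954-1992 are the two range-invalidation wrappers. Below is a sketch of the domain variant (1954-1976); the ASID variant at 1981-1992 follows the same shape but takes the ASID as a parameter. The S2 opcode name is an assumption:

static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
                                          size_t granule, bool leaf,
                                          struct arm_smmu_domain *smmu_domain)
{
        struct arm_smmu_cmdq_ent cmd = {
                .tlbi = {
                        .leaf = leaf,
                },
        };

        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                /* Stage 1: VA invalidation, EL2 variant when E2H is in use. */
                cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
                             CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
                cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
        } else {
                /* Stage 2: IPA invalidation tagged with the VMID. */
                cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
                cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
        }
        __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);

        /* The devices' ATCs must drop the range as well. */
        arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size);
}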
1999 struct arm_smmu_domain *smmu_domain = cookie;
2000 struct iommu_domain *domain = &smmu_domain->domain;
2036 struct arm_smmu_domain *smmu_domain;
2051 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
2052 if (!smmu_domain)
2055 mutex_init(&smmu_domain->init_mutex);
2056 INIT_LIST_HEAD(&smmu_domain->devices);
2057 spin_lock_init(&smmu_domain->devices_lock);
2058 INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
2060 return &smmu_domain->domain;
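Lines 2036-2060 are arm_smmu_domain_alloc(); nearly the whole body is visible above. Assembled into one piece, with only the domain-type filtering at the top assumed (those lines are not in the listing):

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
        struct arm_smmu_domain *smmu_domain;

        /* (Assumed) reject domain types this driver does not implement. */
        if (type != IOMMU_DOMAIN_UNMANAGED &&
            type != IOMMU_DOMAIN_DMA &&
            type != IOMMU_DOMAIN_IDENTITY)
                return NULL;

        /*
         * Allocate the domain and initialise its bookkeeping; it cannot be
         * finalised until a master (and hence an SMMU instance) is attached.
         */
        smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
        if (!smmu_domain)
                return NULL;

        mutex_init(&smmu_domain->init_mutex);
        INIT_LIST_HEAD(&smmu_domain->devices);
        spin_lock_init(&smmu_domain->devices_lock);
        INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);

        return &smmu_domain->domain;
}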
2065 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2066 struct arm_smmu_device *smmu = smmu_domain->smmu;
2068 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
2071 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
2072 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
2077 arm_smmu_free_cd_tables(smmu_domain);
2081 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
2086 kfree(smmu_domain);
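Lines 2065-2086 are arm_smmu_domain_free(): the io-pgtable is torn down, then stage-1 domains release their ASID and CD tables while stage-2 domains return their VMID. The ASID/VMID release calls below are assumptions; the listing only shows the surrounding structure:

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;

        free_io_pgtable_ops(smmu_domain->pgtbl_ops);

        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

                /* (Assumed) release the ASID, then the CD tables. */
                mutex_lock(&arm_smmu_asid_lock);
                arm_smmu_free_asid(&cfg->cd);
                mutex_unlock(&arm_smmu_asid_lock);
                if (cfg->cdcfg.cdtab)
                        arm_smmu_free_cd_tables(smmu_domain);
        } else {
                struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

                /* (Assumed) return the VMID to the allocator. */
                if (cfg->vmid)
                        ida_free(&smmu->vmid_map, cfg->vmid);
        }

        kfree(smmu_domain);
}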
2089 static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
2095 struct arm_smmu_device *smmu = smmu_domain->smmu;
2096 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
2110 smmu_domain->stall_enabled = master->stall_enabled;
2112 ret = arm_smmu_alloc_cd_tables(smmu_domain);
2132 ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd);
2140 arm_smmu_free_cd_tables(smmu_domain);
2148 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
2153 struct arm_smmu_device *smmu = smmu_domain->smmu;
2154 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
2187 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2188 struct arm_smmu_device *smmu = smmu_domain->smmu;
2191 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
2197 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
2199 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2201 switch (smmu_domain->stage) {
2229 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
2237 ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
2243 smmu_domain->pgtbl_ops = pgtbl_ops;
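Lines 2089-2243 cover domain finalisation: arm_smmu_domain_finalise_s1()/..._s2() program the per-stage config (2089-2154), and arm_smmu_domain_finalise() (2187-2243) picks the stage, allocates the io-pgtable and calls the stage hook. A control-flow sketch of the latter follows; the feature names are assumptions and the real io_pgtable_cfg setup is compressed into comments:

static int arm_smmu_domain_finalise(struct iommu_domain *domain,
                                    struct arm_smmu_master *master)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct io_pgtable_cfg pgtbl_cfg = {};
        struct io_pgtable_ops *pgtbl_ops;
        enum io_pgtable_fmt fmt;
        int (*finalise_stage_fn)(struct arm_smmu_domain *,
                                 struct arm_smmu_master *,
                                 struct io_pgtable_cfg *);
        int ret;

        /* Identity domains bypass translation entirely (line 2191). */
        if (domain->type == IOMMU_DOMAIN_IDENTITY) {
                smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
                return 0;
        }

        /* Restrict the requested stage to what the hardware supports. */
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

        switch (smmu_domain->stage) {
        case ARM_SMMU_DOMAIN_S1:
                /* Real code also fills pgtbl_cfg (ias/oas, pgsize_bitmap). */
                fmt = ARM_64_LPAE_S1;
                finalise_stage_fn = arm_smmu_domain_finalise_s1;
                break;
        case ARM_SMMU_DOMAIN_S2:
        case ARM_SMMU_DOMAIN_NESTED:
                fmt = ARM_64_LPAE_S2;
                finalise_stage_fn = arm_smmu_domain_finalise_s2;
                break;
        default:
                return -EINVAL;
        }

        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
        if (!pgtbl_ops)
                return -ENOMEM;

        ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
        if (ret < 0) {
                free_io_pgtable_ops(pgtbl_ops);
                return ret;
        }

        smmu_domain->pgtbl_ops = pgtbl_ops;
        return 0;
}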
2309 struct arm_smmu_domain *smmu_domain = master->domain;
2319 atomic_inc(&smmu_domain->nr_ats_masters);
2320 arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
2327 struct arm_smmu_domain *smmu_domain = master->domain;
2339 atomic_dec(&smmu_domain->nr_ats_masters);
2392 struct arm_smmu_domain *smmu_domain = master->domain;
2394 if (!smmu_domain)
2399 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
2401 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
2414 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2435 mutex_lock(&smmu_domain->init_mutex);
2437 if (!smmu_domain->smmu) {
2438 smmu_domain->smmu = smmu;
2441 smmu_domain->smmu = NULL;
2444 } else if (smmu_domain->smmu != smmu) {
2447 } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
2448 master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
2451 } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
2452 smmu_domain->stall_enabled != master->stall_enabled) {
2457 master->domain = smmu_domain;
2466 if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
2471 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
2472 list_add(&master->domain_head, &smmu_domain->devices);
2473 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
2478 mutex_unlock(&smmu_domain->init_mutex);
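Lines 2414-2478 are the core of arm_smmu_attach_dev(): everything from finalisation to the devices-list insertion runs under init_mutex. The excerpt below fills the gaps between the matched lines with assumptions; the error paths, STE installation and ATS helpers around line 2466 are paraphrased, not verbatim:

        /* ret (initialised to 0) and flags are declared in the prologue. */
        mutex_lock(&smmu_domain->init_mutex);

        if (!smmu_domain->smmu) {
                /* First attach binds the domain to this SMMU and finalises it. */
                smmu_domain->smmu = smmu;
                ret = arm_smmu_domain_finalise(domain, master);
                if (ret)
                        smmu_domain->smmu = NULL;
        } else if (smmu_domain->smmu != smmu) {
                /* A domain cannot span two SMMU instances. */
                ret = -EINVAL;
        } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
                   master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
                /* The CD table was sized for a different SSID width. */
                ret = -EINVAL;
        } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
                   smmu_domain->stall_enabled != master->stall_enabled) {
                /* Stalling and non-stalling masters cannot share a CD table. */
                ret = -EINVAL;
        }
        if (ret)
                goto out_unlock;

        master->domain = smmu_domain;

        /* ATS is never enabled for bypass domains (line 2466). */
        if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
                master->ats_enabled = arm_smmu_ats_supported(master);

        arm_smmu_install_ste_for_dev(master);

        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_add(&master->domain_head, &smmu_domain->devices);
        spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

        arm_smmu_enable_ats(master);

out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);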
2498 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2499 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
2509 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2511 if (smmu_domain->smmu)
2512 arm_smmu_tlb_inv_context(smmu_domain);
2518 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2525 gather->pgsize, true, smmu_domain);
2745 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2748 mutex_lock(&smmu_domain->init_mutex);
2749 if (smmu_domain->smmu)
2752 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2753 mutex_unlock(&smmu_domain->init_mutex);
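The final group (2745-2753) is arm_smmu_enable_nesting(), which only switches the stage while the domain is still unfinalised. As a complete sketch (the error code is an assumption):

static int arm_smmu_enable_nesting(struct iommu_domain *domain)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        int ret = 0;

        mutex_lock(&smmu_domain->init_mutex);
        if (smmu_domain->smmu) {
                /* Too late: the domain is already bound to an SMMU. */
                ret = -EPERM;
        } else {
                smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
        }
        mutex_unlock(&smmu_domain->init_mutex);

        return ret;
}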