Lines matching defs:smmu_domain in the Linux kernel's Arm SMMUv3 driver (arm-smmu-v3.c); the number on each matched line is its position in the source file.
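
The annotations between the groups of matches below are editorial sketches, reconstructed from the matched lines plus general knowledge of this driver; they are not verbatim driver code. For orientation, here is the approximate shape of struct arm_smmu_domain, using only the fields the matches reference:

    /* Sketch: layout inferred from the fields used below, not copied
     * from the driver. The embedded iommu_domain is what the core IOMMU
     * API passes around; to_smmu_domain() recovers the container. */
    struct arm_smmu_domain {
        struct arm_smmu_device      *smmu;          /* set at first attach */
        struct mutex                init_mutex;     /* protects smmu/finalise */

        struct io_pgtable_ops       *pgtbl_ops;
        bool                        non_strict;
        atomic_t                    nr_ats_masters;

        enum arm_smmu_domain_stage  stage;          /* S1, S2, NESTED, BYPASS */
        union {
            struct arm_smmu_s1_cfg  s1_cfg;
            struct arm_smmu_s2_cfg  s2_cfg;
        };

        struct iommu_domain         domain;

        struct list_head            devices;        /* attached masters */
        spinlock_t                  devices_lock;   /* protects the list */
    };

    static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
    {
        return container_of(dom, struct arm_smmu_domain, domain);
    }
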
908 static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
915 struct arm_smmu_device *smmu = smmu_domain->smmu;
924 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
925 list_for_each_entry(master, &smmu_domain->devices, domain_head) {
931 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
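
arm_smmu_sync_cd() (908-931) broadcasts a CFGI_CD invalidation for one SSID to every StreamID of every master attached to the domain, so a modified context descriptor is re-fetched everywhere. A sketch of the loop, with the per-master StreamID fields hedged:

    struct arm_smmu_cmdq_batch cmds = {};
    struct arm_smmu_cmdq_ent cmd = {
        .opcode = CMDQ_OP_CFGI_CD,              /* drop one cached CD */
        .cfgi   = { .ssid = ssid, .leaf = leaf },
    };

    spin_lock_irqsave(&smmu_domain->devices_lock, flags);
    list_for_each_entry(master, &smmu_domain->devices, domain_head) {
        for (i = 0; i < master->num_sids; i++) {
            cmd.cfgi.sid = master->sids[i];     /* one command per StreamID */
            arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
        }
    }
    spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

    arm_smmu_cmdq_batch_submit(smmu, &cmds);
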
961 static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
967 struct arm_smmu_device *smmu = smmu_domain->smmu;
968 struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
970 if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
982 arm_smmu_sync_cd(smmu_domain, ssid, false);
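
arm_smmu_get_cd_ptr() (961-982) turns an SSID into a pointer to its context descriptor: direct indexing for a linear table, or an L1-then-leaf walk for the two-level format, allocating the leaf on first use. The sync at 982 makes the freshly written L1 descriptor visible before anyone writes the CD itself. Roughly (helper and constant names as used elsewhere in this driver era, hedged):

    if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
        return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;

    idx = ssid >> CTXDESC_SPLIT;                /* L1 index */
    l1_desc = &cdcfg->l1_desc[idx];
    if (!l1_desc->l2ptr) {
        if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
            return NULL;

        l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
        arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
        /* an invalid L1 descriptor may have been cached */
        arm_smmu_sync_cd(smmu_domain, ssid, false);
    }
    idx = ssid & (CTXDESC_L2_ENTRIES - 1);      /* leaf index */
    return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
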
988 int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
1003 struct arm_smmu_device *smmu = smmu_domain->smmu;
1005 if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
1008 cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
1034 arm_smmu_sync_cd(smmu_domain, ssid, true);
1061 arm_smmu_sync_cd(smmu_domain, ssid, true);
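
The two sync points in arm_smmu_write_ctx_desc() (1034 and 1061) implement the required ordering for updating a CD that may be live: flush cached copies after rewriting the auxiliary words, then publish word 0 (which carries the valid bit) with a single 64-bit store and flush again. A sketch of the ordering only, not the full field encoding:

    cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
    cdptr[2] = 0;
    cdptr[3] = cpu_to_le64(cd->mair);
    /* the SMMU may still hold the old words in its config cache */
    arm_smmu_sync_cd(smmu_domain, ssid, true);

    val = cd->tcr | FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) | CTXDESC_CD_0_V;
    WRITE_ONCE(cdptr[0], cpu_to_le64(val));     /* atomic 64-bit publish */
    arm_smmu_sync_cd(smmu_domain, ssid, true);
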
1065 static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
1070 struct arm_smmu_device *smmu = smmu_domain->smmu;
1071 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1114 static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
1118 struct arm_smmu_device *smmu = smmu_domain->smmu;
1119 struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
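
arm_smmu_alloc_cd_tables() (1065-) sizes the CD table from the master's SSID width: a flat array when the context count is small or the SMMU lacks two-level CD table support, otherwise L1 descriptors now with 64K leaf tables allocated on demand; arm_smmu_free_cd_tables() (1114-) tears down whichever layout was chosen. The format decision, roughly:

    max_contexts = 1 << cfg->s1cdmax;

    if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
        max_contexts <= CTXDESC_L2_ENTRIES) {
        cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
        cdcfg->num_l1_ents = max_contexts;      /* one CD per entry */
    } else {
        cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
        cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
                                          CTXDESC_L2_ENTRIES);
    }
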
1212 struct arm_smmu_domain *smmu_domain = NULL;
1221 smmu_domain = master->domain;
1225 if (smmu_domain) {
1226 switch (smmu_domain->stage) {
1228 s1_cfg = &smmu_domain->s1_cfg;
1232 s2_cfg = &smmu_domain->s2_cfg;
1259 if (!smmu_domain || !(s1_cfg || s2_cfg)) {
1260 if (!smmu_domain && disable_bypass)
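
These matches (1212-1260) sit inside arm_smmu_write_strtab_ent(): the domain's stage selects which config lands in the STE, and a master with no domain at all falls back to abort or bypass depending on the disable_bypass module parameter. Sketch of that selection:

    if (smmu_domain) {
        switch (smmu_domain->stage) {
        case ARM_SMMU_DOMAIN_S1:
            s1_cfg = &smmu_domain->s1_cfg;
            break;
        case ARM_SMMU_DOMAIN_S2:
        case ARM_SMMU_DOMAIN_NESTED:
            s2_cfg = &smmu_domain->s2_cfg;
            break;
        default:
            break;                      /* bypass: neither config set */
        }
    }

    if (!smmu_domain || !(s1_cfg || s2_cfg)) {
        /* no translation: abort if bypass is disabled, else pass through */
        if (!smmu_domain && disable_bypass)
            val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
        else
            val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
    }
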
1590 static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
1599 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
1616 if (!atomic_read(&smmu_domain->nr_ats_masters))
1621 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
1622 list_for_each_entry(master, &smmu_domain->devices, domain_head) {
1628 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
1631 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
1633 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
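
arm_smmu_atc_inv_domain() (1590-1633) has two cheap early-outs before taking the lock: no ATS feature at all, and no masters currently using ATS. In the real code an smp_mb() orders the nr_ats_masters read after the preceding TLB invalidation, pairing with arm_smmu_enable_ats(). Roughly:

    struct arm_smmu_cmdq_ent cmd;
    struct arm_smmu_cmdq_batch cmds = {};

    if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
        return 0;

    /* pairs with the inc in arm_smmu_enable_ats(): don't skip the ATC
     * invalidation for a master that raced in after our TLB flush */
    smp_mb();
    if (!atomic_read(&smmu_domain->nr_ats_masters))
        return 0;

    arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);

    spin_lock_irqsave(&smmu_domain->devices_lock, flags);
    list_for_each_entry(master, &smmu_domain->devices, domain_head) {
        if (!master->ats_enabled)
            continue;
        for (i = 0; i < master->num_sids; i++) {
            cmd.atc.sid = master->sids[i];
            arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
        }
    }
    spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

    return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
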
1639 struct arm_smmu_domain *smmu_domain = cookie;
1640 struct arm_smmu_device *smmu = smmu_domain->smmu;
1650 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1651 arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
1654 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
1658 arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
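
arm_smmu_tlb_inv_context() (1639-1658) invalidates the whole address space: by ASID for stage 1, by VMID for stage 2, and then drops any ATC entries the endpoints may hold (iova/size of 0 meaning "everything"). Sketch:

    struct arm_smmu_cmdq_ent cmd;

    if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
        arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
    } else {
        cmd.opcode    = CMDQ_OP_TLBI_S12_VMALL;
        cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
        arm_smmu_cmdq_issue_sync(smmu);
    }
    arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
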
1663 struct arm_smmu_domain *smmu_domain)
1665 struct arm_smmu_device *smmu = smmu_domain->smmu;
1678 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1680 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
1683 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
1688 tg = __ffs(smmu_domain->domain.pgsize_bitmap);
1735 arm_smmu_atc_inv_domain(smmu_domain, 0, start, size);
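
arm_smmu_tlb_inv_range() (1663-1735) picks the opcode per stage (TLBI_NH_VA keyed by ASID, TLBI_S2_IPA keyed by VMID), then walks the range; on SMMUv3.2 with range invalidation the translation granule derived from pgsize_bitmap at 1688 lets one command cover many pages. It ends with an ATC invalidation of the same range. Heavily condensed (loop bookkeeping hedged):

    if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
        cmd.opcode    = CMDQ_OP_TLBI_NH_VA;
        cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
    } else {
        cmd.opcode    = CMDQ_OP_TLBI_S2_IPA;
        cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
    }
    cmd.tlbi.leaf = leaf;

    while (iova < end) {
        /* with ARM_SMMU_FEAT_RANGE_INV, one command spans a whole run of
         * granules; without it, inv_range is a single page-table granule */
        cmd.tlbi.addr = iova;
        arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
        iova += inv_range;
    }
    arm_smmu_cmdq_batch_submit(smmu, &cmds);

    /* endpoints may still hold ATS translations for this range */
    arm_smmu_atc_inv_domain(smmu_domain, 0, start, size);
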
1742 struct arm_smmu_domain *smmu_domain = cookie;
1743 struct iommu_domain *domain = &smmu_domain->domain;
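
arm_smmu_tlb_inv_page_nosync() (1742-) issues nothing immediately; it only records the page in the unmap gather, which arm_smmu_iotlb_sync() (2278-2281) later flushes in one go:

    /* no command here; iotlb_sync flushes the accumulated gather */
    iommu_iotlb_gather_add_page(domain, gather, iova, granule);
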
1782 struct arm_smmu_domain *smmu_domain;
1794 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1795 if (!smmu_domain)
1799 iommu_get_dma_cookie(&smmu_domain->domain)) {
1800 kfree(smmu_domain);
1804 mutex_init(&smmu_domain->init_mutex);
1805 INIT_LIST_HEAD(&smmu_domain->devices);
1806 spin_lock_init(&smmu_domain->devices_lock);
1808 return &smmu_domain->domain;
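
arm_smmu_domain_alloc() (1782-1808) only sets up software state; hardware configuration is deferred to attach time, when the owning SMMU instance is known. Reassembled from the matches, roughly:

    static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
    {
        struct arm_smmu_domain *smmu_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED &&
            type != IOMMU_DOMAIN_DMA &&
            type != IOMMU_DOMAIN_IDENTITY)
            return NULL;

        smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
        if (!smmu_domain)
            return NULL;

        /* DMA domains need a cookie for the IOVA allocator */
        if (type == IOMMU_DOMAIN_DMA &&
            iommu_get_dma_cookie(&smmu_domain->domain)) {
            kfree(smmu_domain);
            return NULL;
        }

        mutex_init(&smmu_domain->init_mutex);
        INIT_LIST_HEAD(&smmu_domain->devices);
        spin_lock_init(&smmu_domain->devices_lock);

        return &smmu_domain->domain;
    }
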
1831 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1832 struct arm_smmu_device *smmu = smmu_domain->smmu;
1835 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
1838 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1839 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1844 arm_smmu_free_cd_tables(smmu_domain);
1848 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
1853 kfree(smmu_domain);
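
arm_smmu_domain_free() (1831-1853) releases resources in reverse order: page-table ops first, then the stage-specific identifiers (ASID plus CD tables for stage 1, VMID for stage 2), and finally the domain itself. Sketch, with the ASID/VMID allocator helpers hedged:

    free_io_pgtable_ops(smmu_domain->pgtbl_ops);

    if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
        struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

        if (cfg->cdcfg.cdtab)
            arm_smmu_free_cd_tables(smmu_domain);
        /* return cfg->cd.asid to the ASID allocator here */
    } else {
        struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

        if (cfg->vmid)
            arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
    }

    kfree(smmu_domain);
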
1856 static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
1862 struct arm_smmu_device *smmu = smmu_domain->smmu;
1863 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1877 ret = arm_smmu_alloc_cd_tables(smmu_domain);
1897 ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
1905 arm_smmu_free_cd_tables(smmu_domain);
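
arm_smmu_domain_finalise_s1() (1856-) allocates an ASID and the CD tables, fills cfg->cd (TTBR, TCR, MAIR) from the io-pgtable configuration, then installs it as the default context descriptor for SSID 0; on failure the tables are freed again (1905). Condensed:

    ret = arm_smmu_alloc_cd_tables(smmu_domain);
    if (ret)
        goto out_free_asid;

    cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
    /* cfg->cd.tcr and cfg->cd.mair likewise, from pgtbl_cfg */

    /* SSID 0 is the default substream: install the domain's CD there */
    ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
    if (ret)
        goto out_free_cd_tables;

    return 0;

    out_free_cd_tables:
        arm_smmu_free_cd_tables(smmu_domain);
    out_free_asid:
        /* release the ASID allocated before the table allocation */
        return ret;
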
1913 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
1918 struct arm_smmu_device *smmu = smmu_domain->smmu;
1919 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
1950 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1951 struct arm_smmu_device *smmu = smmu_domain->smmu;
1954 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
1960 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1962 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1964 switch (smmu_domain->stage) {
1992 if (smmu_domain->non_strict)
1995 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
2003 ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
2009 smmu_domain->pgtbl_ops = pgtbl_ops;
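
arm_smmu_domain_finalise() (1950-2009) first clamps the requested stage to what the hardware implements, applies the non-strict quirk (1992), then allocates the io-pgtable and hands off to the stage-specific finaliser. Sketch:

    if (domain->type == IOMMU_DOMAIN_IDENTITY) {
        smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
        return 0;
    }

    /* restrict the stage to what the hardware actually supports */
    if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
        smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
    if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
        smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

    switch (smmu_domain->stage) {
    case ARM_SMMU_DOMAIN_S1:
        fmt = ARM_64_LPAE_S1;
        finalise_stage_fn = arm_smmu_domain_finalise_s1;
        break;
    case ARM_SMMU_DOMAIN_NESTED:
    case ARM_SMMU_DOMAIN_S2:
        fmt = ARM_64_LPAE_S2;
        finalise_stage_fn = arm_smmu_domain_finalise_s2;
        break;
    default:
        return -EINVAL;
    }

    /* pgtbl_cfg carries ias/oas/pgsize_bitmap taken from SMMU caps */
    if (smmu_domain->non_strict)
        pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

    pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
    if (!pgtbl_ops)
        return -ENOMEM;

    ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
    if (ret < 0) {
        free_io_pgtable_ops(pgtbl_ops);
        return ret;
    }

    smmu_domain->pgtbl_ops = pgtbl_ops;
    return 0;
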
2075 struct arm_smmu_domain *smmu_domain = master->domain;
2085 atomic_inc(&smmu_domain->nr_ats_masters);
2086 arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
2093 struct arm_smmu_domain *smmu_domain = master->domain;
2105 atomic_dec(&smmu_domain->nr_ats_masters);
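
The ordering in these two functions is deliberate: arm_smmu_enable_ats() (2075-2086) bumps nr_ats_masters and flushes the ATC before the endpoint can cache its first translation, while arm_smmu_disable_ats() (2093-2105) only drops the count after the endpoint's ATC has been cleaned, so arm_smmu_atc_inv_domain() never skips a live master. Sketch of both halves (pdev/stu locals hedged):

    /* enable: count first, flush, then let the endpoint start caching */
    atomic_inc(&smmu_domain->nr_ats_masters);
    arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
    if (pci_enable_ats(pdev, stu))
        dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);

    /* disable: quiesce the endpoint, clean its ATC, then drop the count */
    pci_disable_ats(to_pci_dev(master->dev));
    arm_smmu_atc_inv_master(master);
    atomic_dec(&smmu_domain->nr_ats_masters);
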
2158 struct arm_smmu_domain *smmu_domain = master->domain;
2160 if (!smmu_domain)
2165 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
2167 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
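
arm_smmu_detach_dev() (2158-2167) is the inverse of the list_add in attach: disable ATS first, then unlink the master under devices_lock. Sketch:

    if (!smmu_domain)
        return;

    arm_smmu_disable_ats(master);

    spin_lock_irqsave(&smmu_domain->devices_lock, flags);
    list_del(&master->domain_head);
    spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

    master->domain = NULL;
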
2180 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2201 mutex_lock(&smmu_domain->init_mutex);
2203 if (!smmu_domain->smmu) {
2204 smmu_domain->smmu = smmu;
2207 smmu_domain->smmu = NULL;
2210 } else if (smmu_domain->smmu != smmu) {
2213 dev_name(smmu_domain->smmu->dev),
2217 } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
2218 master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
2221 smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
2226 master->domain = smmu_domain;
2228 if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
2233 spin_lock_irqsave(&smmu_domain->devices_lock, flags);
2234 list_add(&master->domain_head, &smmu_domain->devices);
2235 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
2240 mutex_unlock(&smmu_domain->init_mutex);
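
arm_smmu_attach_dev() (2180-2240) serialises on init_mutex: the first attach finalises the domain for this SMMU instance; later attaches only verify compatibility (same SMMU, and for stage 1 a matching SSID width, since the CD table was sized for it). Only then does the master join the devices list. Condensed control flow:

    mutex_lock(&smmu_domain->init_mutex);

    if (!smmu_domain->smmu) {
        smmu_domain->smmu = smmu;
        ret = arm_smmu_domain_finalise(domain, master);
        if (ret)
            smmu_domain->smmu = NULL;   /* undo on failure */
    } else if (smmu_domain->smmu != smmu) {
        ret = -EINVAL;                  /* cross-SMMU attach */
    } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
               master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
        ret = -EINVAL;                  /* CD table sized for another width */
    }
    if (ret)
        goto out_unlock;

    master->domain = smmu_domain;

    if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
        master->ats_enabled = arm_smmu_ats_supported(master);

    spin_lock_irqsave(&smmu_domain->devices_lock, flags);
    list_add(&master->domain_head, &smmu_domain->devices);
    spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

    out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
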
2258 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2259 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
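
The map/unmap/iova_to_phys callbacks are thin wrappers that delegate to the io-pgtable ops installed at finalise time; 2258-2259 is the common preamble. The exact callback at these lines is not shown in the matches, but the map path likely looks like:

    struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

    if (!ops)
        return -ENODEV;                 /* domain not finalised yet */

    return ops->map(ops, iova, paddr, size, prot, gfp);
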
2269 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2271 if (smmu_domain->smmu)
2272 arm_smmu_tlb_inv_context(smmu_domain);
2278 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2281 gather->pgsize, true, smmu_domain);
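
The two flush callbacks (2269-2281) are small: flush_iotlb_all does a full context invalidation, guarded on smmu_domain->smmu since an unattached domain has nothing to flush, and iotlb_sync converts the gather accumulated during unmap into one ranged invalidation. Roughly:

    static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
    {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (smmu_domain->smmu)          /* nothing to flush before attach */
            arm_smmu_tlb_inv_context(smmu_domain);
    }

    static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
                                    struct iommu_iotlb_gather *gather)
    {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        /* size from gather->start/end; the exact off-by-one handling
         * varies between kernel versions */
        size_t size = gather->end - gather->start;

        arm_smmu_tlb_inv_range(gather->start, size,
                               gather->pgsize, true, smmu_domain);
    }
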
2426 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2432 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
2441 *(int *)data = smmu_domain->non_strict;
2456 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
2458 mutex_lock(&smmu_domain->init_mutex);
2464 if (smmu_domain->smmu) {
2470 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
2472 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
2481 smmu_domain->non_strict = *(int *)data;
2492 mutex_unlock(&smmu_domain->init_mutex);
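
Both attribute paths (the get side at 2426-2441, the set side at 2456-2492) run under init_mutex. DOMAIN_ATTR_NESTING may only be changed before the domain is finalised, because the stage is baked into the STE and CD from then on; the non-strict flag set at 2481 merely feeds the io-pgtable quirk checked at 1992. Condensed sketch of the nesting case, attribute names as of this kernel era:

    mutex_lock(&smmu_domain->init_mutex);

    switch (attr) {
    case DOMAIN_ATTR_NESTING:
        /* stage is fixed once a device has been attached */
        if (smmu_domain->smmu) {
            ret = -EPERM;
            goto out_unlock;
        }
        smmu_domain->stage = *(int *)data ? ARM_SMMU_DOMAIN_NESTED
                                          : ARM_SMMU_DOMAIN_S1;
        break;
    /* the DMA-domain flush-queue attribute just sets non_strict (2481) */
    default:
        ret = -ENODEV;
    }

    out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
        return ret;
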