Lines matching refs: s390_domain

24 struct s390_domain {
37 static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
39 return container_of(dom, struct s390_domain, domain);
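
Taken together, these fragments appear to be matches from the s390 PCI IOMMU driver; the leading number on each line is the line number of the match in that source file. The members dereferenced further down (dma_table, dma_table_lock, list_lock, devices) suggest the structure behind line 24 looks roughly like the sketch below; the exact field order and any members not referenced here are assumptions.

struct s390_domain {
	struct iommu_domain	domain;		/* embedded generic IOMMU domain (line 24) */
	struct list_head	devices;	/* zPCI devices attached to this domain */
	unsigned long		*dma_table;	/* root of the DMA translation table */
	spinlock_t		dma_table_lock;	/* serializes translation-table updates */
	spinlock_t		list_lock;	/* protects the devices list */
};

/* Lines 37-39: recover the wrapper from the embedded iommu_domain. */
static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}
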
56 struct s390_domain *s390_domain;
61 s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
62 if (!s390_domain)
65 s390_domain->dma_table = dma_alloc_cpu_table();
66 if (!s390_domain->dma_table) {
67 kfree(s390_domain);
71 spin_lock_init(&s390_domain->dma_table_lock);
72 spin_lock_init(&s390_domain->list_lock);
73 INIT_LIST_HEAD(&s390_domain->devices);
75 return &s390_domain->domain;
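
Lines 56-75 fall in the domain allocation path. A sketch of how those fragments likely fit together; the function name and the exact error returns are assumptions, everything else follows directly from the matches.

static struct iommu_domain *s390_domain_alloc(unsigned domain_type)	/* name/signature assumed */
{
	struct s390_domain *s390_domain;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);	/* line 61 */
	if (!s390_domain)
		return NULL;

	/* Allocate the root translation table; undo the kzalloc on failure. */
	s390_domain->dma_table = dma_alloc_cpu_table();			/* line 65 */
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}

	spin_lock_init(&s390_domain->dma_table_lock);
	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD(&s390_domain->devices);

	return &s390_domain->domain;	/* line 75: hand back the embedded generic domain */
}
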
80 struct s390_domain *s390_domain = to_s390_domain(domain);
82 dma_cleanup_tables(s390_domain->dma_table);
83 kfree(s390_domain);
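
The matching teardown at lines 80-83, sketched with an assumed function name:

static void s390_domain_free(struct iommu_domain *domain)	/* name assumed */
{
	struct s390_domain *s390_domain = to_s390_domain(domain);	/* line 80 */

	dma_cleanup_tables(s390_domain->dma_table);	/* line 82: release the translation table */
	kfree(s390_domain);
}
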
89 struct s390_domain *s390_domain = to_s390_domain(domain);
105 zdev->dma_table = s390_domain->dma_table;
111 spin_lock_irqsave(&s390_domain->list_lock, flags);
113 if (list_empty(&s390_domain->devices)) {
121 spin_unlock_irqrestore(&s390_domain->list_lock, flags);
125 zdev->s390_domain = s390_domain;
126 list_add(&domain_device->list, &s390_domain->devices);
127 spin_unlock_irqrestore(&s390_domain->list_lock, flags);
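
Lines 89-127 are the attach path: the device's dma_table pointer is switched to the domain's table (line 105), and the device is added to the domain's list under list_lock (lines 111-127). The sketch below fills the gap between lines 113 and 121 with the usual pattern for this kind of driver, where the first attached device fixes the domain's DMA aperture and later devices are checked against it; that part, the to_zpci_dev() helper, the s390_domain_device wrapper, and the omitted hardware (re)registration steps are assumptions, not shown in the fragments.

static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)			/* name assumed */
{
	struct s390_domain *s390_domain = to_s390_domain(domain);	/* line 89 */
	struct zpci_dev *zdev = to_zpci_dev(dev);			/* helper name assumed */
	struct s390_domain_device *domain_device;			/* wrapper type assumed */
	unsigned long flags;

	domain_device = kzalloc(sizeof(*domain_device), GFP_KERNEL);
	if (!domain_device)
		return -ENOMEM;

	/* line 105: the device now translates through this domain's table */
	zdev->dma_table = s390_domain->dma_table;

	spin_lock_irqsave(&s390_domain->list_lock, flags);		/* line 111 */
	if (list_empty(&s390_domain->devices)) {			/* line 113 */
		/* First device: presumably adopt its DMA range as the domain aperture. */
		domain->geometry.aperture_start = zdev->start_dma;
		domain->geometry.aperture_end = zdev->end_dma;
		domain->geometry.force_aperture = true;
	} else if (domain->geometry.aperture_start != zdev->start_dma ||
		   domain->geometry.aperture_end != zdev->end_dma) {
		/* Mismatched DMA range: presumably rejected (unlock at line 121). */
		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
		kfree(domain_device);
		return -EINVAL;
	}
	domain_device->zdev = zdev;
	zdev->s390_domain = s390_domain;				/* line 125 */
	list_add(&domain_device->list, &s390_domain->devices);		/* line 126 */
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);	/* line 127 */

	return 0;
}
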
141 struct s390_domain *s390_domain = to_s390_domain(domain);
150 spin_lock_irqsave(&s390_domain->list_lock, flags);
151 list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
160 spin_unlock_irqrestore(&s390_domain->list_lock, flags);
163 zdev->s390_domain = NULL;
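
Lines 141-163 are the corresponding detach path: the device is looked up and removed from the domain's list under list_lock, and its back-pointer is cleared. A sketch; the helper and wrapper names are assumptions, and any hardware re-registration after the removal is not shown in the fragments and is left out here.

static void s390_iommu_detach_device(struct iommu_domain *domain,
				     struct device *dev)		/* name assumed */
{
	struct s390_domain *s390_domain = to_s390_domain(domain);	/* line 141 */
	struct zpci_dev *zdev = to_zpci_dev(dev);			/* helper name assumed */
	struct s390_domain_device *domain_device, *tmp;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&s390_domain->list_lock, flags);		/* line 150 */
	list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,	/* line 151 */
				 list) {
		if (domain_device->zdev == zdev) {
			list_del(&domain_device->list);
			kfree(domain_device);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);	/* line 160 */

	if (found)
		zdev->s390_domain = NULL;				/* line 163 */
}
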
192 if (zdev && zdev->s390_domain) {
199 static int s390_iommu_update_trans(struct s390_domain *s390_domain,
210 if (dma_addr < s390_domain->domain.geometry.aperture_start ||
211 dma_addr + size > s390_domain->domain.geometry.aperture_end)
218 spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
220 entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
230 spin_lock(&s390_domain->list_lock);
231 list_for_each_entry(domain_device, &s390_domain->devices, list) {
237 spin_unlock(&s390_domain->list_lock);
245 entry = dma_walk_cpu_trans(s390_domain->dma_table,
252 spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
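
Lines 199-252 form the central helper, s390_iommu_update_trans(): it validates the range against the domain aperture, walks the translation table for each page under dma_table_lock, and then, still holding that lock, takes list_lock to refresh the translation on every attached device. The second dma_walk_cpu_trans() at line 245 suggests a rollback of already-written entries on failure. A sketch; the per-entry update helper, the zpci_refresh_trans() call, and the rollback details are assumptions beyond what the fragments show.

static int s390_iommu_update_trans(struct s390_domain *s390_domain,
				   unsigned long pa, dma_addr_t dma_addr,
				   size_t size, int flags)
{
	struct s390_domain_device *domain_device;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	unsigned long irq_flags, nr_pages, i;
	unsigned long *entry;
	int rc = 0;

	/* Lines 210-211: refuse anything outside the domain aperture. */
	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
	    dma_addr + size > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);	/* line 218 */
	for (i = 0; i < nr_pages; i++) {
		/* Line 220: find (or build) the table entry for this IOVA. */
		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			break;
		}
		dma_update_cpu_trans(entry, page_addr, flags);	/* per-entry update helper assumed */
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	if (!rc) {
		/* Lines 230-237: tell every attached device about the changed translations. */
		spin_lock(&s390_domain->list_lock);
		list_for_each_entry(domain_device, &s390_domain->devices, list) {
			rc = zpci_refresh_trans((u64) domain_device->zdev->fh << 32,
						dma_addr - nr_pages * PAGE_SIZE,
						nr_pages * PAGE_SIZE);	/* refresh call assumed */
			if (rc)
				break;
		}
		spin_unlock(&s390_domain->list_lock);
	}

	/* On failure, the walk at line 245 presumably revisits the entries written so far
	 * and invalidates them again before the lock is dropped. */

	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);	/* line 252 */
	return rc;
}
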
260 struct s390_domain *s390_domain = to_s390_domain(domain);
269 rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
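
Lines 260-269 are the map callback, a thin wrapper that turns the generic IOMMU prot flags into the platform's PTE flags and delegates to s390_iommu_update_trans(). A sketch; the ZPCI_* flag names come from the s390 PCI headers and the exact prot handling is an assumption.

static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)	/* signature assumed */
{
	struct s390_domain *s390_domain = to_s390_domain(domain);	/* line 260 */
	int flags = ZPCI_PTE_VALID;

	if (!(prot & IOMMU_READ) && !(prot & IOMMU_WRITE))
		return -EINVAL;
	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;	/* write-protect read-only mappings */

	/* Line 269: install the translation and refresh attached devices. */
	return s390_iommu_update_trans(s390_domain, (unsigned long) paddr,
				       iova, size, flags);
}
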
278 struct s390_domain *s390_domain = to_s390_domain(domain);
290 rto = s390_domain->dma_table;
292 spin_lock_irqsave(&s390_domain->dma_table_lock, flags);
301 spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags);
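
Lines 278-301 walk the translation table by hand to implement iova_to_phys: the root table (rto, line 290) is indexed, then the segment and page tables, all under dma_table_lock. A sketch; the calc_*, get_*, and *_isvalid helpers come from the s390 PCI DMA headers, and their exact use here is an assumption.

static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)		/* name assumed */
{
	struct s390_domain *s390_domain = to_s390_domain(domain);	/* line 278 */
	unsigned long *sto, *pto, *rto, flags;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	rtx = calc_rtx(iova);		/* region-table index */
	sx = calc_sx(iova);		/* segment-table index */
	px = calc_px(iova);		/* page-table index */
	rto = s390_domain->dma_table;	/* line 290: root of the walk */

	spin_lock_irqsave(&s390_domain->dma_table_lock, flags);	/* line 292 */
	if (rto && reg_entry_isvalid(rto[rtx])) {
		sto = get_rt_sto(rto[rtx]);
		if (sto && reg_entry_isvalid(sto[sx])) {
			pto = get_st_pto(sto[sx]);
			if (pto && pt_entry_isvalid(pto[px]))
				phys = pto[px] & ZPCI_PTE_ADDR_MASK;
		}
	}
	spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags);	/* line 301 */

	return phys;
}
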
310 struct s390_domain *s390_domain = to_s390_domain(domain);
319 rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
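
Lines 310-319 are the unmap callback, the mirror image of map: it reuses s390_iommu_update_trans() with invalid-PTE flags. A sketch; looking up the currently mapped physical address first is an assumption, as is the exact signature.

static size_t s390_iommu_unmap(struct iommu_domain *domain,
			       unsigned long iova, size_t size)	/* signature assumed */
{
	struct s390_domain *s390_domain = to_s390_domain(domain);	/* line 310 */
	phys_addr_t paddr;
	int rc;

	paddr = s390_iommu_iova_to_phys(domain, iova);
	if (!paddr)
		return 0;	/* nothing mapped at this IOVA */

	/* Line 319: clear the translation and refresh attached devices. */
	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr,
				     iova, size, ZPCI_PTE_INVALID);
	if (rc)
		return 0;

	return size;
}
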