Lines matching refs:dma_domain
57 static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
59 u32 win_cnt = dma_domain->win_cnt;
60 struct dma_window *win_ptr = &dma_domain->win_arr[0];
63 geom = &dma_domain->iommu_domain.geometry;
65 if (!win_cnt || !dma_domain->geom_size) {
75 subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
78 win_ptr = &dma_domain->win_arr[wnd];
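A minimal sketch of the subwindow lookup that lines 75-78 belong to, assuming (as the ilog2() use implies) that geom_size and win_cnt are powers of two; geom and wnd are the locals visible at lines 63 and 78, everything else mirrors the fields above:

    /* Resolve an iova to its subwindow when the geometry is split
     * into win_cnt equal parts: round iova down to a subwindow
     * boundary, then offset from the aperture start to get the index. */
    u64 subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
    dma_addr_t subwin_iova = iova & ~(subwin_size - 1);
    u32 wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);

    win_ptr = &dma_domain->win_arr[wnd];
    /* If win_ptr->valid, the physical address is win_ptr->paddr plus
     * the offset of iova within the window. */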
87 static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
89 struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
93 for (i = 0; i < dma_domain->win_cnt; i++) {
97 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
101 dma_domain->snoop_id,
102 dma_domain->stash_id,
117 static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
120 struct dma_window *wnd = &dma_domain->win_arr[0];
121 phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
129 dma_domain->snoop_id, dma_domain->stash_id,
139 static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
141 if (dma_domain->win_cnt > 1)
142 return map_subwins(liodn, dma_domain);
144 return map_win(liodn, dma_domain);
148 static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
151 struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
155 if (dma_domain->win_cnt > 1) {
156 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
160 dma_domain->snoop_id,
161 dma_domain->stash_id,
170 wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
176 dma_domain->snoop_id, dma_domain->stash_id,
188 static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
195 if (!dma_domain->win_arr) {
202 for (i = 0; i < dma_domain->win_cnt; i++) {
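The loop at line 202 pushes a new stash destination into every (sub)window entry of the LIODN. A sketch of its body, assuming pamu_update_paace_stash() from the companion low-level PAMU code:

    for (i = 0; i < dma_domain->win_cnt; i++) {
        /* Rewrite the stash field of window i's PAACE/SPAACE entry. */
        ret = pamu_update_paace_stash(liodn, i, val);
        if (ret)
            return ret;
    }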
219 struct fsl_dma_domain *dma_domain,
237 window_size = dma_domain->geom_size;
243 0, dma_domain->snoop_id,
244 dma_domain->stash_id, win_cnt, 0);
260 0, dma_domain->snoop_id,
261 dma_domain->stash_id,
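Lines 237-261 are the core of pamu_set_liodn(): the primary PAACE is programmed to span the whole geometry, and when subwindows are in use each SPAACE gets the same snoop/stash IDs. A compressed sketch with locking and error handling elided; argument positions not visible above are filled in from the driver's low-level prototypes and should be read as assumptions:

    window_size = dma_domain->geom_size;
    /* The primary entry covers the full aperture; a win_cnt > 1 here
     * tells the PAMU to index into win_cnt subwindow entries. */
    ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
                             0, dma_domain->snoop_id,
                             dma_domain->stash_id, win_cnt, 0);
    if (win_cnt > 1) {
        subwin_size = window_size >> ilog2(win_cnt);
        for (i = 0; i < win_cnt; i++)
            /* Subwindows start out disabled (enable == 0) and are
             * turned on later when their window is actually mapped. */
            ret = pamu_config_spaace(liodn, win_cnt, i, subwin_size,
                                     omi_index, 0, dma_domain->snoop_id,
                                     dma_domain->stash_id, 0, 0);
    }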
331 static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
336 spin_lock_irqsave(&dma_domain->domain_lock, flags);
338 list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
340 remove_device_ref(info, dma_domain->win_cnt);
342 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
345 static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
356 if (old_domain_info && old_domain_info->domain != dma_domain) {
366 info->domain = dma_domain;
368 list_add(&info->link, &dma_domain->devices);
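A sketch of the bookkeeping behind lines 345-368: each attached LIODN gets a device_domain_info record tying device, LIODN, and domain together on the domain's device list. The cache name in the allocation is assumed; the field names follow the lines above:

    struct device_domain_info *info;

    info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
    info->dev = dev;
    info->liodn = liodn;
    info->domain = dma_domain;
    /* Chain the record onto the domain so detach_device() and the
     * update_domain_*() walkers at lines 437-482 can find it. */
    list_add(&info->link, &dma_domain->devices);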
382 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
388 return get_phys_addr(dma_domain, iova);
398 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
401 detach_device(NULL, dma_domain);
403 dma_domain->enabled = 0;
404 dma_domain->mapped = 0;
406 kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
411 struct fsl_dma_domain *dma_domain;
416 dma_domain = iommu_alloc_dma_domain();
417 if (!dma_domain) {
418 pr_debug("dma_domain allocation failed\n");
422 dma_domain->iommu_domain.geometry.aperture_start = 0;
423 dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
424 dma_domain->iommu_domain.geometry.force_aperture = true;
426 return &dma_domain->iommu_domain;
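The aperture arithmetic at lines 422-424, spelled out: the PAMU translates a fixed 36-bit input address space, so every domain is created with the same forced aperture.

    /* 1ULL << 36       == 0x1000000000 (64 GiB)
     * (1ULL << 36) - 1 == 0xfffffffff  (highest valid iova)
     * force_aperture = true makes the IOMMU core reject DMA
     * addresses outside [0, 2^36). */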
430 static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
437 list_for_each_entry(info, &dma_domain->devices, link) {
438 ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
448 static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
453 list_for_each_entry(info, &dma_domain->devices, link) {
454 ret = update_liodn_stash(info->liodn, dma_domain, val);
463 static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
468 list_for_each_entry(info, &dma_domain->devices, link) {
469 ret = update_liodn(info->liodn, dma_domain, wnd_nr);
476 static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
481 list_for_each_entry(info, &dma_domain->devices, link) {
482 if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
485 dma_domain->enabled = 0;
496 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
500 spin_lock_irqsave(&dma_domain->domain_lock, flags);
501 if (!dma_domain->win_arr) {
503 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
507 if (wnd_nr >= dma_domain->win_cnt) {
509 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
513 if (dma_domain->win_arr[wnd_nr].valid) {
514 ret = disable_domain_win(dma_domain, wnd_nr);
516 dma_domain->win_arr[wnd_nr].valid = 0;
517 dma_domain->mapped--;
521 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
527 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
539 spin_lock_irqsave(&dma_domain->domain_lock, flags);
540 if (!dma_domain->win_arr) {
542 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
546 if (wnd_nr >= dma_domain->win_cnt) {
548 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
552 win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
555 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
559 if (dma_domain->win_cnt == 1) {
560 if (dma_domain->enabled) {
562 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
569 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
574 wnd = &dma_domain->win_arr[wnd_nr];
580 ret = update_domain_mapping(dma_domain, wnd_nr);
583 dma_domain->mapped++;
590 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
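A worked example of the size limit computed at line 552, with illustrative numbers: subwindows partition the geometry evenly, so no single window may exceed geom_size / win_cnt.

    /* geom_size = 1 GiB (0x40000000), win_cnt = 4:
     *   win_size = 0x40000000 >> ilog2(4) = 0x10000000 (256 MiB)
     * A request with size > win_size fails with -EINVAL; the
     * win_cnt == 1 branch at lines 559-562 additionally refuses to
     * reprogram a single-window domain that is already enabled. */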
599 static int handle_attach_device(struct fsl_dma_domain *dma_domain,
604 struct iommu_domain *domain = &dma_domain->iommu_domain;
608 spin_lock_irqsave(&dma_domain->domain_lock, flags);
618 attach_device(dma_domain, liodn[i], dev);
624 if (dma_domain->win_arr) {
625 u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
627 ret = pamu_set_liodn(liodn[i], dev, dma_domain,
631 if (dma_domain->mapped) {
636 ret = map_liodn(liodn[i], dma_domain);
642 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
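A compressed sketch of the attach loop at lines 608-642: every LIODN of the device (num of them, per the function's count argument) is bound to the domain, and any geometry/window state that already exists is programmed into hardware right away. Error handling and early exits are elided:

    for (i = 0; i < num; i++) {
        attach_device(dma_domain, liodn[i], dev);
        /* Window configuration may happen before or after attach; if
         * it already exists, bring this LIODN up to date now. */
        if (dma_domain->win_arr) {
            u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

            ret = pamu_set_liodn(liodn[i], dev, dma_domain,
                                 &domain->geometry, win_cnt);
            if (dma_domain->mapped)
                ret = map_liodn(liodn[i], dma_domain);
        }
    }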
650 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
675 ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
687 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
710 detach_device(dev, dma_domain);
718 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
733 spin_lock_irqsave(&dma_domain->domain_lock, flags);
734 if (dma_domain->enabled) {
736 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
743 dma_domain->geom_size = geom_size;
745 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
751 static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
757 spin_lock_irqsave(&dma_domain->domain_lock, flags);
759 memcpy(&dma_domain->dma_stash, stash_attr,
762 dma_domain->stash_id = get_stash_id(stash_attr->cache,
764 if (dma_domain->stash_id == ~(u32)0) {
766 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
770 ret = update_domain_stash(dma_domain, dma_domain->stash_id);
772 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
778 static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
784 spin_lock_irqsave(&dma_domain->domain_lock, flags);
786 if (enable && !dma_domain->mapped) {
788 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
792 dma_domain->enabled = enable;
793 list_for_each_entry(info, &dma_domain->devices, link) {
800 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
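A sketch of the enable walk at lines 784-800: enabling is refused until at least one window has been mapped, then every attached LIODN is toggled in hardware. pamu_enable_liodn()/pamu_disable_liodn() are assumed from the companion PAMU code:

    if (enable && !dma_domain->mapped)
        return -ENODEV;    /* no window mapped yet; errno illustrative */

    dma_domain->enabled = enable;
    list_for_each_entry(info, &dma_domain->devices, link)
        ret = enable ? pamu_enable_liodn(info->liodn)
                     : pamu_disable_liodn(info->liodn);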
807 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
811 spin_lock_irqsave(&dma_domain->domain_lock, flags);
813 if (dma_domain->enabled) {
815 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
820 if (!dma_domain->geom_size) {
822 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
832 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
836 ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
839 kfree(dma_domain->win_arr);
840 dma_domain->win_arr = kcalloc(w_count,
841 sizeof(*dma_domain->win_arr),
843 if (!dma_domain->win_arr) {
844 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
847 dma_domain->win_cnt = w_count;
849 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
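A sketch of the preconditions visible at lines 811-847, plus the power-of-two constraint implied by the ilog2() arithmetic used throughout; errno choices are illustrative and is_power_of_2() is the <linux/log2.h> helper:

    if (dma_domain->enabled)        /* line 813: no live reconfiguration */
        return -EBUSY;
    if (!dma_domain->geom_size)     /* line 820: geometry must be set first */
        return -EINVAL;
    if (!w_count || !is_power_of_2(w_count))  /* assumed; implied by ilog2() */
        return -EINVAL;

    kfree(dma_domain->win_arr);     /* lines 839-847: (re)size the window array */
    dma_domain->win_arr = kcalloc(w_count, sizeof(*dma_domain->win_arr),
                                  GFP_ATOMIC);
    if (!dma_domain->win_arr)
        return -ENOMEM;
    dma_domain->win_cnt = w_count;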
857 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
865 ret = configure_domain_stash(dma_domain, data);
868 ret = configure_domain_dma_state(dma_domain, *(int *)data);
885 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
890 memcpy(data, &dma_domain->dma_stash,
894 *(int *)data = dma_domain->enabled;
900 *(u32 *)data = dma_domain->win_cnt;
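For context, a sketch of how a caller (VFIO was the main in-tree user) would drive the handlers above through the legacy iommu_domain_set_attr()/iommu_domain_get_attr() interface these kernels exposed. The enum values match the cases visible in the handlers; the stash fields follow the cache/cpu usage at lines 762-763, and the stash header's location varies by kernel version:

    #include <linux/iommu.h>
    #include <asm/fsl_pamu_stash.h>    /* struct pamu_stash_attribute */

    struct pamu_stash_attribute stash = {
        .cpu   = 0,                    /* stash into CPU 0's cache */
        .cache = PAMU_ATTR_CACHE_L1,
    };
    int enable = 1;
    u32 win_cnt;

    iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_STASH, &stash);
    iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);
    iommu_domain_get_attr(domain, DOMAIN_ATTR_WINDOWS, &win_cnt);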