Lines Matching defs:dev_state

69 struct device_state *dev_state;
84 static void free_pasid_states(struct device_state *dev_state);
88 struct device_state *dev_state;
90 list_for_each_entry(dev_state, &state_list, list) {
91 if (dev_state->sbdf == sbdf)
92 return dev_state;
100 struct device_state *dev_state;
104 dev_state = __get_device_state(sbdf);
105 if (dev_state != NULL)
106 atomic_inc(&dev_state->count);
109 return dev_state;
112 static void free_device_state(struct device_state *dev_state)
117 free_pasid_states(dev_state);
123 wait_event(dev_state->wq, !atomic_read(&dev_state->count));
129 group = iommu_group_get(&dev_state->pdev->dev);
133 iommu_detach_group(dev_state->domain, group);
138 iommu_domain_free(dev_state->domain);
141 kfree(dev_state);
144 static void put_device_state(struct device_state *dev_state)
146 if (atomic_dec_and_test(&dev_state->count))
147 wake_up(&dev_state->wq);
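
The lines above (88-147 in the listing) show the lifetime pattern used for struct device_state: lookups bump an atomic reference count, put_device_state() drops it and wakes a waitqueue, and free_device_state() blocks in wait_event() until the count reaches zero before tearing the object down. Below is a minimal userspace sketch of the same pattern, with a pthread mutex and condition variable standing in for the kernel's atomic_t and waitqueue; obj_get/obj_put/obj_free are illustrative names, not taken from the driver.

#include <pthread.h>
#include <stdlib.h>

struct obj {
	int count;               /* reference count, protected by lock */
	pthread_mutex_t lock;
	pthread_cond_t  cond;    /* signalled when count drops */
};

static void obj_get(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->count++;
	pthread_mutex_unlock(&o->lock);
}

static void obj_put(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	if (--o->count == 0)
		pthread_cond_broadcast(&o->cond);  /* like wake_up(&dev_state->wq) */
	pthread_mutex_unlock(&o->lock);
}

/* Like free_device_state(): wait until every user has dropped its reference. */
static void obj_free(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	while (o->count != 0)                      /* like wait_event(..., !atomic_read(&count)) */
		pthread_cond_wait(&o->cond, &o->lock);
	pthread_mutex_unlock(&o->lock);
	pthread_mutex_destroy(&o->lock);
	pthread_cond_destroy(&o->cond);
	free(o);
}
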
150 /* Must be called under dev_state->lock */
151 static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
157 level = dev_state->pasid_levels;
158 root = dev_state->states;
184 static int set_pasid_state(struct device_state *dev_state,
192 spin_lock_irqsave(&dev_state->lock, flags);
193 ptr = __get_pasid_state_ptr(dev_state, pasid, true);
208 spin_unlock_irqrestore(&dev_state->lock, flags);
213 static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
218 spin_lock_irqsave(&dev_state->lock, flags);
219 ptr = __get_pasid_state_ptr(dev_state, pasid, true);
227 spin_unlock_irqrestore(&dev_state->lock, flags);
230 static struct pasid_state *get_pasid_state(struct device_state *dev_state,
236 spin_lock_irqsave(&dev_state->lock, flags);
237 ptr = __get_pasid_state_ptr(dev_state, pasid, false);
247 spin_unlock_irqrestore(&dev_state->lock, flags);
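
The __get_pasid_state_ptr()/set_pasid_state()/clear_pasid_state()/get_pasid_state() lines (150-247) describe a small radix-style table: each level consumes 9 bits of the PASID, and the leaf slot holds a struct pasid_state pointer. A userspace sketch of the walk follows, assuming 512-entry levels allocated lazily with calloc(); the driver allocates levels with get_zeroed_page(GFP_ATOMIC) and takes dev_state->lock around the walk.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define SLOTS_PER_LEVEL 512          /* 9 bits of the PASID per level */
#define SLOT_MASK       (SLOTS_PER_LEVEL - 1)

struct pasid_state;                  /* leaf payload, opaque here */

/*
 * Walk a table with 'levels' intermediate levels below the root and return
 * the address of the leaf slot for 'pasid'.  With alloc == true, missing
 * intermediate levels are created on the way down.
 */
static struct pasid_state **pasid_slot(void **root, int levels,
				       uint32_t pasid, bool alloc)
{
	int level = levels;

	while (true) {
		int index = (pasid >> (9 * level)) & SLOT_MASK;
		void **slot = &root[index];

		if (level == 0)
			return (struct pasid_state **)slot;

		if (*slot == NULL) {
			if (!alloc)
				return NULL;
			*slot = calloc(SLOTS_PER_LEVEL, sizeof(void *));
			if (*slot == NULL)
				return NULL;
		}

		root = (void **)*slot;
		level--;
	}
}

With such a helper, set_pasid_state() and clear_pasid_state() reduce to writing the returned slot (after checking it was empty, respectively non-empty) while holding dev_state->lock, which is what the listed functions do.
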
318 static void free_pasid_states(struct device_state *dev_state)
323 for (i = 0; i < dev_state->max_pasids; ++i) {
324 pasid_state = get_pasid_state(dev_state, i);
331 clear_pasid_state(dev_state, pasid_state->pasid);
343 put_device_state(dev_state);
346 if (dev_state->pasid_levels == 2)
347 free_pasid_states_level2(dev_state->states);
348 else if (dev_state->pasid_levels == 1)
349 free_pasid_states_level1(dev_state->states);
351 BUG_ON(dev_state->pasid_levels != 0);
353 free_page((unsigned long)dev_state->states);
366 struct device_state *dev_state;
369 dev_state = pasid_state->device_state;
372 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
375 amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
381 struct device_state *dev_state;
387 dev_state = pasid_state->device_state;
390 if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
391 dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
411 static void finish_pri_tag(struct device_state *dev_state,
420 amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
432 if (!fault->dev_state->inv_ppr_cb) {
437 status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
508 finish_pri_tag(fault->dev_state, fault->state, fault->tag);
519 struct device_state *dev_state;
547 dev_state = get_device_state(iommu_fault->sbdf);
548 if (dev_state == NULL)
551 pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
554 amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
568 finish_pri_tag(dev_state, pasid_state, tag);
572 fault->dev_state = dev_state;
590 put_device_state(dev_state);
605 struct device_state *dev_state;
616 dev_state = get_device_state(sbdf);
618 if (dev_state == NULL)
622 if (pasid >= dev_state->max_pasids)
637 pasid_state->device_state = dev_state;
650 ret = set_pasid_state(dev_state, pasid_state, pasid);
654 ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
672 clear_pasid_state(dev_state, pasid);
682 put_device_state(dev_state);
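
The bind path above (605-682) follows the usual kernel goto-unwind idiom: each step that succeeds gets a matching cleanup label in the error path, taken in reverse order (clear_pasid_state() before put_device_state()). A generic sketch of that shape is shown below; the step/undo helpers are hypothetical stand-ins for get_device_state(), set_pasid_state() and amd_iommu_domain_set_gcr3(), not the driver's real signatures.

/* Hypothetical helpers standing in for the driver's real steps. */
int  take_device(int sbdf);           /* like get_device_state()          */
void drop_device(int sbdf);           /* like put_device_state()          */
int  install_state(int pasid);        /* like set_pasid_state()           */
void remove_state(int pasid);         /* like clear_pasid_state()         */
int  program_gcr3(int pasid);         /* like amd_iommu_domain_set_gcr3() */

int bind_pasid(int sbdf, int pasid)
{
	int ret;

	ret = take_device(sbdf);
	if (ret)
		return ret;

	ret = install_state(pasid);
	if (ret)
		goto out_put_device;

	ret = program_gcr3(pasid);
	if (ret)
		goto out_clear_state;

	return 0;

out_clear_state:
	remove_state(pasid);          /* undo in reverse order of setup */
out_put_device:
	drop_device(sbdf);
	return ret;
}
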
691 struct device_state *dev_state;
700 dev_state = get_device_state(sbdf);
701 if (dev_state == NULL)
704 if (pasid >= dev_state->max_pasids)
707 pasid_state = get_pasid_state(dev_state, pasid);
717 clear_pasid_state(dev_state, pasid_state->pasid);
729 put_device_state(dev_state);
732 put_device_state(dev_state);
738 struct device_state *dev_state;
761 dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
762 if (dev_state == NULL)
765 spin_lock_init(&dev_state->lock);
766 init_waitqueue_head(&dev_state->wq);
767 dev_state->pdev = pdev;
768 dev_state->sbdf = sbdf;
771 for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
772 dev_state->pasid_levels += 1;
774 atomic_set(&dev_state->count, 1);
775 dev_state->max_pasids = pasids;
778 dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
779 if (dev_state->states == NULL)
782 dev_state->domain = iommu_domain_alloc(&pci_bus_type);
783 if (dev_state->domain == NULL)
787 dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
788 amd_iommu_domain_direct_map(dev_state->domain);
790 ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
800 ret = iommu_attach_group(dev_state->domain, group);
814 list_add_tail(&dev_state->list, &state_list);
824 iommu_domain_free(dev_state->domain);
827 free_page((unsigned long)dev_state->states);
830 kfree(dev_state);
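
The initialization lines above (738-830) include the level calculation: for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9) counts how many 9-bit levels are needed above a single 512-slot page. The standalone snippet below reproduces that computation for a few PASID counts; by the loop's own arithmetic, 512 PASIDs need zero extra levels, 2^16 needs one, and 2^20 needs two.

#include <stdio.h>

/* Same loop as the driver: count 9-bit levels needed above one 512-slot page. */
static int pasid_levels(unsigned long pasids)
{
	unsigned long tmp = pasids;
	int levels;

	for (levels = 0; (tmp - 1) & ~0x1ffUL; tmp >>= 9)
		levels += 1;

	return levels;
}

int main(void)
{
	unsigned long counts[] = { 512, 1UL << 16, 1UL << 20 };

	for (unsigned int i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("%lu PASIDs -> %d level(s)\n", counts[i], pasid_levels(counts[i]));

	return 0;
}
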
838 struct device_state *dev_state;
849 dev_state = __get_device_state(sbdf);
850 if (dev_state == NULL) {
855 list_del(&dev_state->list);
859 put_device_state(dev_state);
860 free_device_state(dev_state);
867 struct device_state *dev_state;
880 dev_state = __get_device_state(sbdf);
881 if (dev_state == NULL)
884 dev_state->inv_ppr_cb = cb;
898 struct device_state *dev_state;
911 dev_state = __get_device_state(sbdf);
912 if (dev_state == NULL)
915 dev_state->inv_ctx_cb = cb;
956 struct device_state *dev_state, *next;
973 list_for_each_entry_safe(dev_state, next, &state_list, list) {
976 put_device_state(dev_state);
977 list_del(&dev_state->list);
978 list_add_tail(&dev_state->list, &freelist);
985 * we need to free dev_state outside the spinlock.
987 list_for_each_entry_safe(dev_state, next, &freelist, list) {
988 list_del(&dev_state->list);
989 free_device_state(dev_state);
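
The module-exit lines (956-989) show the common trick for freeing list members that cannot be freed under a spinlock: while holding the lock, unlink each entry and move it to a local freelist; then drop the lock and free the entries, since free_device_state() can sleep in wait_event(), which is not allowed under a spinlock. The userspace sketch below shows the same two-phase teardown with a pthread mutex and a plain singly-linked list; it simplifies by stealing the whole list head in one step, where the driver moves entries one by one with list_del()/list_add_tail().

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	/* payload ... */
};

static struct node *state_list;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void teardown_all(void)
{
	struct node *freelist, *n;

	/* Phase 1: under the lock, detach everything onto a private freelist. */
	pthread_mutex_lock(&state_lock);
	freelist = state_list;
	state_list = NULL;
	pthread_mutex_unlock(&state_lock);

	/* Phase 2: lock dropped, walk the freelist and release each entry. */
	while ((n = freelist) != NULL) {
		freelist = n->next;
		free(n);    /* stand-in for free_device_state(), which can sleep */
	}
}
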