Lines matching defs:dev_state

(Cross-reference listing: each entry below is a source line number followed by the matching code. The matches appear to come from the AMD IOMMU v2 driver, drivers/iommu/amd/iommu_v2.c, which tracks per-device PASID state in struct device_state.)
68 struct device_state *dev_state;
84 static void free_pasid_states(struct device_state *dev_state);
98 struct device_state *dev_state;
100 list_for_each_entry(dev_state, &state_list, list) {
101 if (dev_state->devid == devid)
102 return dev_state;
110 struct device_state *dev_state;
114 dev_state = __get_device_state(devid);
115 if (dev_state != NULL)
116 atomic_inc(&dev_state->count);
119 return dev_state;
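
Lines 110-119 wrap the raw list walk from lines 98-102 in the usual lookup-plus-refcount pattern. A minimal sketch of how the pieces likely fit together, assuming the global state_list is guarded by a state_lock spinlock (neither the lock nor the function head shows up in the matches above):

	static struct device_state *get_device_state(u32 devid)
	{
		struct device_state *dev_state;
		unsigned long flags;

		/* Hold state_lock so the list walk and the refcount bump
		 * are atomic with respect to concurrent device removal. */
		spin_lock_irqsave(&state_lock, flags);
		dev_state = __get_device_state(devid);
		if (dev_state != NULL)
			atomic_inc(&dev_state->count);
		spin_unlock_irqrestore(&state_lock, flags);

		return dev_state;
	}
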
122 static void free_device_state(struct device_state *dev_state)
130 group = iommu_group_get(&dev_state->pdev->dev);
134 iommu_detach_group(dev_state->domain, group);
139 iommu_domain_free(dev_state->domain);
142 kfree(dev_state);
145 static void put_device_state(struct device_state *dev_state)
147 if (atomic_dec_and_test(&dev_state->count))
148 wake_up(&dev_state->wq);
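
put_device_state() is the release half of that pair: the final atomic_dec_and_test() wakes whoever is sleeping on dev_state->wq (compare line 860, where teardown waits for the count to drain). A hedged usage sketch; the error handling is illustrative only:

	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;				/* device was never registered */

	/* ... operate on dev_state while holding the reference ... */

	put_device_state(dev_state);		/* may wake the teardown path */
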
151 /* Must be called under dev_state->lock */
152 static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
158 level = dev_state->pasid_levels;
159 root = dev_state->states;
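
Lines 158-159 load the starting point for a multi-level radix walk over the per-device PASID table. A sketch of the loop body that plausibly follows, assuming 9 bits of PASID per level (512 pointers fill one zeroed page) and GFP_ATOMIC allocation, since callers hold dev_state->lock:

	while (true) {
		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;			/* ptr is the leaf slot */

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;
			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
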
185 static int set_pasid_state(struct device_state *dev_state,
193 spin_lock_irqsave(&dev_state->lock, flags);
194 ptr = __get_pasid_state_ptr(dev_state, pasid, true);
209 spin_unlock_irqrestore(&dev_state->lock, flags);
214 static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
219 spin_lock_irqsave(&dev_state->lock, flags);
220 ptr = __get_pasid_state_ptr(dev_state, pasid, true);
228 spin_unlock_irqrestore(&dev_state->lock, flags);
231 static struct pasid_state *get_pasid_state(struct device_state *dev_state,
237 spin_lock_irqsave(&dev_state->lock, flags);
238 ptr = __get_pasid_state_ptr(dev_state, pasid, false);
248 spin_unlock_irqrestore(&dev_state->lock, flags);
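
set_pasid_state(), clear_pasid_state() and get_pasid_state() all take dev_state->lock around the same table walk; they differ only in what they do with the leaf slot (install, NULL out, or read-and-reference). A sketch of the getter, assuming struct pasid_state carries an atomic_t count like struct device_state does:

	static struct pasid_state *get_pasid_state(struct device_state *dev_state,
						   u32 pasid)
	{
		struct pasid_state **ptr, *ret = NULL;
		unsigned long flags;

		spin_lock_irqsave(&dev_state->lock, flags);
		ptr = __get_pasid_state_ptr(dev_state, pasid, false);
		if (ptr != NULL && *ptr != NULL) {
			ret = *ptr;
			atomic_inc(&ret->count);	/* caller now holds a ref */
		}
		spin_unlock_irqrestore(&dev_state->lock, flags);

		return ret;
	}
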
319 static void free_pasid_states(struct device_state *dev_state)
324 for (i = 0; i < dev_state->max_pasids; ++i) {
325 pasid_state = get_pasid_state(dev_state, i);
341 put_device_state(dev_state);
344 if (dev_state->pasid_levels == 2)
345 free_pasid_states_level2(dev_state->states);
346 else if (dev_state->pasid_levels == 1)
347 free_pasid_states_level1(dev_state->states);
349 BUG_ON(dev_state->pasid_levels != 0);
351 free_page((unsigned long)dev_state->states);
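
After every individual pasid_state has been dropped (lines 324-341), the table pages themselves are freed level by level; line 351 then releases the root page. A plausible shape for the level-1 helper named on line 347; the 512-entry width matches one page of pointers and is an assumption:

	static void free_pasid_states_level1(struct pasid_state **tbl)
	{
		int i;

		/* Each non-NULL slot is a leaf page allocated in
		 * __get_pasid_state_ptr(); free the pages, the
		 * pasid_state entries themselves are already gone. */
		for (i = 0; i < 512; ++i) {
			if (tbl[i] == NULL)
				continue;
			free_page((unsigned long)tbl[i]);
		}
	}
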
364 struct device_state *dev_state;
367 dev_state = pasid_state->device_state;
370 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
373 amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
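
Lines 370 and 373 are the two arms of the MMU-notifier invalidation path: a range that fits in a single page gets a targeted IOTLB flush, anything larger flushes the whole PASID. The surrounding test is an assumption based on the common single-page check:

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
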
379 struct device_state *dev_state;
385 dev_state = pasid_state->device_state;
388 if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
389 dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
409 static void finish_pri_tag(struct device_state *dev_state,
418 amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
430 if (!fault->dev_state->inv_ppr_cb) {
435 status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
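
Lines 430-435 sit in the fault-error path: without a registered invalid-PPR callback the tag is answered INVALID; otherwise the device driver's callback decides the PPR response code. A sketch of the dispatch, with set_pri_tag_status() assumed from context:

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	/* status then selects the SUCCESS/INVALID/FAIL PPR response */
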
506 finish_pri_tag(fault->dev_state, fault->state, fault->tag);
517 struct device_state *dev_state;
544 dev_state = get_device_state(iommu_fault->device_id);
545 if (dev_state == NULL)
548 pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
551 amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
565 finish_pri_tag(dev_state, pasid_state, tag);
569 fault->dev_state = dev_state;
587 put_device_state(dev_state);
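
ppr_notifier() (lines 517-587) runs in atomic context, so after validating device and PASID it packages the event into a struct fault and defers the actual page-fault resolution to a workqueue. A sketch of the hand-off around line 569; every field except dev_state, and the iommu_wq name, are taken on assumption:

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);	/* resolved in sleepable context */
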
602 struct device_state *dev_state;
613 dev_state = get_device_state(devid);
615 if (dev_state == NULL)
619 if (pasid >= dev_state->max_pasids)
634 pasid_state->device_state = dev_state;
645 ret = set_pasid_state(dev_state, pasid_state, pasid);
649 ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
667 clear_pasid_state(dev_state, pasid);
677 put_device_state(dev_state);
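
amd_iommu_bind_pasid() (lines 602-677) first publishes the new pasid_state in the table, then points the IOMMU's GCR3 table at the process page table; each step unwinds the previous one on failure. A simplified sketch of that sequence; label names and the exact unwind order are assumptions:

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_free;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);
out_free:
	put_device_state(dev_state);	/* reference taken at line 613 */
	return ret;

On success the device reference from line 613 is deliberately kept for as long as the PASID stays bound; it is only dropped in amd_iommu_unbind_pasid(), which explains the two puts below.
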
686 struct device_state *dev_state;
695 dev_state = get_device_state(devid);
696 if (dev_state == NULL)
699 if (pasid >= dev_state->max_pasids)
702 pasid_state = get_pasid_state(dev_state, pasid);
712 clear_pasid_state(dev_state, pasid_state->pasid);
724 put_device_state(dev_state);
727 put_device_state(dev_state);
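
The two consecutive put_device_state() calls at lines 724 and 727 are not a bug: the first balances this function's own get_device_state() at line 695, the second drops the long-lived reference that amd_iommu_bind_pasid() took at line 613. In refcount terms, assuming no other concurrent users and the init-time count of 1:

	/* bind:	get_device_state()	count: 1 -> 2 (held while bound)
	 * unbind:	get_device_state()	count: 2 -> 3 (transient)
	 *		put_device_state()	count: 3 -> 2 (line 724)
	 *		put_device_state()	count: 2 -> 1 (line 727) */
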
733 struct device_state *dev_state;
756 dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
757 if (dev_state == NULL)
760 spin_lock_init(&dev_state->lock);
761 init_waitqueue_head(&dev_state->wq);
762 dev_state->pdev = pdev;
763 dev_state->devid = devid;
766 for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
767 dev_state->pasid_levels += 1;
769 atomic_set(&dev_state->count, 1);
770 dev_state->max_pasids = pasids;
773 dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
774 if (dev_state->states == NULL)
777 dev_state->domain = iommu_domain_alloc(&pci_bus_type);
778 if (dev_state->domain == NULL)
781 amd_iommu_domain_direct_map(dev_state->domain);
783 ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
793 ret = iommu_attach_group(dev_state->domain, group);
807 list_add_tail(&dev_state->list, &state_list);
817 iommu_domain_free(dev_state->domain);
820 free_page((unsigned long)dev_state->states);
823 kfree(dev_state);
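
The loop at lines 766-767 sizes the radix table later walked by __get_pasid_state_ptr(): it strips 9 bits per iteration until the remaining PASID count fits one 512-entry page, counting a level each time. A worked example, assuming tmp was initialized to the requested pasids value just before the loop:

	/* pasids = 512:     (512 - 1) & ~0x1ff == 0          -> 0 levels
	 * pasids = 1024:    (1023)    & ~0x1ff != 0, tmp = 2 -> 1 level
	 * pasids = 1 << 20: two reductions needed            -> 2 levels */
	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;
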
831 struct device_state *dev_state;
842 dev_state = __get_device_state(devid);
843 if (dev_state == NULL) {
848 list_del(&dev_state->list);
853 free_pasid_states(dev_state);
855 put_device_state(dev_state);
860 wait_event(dev_state->wq, !atomic_read(&dev_state->count));
861 free_device_state(dev_state);
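
amd_iommu_free_device() unlinks the device under the global lock (line 848), tears down the PASID table, then drops the initial reference that amd_iommu_init_device() set at line 769. Only after every transient get_device_state() user is gone does line 861 actually free the state; descriptive comments on that tail, matching lines 855-861:

	put_device_state(dev_state);	/* drop init-time reference (count was 1) */

	/* Sleep until concurrent users (PPR handler, bind/unbind) have
	 * all dropped theirs; only then is detach-and-free safe. */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
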
868 struct device_state *dev_state;
881 dev_state = __get_device_state(devid);
882 if (dev_state == NULL)
885 dev_state->inv_ppr_cb = cb;
899 struct device_state *dev_state;
912 dev_state = __get_device_state(devid);
913 if (dev_state == NULL)
916 dev_state->inv_ctx_cb = cb;
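
Both setters (lines 868-885 and 899-916) look the device up under the same global lock and simply store the function pointer that is consumed later at lines 435 and 389 respectively. A hypothetical consumer; the my_* names and MY_MAX_PASIDS are illustrative, not part of the driver:

	ret = amd_iommu_init_device(pdev, MY_MAX_PASIDS);
	if (ret)
		return ret;

	/* Register callbacks before binding any PASID so no fault or
	 * context invalidation can arrive unhandled. */
	amd_iommu_set_invalid_ppr_cb(pdev, my_ppr_handler);
	amd_iommu_set_invalidate_ctx_cb(pdev, my_ctx_invalidate);
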
959 struct device_state *dev_state;
974 dev_state = get_device_state(i);
976 if (dev_state == NULL)
981 put_device_state(dev_state);
982 amd_iommu_free_device(dev_state->pdev);
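
The exit loop (lines 974-982) is effectively a leak check: any device still registered when the module unloads means some driver skipped amd_iommu_free_device(), so the cleanup is forced. The put at line 981 balances the get at line 974 before the full teardown runs; a sketch, assuming MAX_DEVICES bounds the device-ID space:

	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);
		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);	/* leaked device state */

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}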