Lines matching refs: ggtt
60 insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
64 err = mutex_lock_interruptible(&ggtt->vm.mutex);
69 err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
71 0, ggtt->mappable_end,
74 mutex_unlock(&ggtt->vm.mutex);
80 remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
82 mutex_lock(&ggtt->vm.mutex);
84 mutex_unlock(&ggtt->vm.mutex);
91 struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
96 if (mutex_lock_interruptible(&ggtt->vm.mutex))
99 pinned = ggtt->vm.reserved;
100 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
104 mutex_unlock(&ggtt->vm.mutex);
106 args->aper_size = ggtt->vm.total;
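The next cluster looks like the GET_APERTURE ioctl, which reports the total GGTT size and how much of it is currently spoken for. A rough sketch of what the matched lines imply; names outside the matches (i915_vma_is_pinned, the args fields) are taken from the usual i915 uapi and are assumptions here:

	int
	i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
	{
		struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
		struct drm_i915_gem_get_aperture *args = data;
		struct i915_vma *vma;
		u64 pinned;

		if (mutex_lock_interruptible(&ggtt->vm.mutex))
			return -EINTR;

		/* Space reserved by the driver plus every currently pinned binding. */
		pinned = ggtt->vm.reserved;
		list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
			if (i915_vma_is_pinned(vma))
				pinned += vma->node.size;

		mutex_unlock(&ggtt->vm.mutex);

		args->aper_size = ggtt->vm.total;
		args->aper_available_size = args->aper_size - pinned;

		return 0;
	}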
381 struct i915_ggtt *ggtt = &i915->ggtt;
401 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
440 ggtt->vm.insert_page(&ggtt->vm,
447 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
461 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
462 remove_mappable_node(ggtt, &node);
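This cluster belongs to the GGTT-based pread fallback: when the object cannot be pinned in the aperture as a whole, a single scratch page is borrowed with insert_mappable_node() and each page of the object is rotated through it. A condensed sketch of that loop, assuming the surrounding plumbing (args, offset/remain bookkeeping) and treating gtt_user_read() as the file-local copy helper the match points at:

	struct drm_mm_node node;
	u64 offset = args->offset, remain = args->size;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret;

	/* Borrow a single mappable page; the whole object need not fit. */
	ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
	if (ret)
		return ret;

	while (remain > 0) {
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = min_t(u64, remain, PAGE_SIZE - page_offset);

		/* Retarget the scratch GGTT page at the next page of the object. */
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
				     node.start, I915_CACHE_NONE, 0);

		/* Copy through the CPU-visible aperture into the user buffer. */
		if (gtt_user_read(&ggtt->iomap, node.start, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	/* Scrub the PTE and give the scratch page back. */
	ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
	remove_mappable_node(ggtt, &node);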
570 struct i915_ggtt *ggtt = &i915->ggtt;
606 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
647 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
648 ggtt->vm.insert_page(&ggtt->vm,
661 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
672 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
678 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
679 remove_mappable_node(ggtt, &node);
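The following cluster is the mirror-image pwrite fast path. It reuses the same borrowed scratch page, but flushes outstanding GGTT writes with intel_gt_flush_ggtt_writes() before the PTE is moved and again before teardown, and copies with the file-local ggtt_write() helper. A sketch of the write iteration under the same assumptions as the read sketch above:

	while (remain > 0) {
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = min_t(u64, remain, PAGE_SIZE - page_offset);

		/* Make sure earlier writes through the aperture have landed
		 * before the scratch PTE is pointed somewhere else.
		 */
		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
				     node.start, I915_CACHE_NONE, 0);

		/* Copy the user buffer out through the aperture mapping. */
		if (ggtt_write(&ggtt->iomap, node.start, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	/* One last flush before the scratch page is unmapped and released. */
	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
	remove_mappable_node(ggtt, &node);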
901 &i915->ggtt.userfault_list, userfault_link)
909 for (i = 0; i < i915->ggtt.num_fences; i++) {
910 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
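These matches come from runtime suspend: objects still faulted in through the aperture have their userspace GTT mmaps revoked, and every fence register is marked dirty so it is rewritten on the next use after wake. A sketch; the _safe iteration, the release helper name and the dirty flag are assumptions beyond what the matches show:

	struct drm_i915_gem_object *obj, *on;
	int i;

	/* Revoke userspace GTT mmaps; the aperture is about to lose power. */
	list_for_each_entry_safe(obj, on,
				 &i915->ggtt.userfault_list, userfault_link)
		__i915_gem_object_release_mmap_gtt(obj);

	/* Fence register state does not survive suspend; force a rewrite. */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

		reg->dirty = true;
	}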
951 struct i915_ggtt *ggtt = &i915->ggtt;
965 if (obj->base.size > ggtt->mappable_end)
985 obj->base.size > ggtt->mappable_end / 2)
990 vma = i915_vma_instance(obj, &ggtt->vm, view);
1000 vma->fence_size > ggtt->mappable_end / 2)
1019 mutex_lock(&ggtt->vm.mutex);
1021 mutex_unlock(&ggtt->vm.mutex);
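This cluster sits in the GGTT pin path used for CPU access through the aperture: objects that can never fit are rejected up front, opportunistic callers are refused anything larger than half the aperture, and the binding is looked up with i915_vma_instance() against &ggtt->vm. A compressed sketch of those checks; the PIN_* flags, error codes and the simplified control flow are assumptions layered on top of the matched comparisons:

	if (flags & PIN_MAPPABLE) {
		/* The object can never fit through the aperture: fail early
		 * rather than churn other bindings in and out of the GGTT.
		 */
		if (obj->base.size > ggtt->mappable_end)
			return ERR_PTR(-E2BIG);

		/* Opportunistic (PIN_NONBLOCK) callers must leave room for others. */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > ggtt->mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, &ggtt->vm, view);
	if (IS_ERR(vma))
		return vma;

	/* The same half-aperture rule applies to the fenced footprint of the view. */
	if (flags & PIN_NONBLOCK &&
	    vma->fence_size > ggtt->mappable_end / 2)
		return ERR_PTR(-ENOSPC);

The trailing lock/unlock pair in this cluster takes the same ggtt->vm.mutex before the binding is actually touched; exactly what happens under that lock is not visible from the matches, so it is left out of the sketch.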
1174 i915_ggtt_resume(&dev_priv->ggtt);
1196 intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
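The last two hits are lifecycle hooks: GGTT mappings are reprogrammed on resume, and the auto-wakeref that keeps the device awake while aperture faults are outstanding is torn down on driver release. Roughly, under those assumptions:

	/* On resume, rewrite the GGTT PTEs lost across suspend. */
	i915_ggtt_resume(&dev_priv->ggtt);

	/* On driver release, shut down the wakeref machinery behind GTT faults. */
	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);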