Lines matching defs:obj

117 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
120 struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
127 assert_object_held(obj);
129 if (list_empty(&obj->vma.list))
142 spin_lock(&obj->vma.lock);
143 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
166 spin_unlock(&obj->vma.lock);
176 assert_object_held(vma->obj);
193 spin_lock(&obj->vma.lock);
195 list_splice_init(&still_in_list, &obj->vma.list);
196 spin_unlock(&obj->vma.lock);
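
The matches at 142-196 outline the unbind walk: each vma is moved from obj->vma.list onto a private still_in_list under obj->vma.lock, the spinlock is dropped for the actual (sleeping) unbind, and whatever was not unbound is spliced back at the end. A minimal sketch of that shape, using only the names visible above; i915_vma_unbind stands in for the driver's full locking dance, and all error handling is condensed:

    struct list_head still_in_list;
    struct i915_vma *vma;
    int ret = 0;

    INIT_LIST_HEAD(&still_in_list);

    spin_lock(&obj->vma.lock);
    while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
                                                   struct i915_vma,
                                                   obj_link))) {
            /* Park the vma on a private list so nothing else walking
             * obj->vma.list can observe it mid-unbind. */
            list_move_tail(&vma->obj_link, &still_in_list);

            /* Unbinding can sleep, so the spinlock must be dropped. */
            spin_unlock(&obj->vma.lock);
            ret = i915_vma_unbind(vma);
            spin_lock(&obj->vma.lock);
    }
    /* Hand every vma we did not get to back to the object. */
    list_splice_init(&still_in_list, &obj->vma.list);
    spin_unlock(&obj->vma.lock);
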
228 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
238 ret = i915_gem_object_lock_interruptible(obj, NULL);
242 ret = i915_gem_object_pin_pages(obj);
246 ret = i915_gem_object_prepare_read(obj, &needs_clflush);
250 i915_gem_object_finish_access(obj);
251 i915_gem_object_unlock(obj);
257 struct page *page = i915_gem_object_get_page(obj, idx);
270 i915_gem_object_unpin_pages(obj);
274 i915_gem_object_unpin_pages(obj);
276 i915_gem_object_unlock(obj);
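
Matches 228-276 give the complete CPU-read sequence for shmem-backed objects: take the object lock, pin the pages, prepare the object for reading (which reports whether clflush is required), then drop the lock and copy page by page. A condensed sketch; the per-page copy and the bounds bookkeeping are elided, and the error-label names are assumptions:

    static int shmem_pread_sketch(struct drm_i915_gem_object *obj,
                                  const struct drm_i915_gem_pread *args)
    {
            unsigned int needs_clflush;
            pgoff_t idx = args->offset >> PAGE_SHIFT;
            int ret;

            ret = i915_gem_object_lock_interruptible(obj, NULL);
            if (ret)
                    return ret;

            ret = i915_gem_object_pin_pages(obj);
            if (ret)
                    goto err_unlock;

            /* Waits and flushes as required; reports whether the CPU
             * must clflush each page before reading it. */
            ret = i915_gem_object_prepare_read(obj, &needs_clflush);
            if (ret)
                    goto err_unpin;

            /* Once the pages are prepared, the copy loop itself runs
             * without the object lock held. */
            i915_gem_object_finish_access(obj);
            i915_gem_object_unlock(obj);

            do {    /* per-page copy, condensed to one iteration */
                    struct page *page = i915_gem_object_get_page(obj, idx);

                    /* kmap the page, clflush if needs_clflush, then
                     * copy_to_user(); advance idx and the user pointer. */
            } while (0);

            i915_gem_object_unpin_pages(obj);
            return ret;

    err_unpin:
            i915_gem_object_unpin_pages(obj);
    err_unlock:
            i915_gem_object_unlock(obj);
            return ret;
    }
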
304 static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
308 struct drm_i915_private *i915 = to_i915(obj->base.dev);
317 ret = i915_gem_object_lock(obj, &ww);
321 ret = i915_gem_object_set_to_gtt_domain(obj, write);
325 if (!i915_gem_object_is_tiled(obj))
326 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
344 ret = i915_gem_object_pin_pages(obj);
365 static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
369 struct drm_i915_private *i915 = to_i915(obj->base.dev);
372 i915_gem_object_unpin_pages(obj);
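
Matches 304-372 are the setup/teardown pair for the GTT fast path. i915_gem_gtt_prepare runs under a ww transaction: lock the object, move it to the GTT domain, and try to pin it into the mappable aperture (tiled objects instead take the page-pinning fallback at 344). The -EDEADLK backoff is the standard i915 ww idiom; a sketch of that loop, with the pin flags assumed:

    struct i915_gem_ww_ctx ww;
    struct i915_vma *vma = ERR_PTR(-ENODEV);
    int ret;

    i915_gem_ww_ctx_init(&ww, true);
    retry:
    ret = i915_gem_object_lock(obj, &ww);
    if (ret)
            goto err_ww;

    ret = i915_gem_object_set_to_gtt_domain(obj, write);
    if (ret)
            goto err_ww;

    if (!i915_gem_object_is_tiled(obj))
            vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
                                              PIN_MAPPABLE |
                                              PIN_NONBLOCK |
                                              PIN_NOEVICT);
    err_ww:
    if (ret == -EDEADLK) {
            /* Wounded by another ww transaction: unlock everything,
             * wait our turn, then retry from the top. */
            ret = i915_gem_ww_ctx_backoff(&ww);
            if (!ret)
                    goto retry;
    }
    i915_gem_ww_ctx_fini(&ww);
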
382 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
385 struct drm_i915_private *i915 = to_i915(obj->base.dev);
400 vma = i915_gem_gtt_prepare(obj, &node, false);
423 i915_gem_object_get_dma_address(obj,
443 i915_gem_gtt_cleanup(obj, &node, vma);
463 struct drm_i915_gem_object *obj;
479 obj = i915_gem_object_lookup(file, args->handle);
480 if (!obj)
484 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
489 trace_i915_gem_object_pread(obj, args->offset, args->size);
491 if (obj->ops->pread)
492 ret = obj->ops->pread(obj, args);
496 ret = i915_gem_object_wait(obj,
502 ret = i915_gem_shmem_pread(obj, args);
504 ret = i915_gem_gtt_pread(obj, args);
507 i915_gem_object_put(obj);
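
Matches 463-507 trace the pread ioctl from handle lookup through the overflow check on offset + size, the optional per-object ops->pread hook, an interruptible wait, and finally the shmem path with the GTT path as fallback. From userspace the whole chain is driven by a single ioctl; a minimal sketch against the stock uapi (gem_pread is a hypothetical helper name, and newer platforms may refuse this ioctl entirely):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <drm/i915_drm.h>

    /* Read `len` bytes at `offset` from a GEM object into `dst`.
     * Returns 0 on success, -1 with errno set on failure. */
    static int gem_pread(int fd, uint32_t handle, uint64_t offset,
                         void *dst, uint64_t len)
    {
            struct drm_i915_gem_pread pread;

            memset(&pread, 0, sizeof(pread));
            pread.handle = handle;
            pread.offset = offset;  /* checked against obj size at 484 */
            pread.size = len;
            pread.data_ptr = (uintptr_t)dst;

            return drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
    }
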
541 * @obj: i915 GEM object
545 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
548 struct drm_i915_private *i915 = to_i915(obj->base.dev);
562 if (i915_gem_object_has_struct_page(obj)) {
578 vma = i915_gem_gtt_prepare(obj, &node, true);
584 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
604 i915_gem_object_get_dma_address(obj,
631 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
633 i915_gem_gtt_cleanup(obj, &node, vma);
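
Matches 541-633 are the write-side GTT fast path. Note the frontbuffer bracket at 584 and 631: the object is marked invalid for display before any bytes move and flushed afterwards, so a buffer being scanned out never misses a CPU write. Schematically (the copy itself elided):

    /* Bracket CPU writes through the mappable aperture with
     * frontbuffer tracking so display code sees the update. */
    i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

    /* ... copy user data through the GTT, page by page ... */

    i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
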
667 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
678 ret = i915_gem_object_lock_interruptible(obj, NULL);
682 ret = i915_gem_object_pin_pages(obj);
686 ret = i915_gem_object_prepare_write(obj, &needs_clflush);
690 i915_gem_object_finish_access(obj);
691 i915_gem_object_unlock(obj);
705 struct page *page = i915_gem_object_get_page(obj, idx);
719 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
721 i915_gem_object_unpin_pages(obj);
725 i915_gem_object_unpin_pages(obj);
727 i915_gem_object_unlock(obj);
745 struct drm_i915_gem_object *obj;
760 obj = i915_gem_object_lookup(file, args->handle);
761 if (!obj)
765 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
771 if (i915_gem_object_is_readonly(obj)) {
776 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
779 if (obj->ops->pwrite)
780 ret = obj->ops->pwrite(obj, args);
784 ret = i915_gem_object_wait(obj,
798 if (!i915_gem_object_has_struct_page(obj) ||
799 i915_gem_cpu_write_needs_clflush(obj))
804 ret = i915_gem_gtt_pwrite_fast(obj, args);
807 if (i915_gem_object_has_struct_page(obj))
808 ret = i915_gem_shmem_pwrite(obj, args);
812 i915_gem_object_put(obj);
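
The pwrite ioctl at 745-812 mirrors pread with two extra gates: readonly objects are rejected outright (771), and the GTT fast path is attempted only when the object has no struct pages or needs clflush on CPU writes (798-799), with the shmem path as fallback (807-808). A userspace sketch matching gem_pread above (gem_pwrite is likewise a hypothetical helper name):

    /* Write `len` bytes from `src` into a GEM object at `offset`.
     * Returns 0 on success, -1 with errno set on failure. */
    static int gem_pwrite(int fd, uint32_t handle, uint64_t offset,
                          const void *src, uint64_t len)
    {
            struct drm_i915_gem_pwrite pwrite;

            memset(&pwrite, 0, sizeof(pwrite));
            pwrite.handle = handle;
            pwrite.offset = offset;
            pwrite.size = len;
            pwrite.data_ptr = (uintptr_t)src;

            return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
    }
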
827 struct drm_i915_gem_object *obj;
829 obj = i915_gem_object_lookup(file, args->handle);
830 if (!obj)
839 i915_gem_object_flush_if_display(obj);
840 i915_gem_object_put(obj);
847 struct drm_i915_gem_object *obj, *on;
857 list_for_each_entry_safe(obj, on,
859 __i915_gem_object_release_mmap_gtt(obj);
861 list_for_each_entry_safe(obj, on,
863 i915_gem_object_runtime_pm_release_mmap_offset(obj);
895 struct drm_i915_gem_object *obj = vma->obj;
897 spin_lock(&obj->vma.lock);
899 rb_erase(&vma->obj_node, &obj->vma.tree);
902 spin_unlock(&obj->vma.lock);
906 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
911 struct drm_i915_private *i915 = to_i915(obj->base.dev);
928 if (obj->base.size > ggtt->mappable_end)
948 obj->base.size > ggtt->mappable_end / 2)
953 vma = i915_vma_instance(obj, &ggtt->vm, view);
993 if (vma->fence && !i915_gem_object_is_tiled(obj)) {
1009 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1018 err = i915_gem_object_lock(obj, &ww);
1022 ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
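
Matches 1009-1022 show the unlocked convenience wrapper: i915_gem_object_ggtt_pin supplies its own ww transaction around i915_gem_object_ggtt_pin_ww. This is the macro form of the retry loop sketched earlier; an assumed shape reconstructed from the two matched lines:

    struct i915_gem_ww_ctx ww;
    struct i915_vma *ret;
    int err;

    for_i915_gem_ww(&ww, err, true) {
            err = i915_gem_object_lock(obj, &ww);
            if (err)
                    continue;  /* on -EDEADLK the macro backs off and retries */

            ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
                                              alignment, flags);
            if (IS_ERR(ret))
                    err = PTR_ERR(ret);
    }

    return err ? ERR_PTR(err) : ret;
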
1037 struct drm_i915_gem_object *obj;
1048 obj = i915_gem_object_lookup(file_priv, args->handle);
1049 if (!obj)
1052 err = i915_gem_object_lock_interruptible(obj, NULL);
1056 if (i915_gem_object_has_pages(obj) &&
1057 i915_gem_object_is_tiled(obj) &&
1059 if (obj->mm.madv == I915_MADV_WILLNEED) {
1060 GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
1061 i915_gem_object_clear_tiling_quirk(obj);
1062 i915_gem_object_make_shrinkable(obj);
1065 GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
1066 i915_gem_object_make_unshrinkable(obj);
1067 i915_gem_object_set_tiling_quirk(obj);
1071 if (obj->mm.madv != __I915_MADV_PURGED) {
1072 obj->mm.madv = args->madv;
1073 if (obj->ops->adjust_lru)
1074 obj->ops->adjust_lru(obj);
1077 if (i915_gem_object_has_pages(obj) ||
1078 i915_gem_object_has_self_managed_shrink_list(obj)) {
1082 if (!list_empty(&obj->mm.link)) {
1085 if (obj->mm.madv != I915_MADV_WILLNEED)
1089 list_move_tail(&obj->mm.link, list);
1096 if (obj->mm.madv == I915_MADV_DONTNEED &&
1097 !i915_gem_object_has_pages(obj))
1098 i915_gem_object_truncate(obj);
1100 args->retained = obj->mm.madv != __I915_MADV_PURGED;
1102 i915_gem_object_unlock(obj);
1104 i915_gem_object_put(obj);
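
Finally, matches 1037-1104 cover the madvise ioctl: WILLNEED/DONTNEED flips the object's shrinker eligibility (with the tiling-quirk special case at 1059-1067), an object that was already purged stays purged (1071), and a DONTNEED object with no pages pinned is truncated on the spot (1096-1098). args->retained tells userspace whether the backing store survived. A minimal sketch (gem_madvise is a hypothetical helper name):

    /* Let the shrinker discard (DONTNEED) or keep (WILLNEED) the
     * object's backing store. Returns 1 if the pages were retained,
     * 0 if they were purged, -1 with errno set on ioctl failure. */
    static int gem_madvise(int fd, uint32_t handle, uint32_t madv)
    {
            struct drm_i915_gem_madvise arg;

            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            arg.madv = madv;        /* I915_MADV_WILLNEED or _DONTNEED */

            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
                    return -1;

            return arg.retained;
    }
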