Lines matching references to 'base' in the vmwgfx fence code; each hit is prefixed by its line number in the source file.
54 struct ttm_base_object base;
87 return container_of(fence->base.lock, struct vmw_fence_manager, lock);
115 container_of(f, struct vmw_fence_obj, base);
139 container_of(f, struct vmw_fence_obj, base);
146 if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
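
The hits at lines 54-146 show the two idioms this file is built on: parent objects are embedded as a member literally named base (a struct ttm_base_object inside the user fence, a struct dma_fence inside vmw_fence_obj), and the wrapper is recovered with container_of(). The seqno test at line 146 additionally relies on unsigned wraparound. A minimal sketch of both, assuming a simplified vmw_fence_obj and an illustrative VMW_FENCE_WRAP value:

    #include <linux/kernel.h>	/* container_of() */
    #include <linux/dma-fence.h>

    #define VMW_FENCE_WRAP (1u << 31)	/* assumed value, for illustration */

    struct vmw_fence_obj {
    	struct dma_fence base;	/* embedded; &obj->base is the dma_fence */
    	/* driver-private fields follow in the real driver */
    };

    static struct vmw_fence_obj *to_vmw_fence(struct dma_fence *f)
    {
    	/* Walk back from the embedded member to the wrapper. */
    	return container_of(f, struct vmw_fence_obj, base);
    }

    static bool vmw_seqno_passed(u32 seqno, struct vmw_fence_obj *fence)
    {
    	/* Unsigned subtraction stays correct across 32-bit seqno wrap. */
    	return seqno - (u32)fence->base.seqno < VMW_FENCE_WRAP;
    }
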
155 struct dma_fence_cb base;
163 container_of(cb, struct vmwgfx_wait_cb, base);
173 container_of(f, struct vmw_fence_obj, base);
196 cb.base.func = vmwgfx_wait_cb;
198 list_add(&cb.base.node, &f->cb_list);
234 if (!list_empty(&cb.base.node))
235 list_del(&cb.base.node);
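
Lines 155-235 open-code a blocking wait: a private struct vmwgfx_wait_cb embeds a struct dma_fence_cb, is linked onto the fence's cb_list under the fence lock (line 198), and is unlinked on the way out (lines 234-235). Below is a sketch of the same pattern using the generic dma-fence callback helpers instead of touching cb_list directly; all names outside the dma-fence API are made up:

    #include <linux/dma-fence.h>
    #include <linux/sched.h>

    struct my_wait_cb {
    	struct dma_fence_cb base;
    	struct task_struct *task;
    };

    static void my_wait_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
    {
    	struct my_wait_cb *wait = container_of(cb, struct my_wait_cb, base);

    	wake_up_process(wait->task);	/* fence signaled: kick the sleeper */
    }

    static long my_wait(struct dma_fence *f, signed long timeout)
    {
    	struct my_wait_cb cb = { .task = current };

    	/* Returns -ENOENT if the fence is already signaled. */
    	if (dma_fence_add_callback(f, &cb.base, my_wait_cb_func))
    		return timeout;

    	for (;;) {
    		set_current_state(TASK_UNINTERRUPTIBLE);
    		if (dma_fence_is_signaled(f) || timeout <= 0)
    			break;
    		timeout = schedule_timeout(timeout);
    	}
    	__set_current_state(TASK_RUNNING);

    	/* Harmless if the callback already fired and unlinked itself. */
    	dma_fence_remove_callback(f, &cb.base);
    	return timeout;
    }
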
345 dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
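
Line 345 is the single initialization point: dma_fence_init() wires the embedded fence to the driver's ops table and to the fence manager's shared spinlock, which is what lets line 87 recover the manager from fence->base.lock with container_of(). A sketch with assumed names; note that older kernels also require .enable_signaling/.wait in the ops:

    #include <linux/dma-fence.h>
    #include <linux/spinlock.h>

    struct my_fence_manager {
    	spinlock_t lock;	/* shared by every fence this manager makes */
    	u64 ctx;		/* from dma_fence_context_alloc(1) */
    };

    static const char *my_driver_name(struct dma_fence *f)   { return "demo"; }
    static const char *my_timeline_name(struct dma_fence *f) { return "demo"; }

    static const struct dma_fence_ops my_fence_ops = {
    	.get_driver_name   = my_driver_name,
    	.get_timeline_name = my_timeline_name,
    };

    static void my_fence_init(struct my_fence_manager *fman,
    			      struct dma_fence *f, u64 seqno)
    {
    	/* f->lock now points into fman, enabling container_of() recovery. */
    	dma_fence_init(f, &my_fence_ops, &fman->lock, fman->ctx, seqno);
    }
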
419 vmw_mmio_write(fence->base.seqno,
450 if (dma_fence_is_signaled_locked(&fence->base))
456 goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
459 vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
476 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
478 dma_fence_signal_locked(&fence->base);
517 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
522 return dma_fence_is_signaled(&fence->base);
528 long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
547 dma_fence_free(&fence->base);
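
Lines 419-478 are the signaling side: the driver writes the next interesting seqno to the SVGA_FIFO_FENCE_GOAL register so the device interrupts once it passes it, then signals every fence whose seqno has been reached (line 478). Lines 517-547 are thin wrappers over the generic API; a sketch of that wrapper layer (the real vmw_fence_obj_signaled also refreshes its view of the hardware seqno before the slow path):

    static bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
    {
    	/* Lock-free fast path: the signaled bit is set exactly once. */
    	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
    		return true;

    	return dma_fence_is_signaled(&fence->base);
    }

    static long vmw_fence_obj_wait(struct vmw_fence_obj *fence,
    			           bool interruptible, long timeout)
    {
    	/* > 0: jiffies left; 0: timed out; < 0: error, e.g. -ERESTARTSYS. */
    	return dma_fence_wait_timeout(&fence->base, interruptible, timeout);
    }
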
581 ttm_base_object_kfree(ufence, base);
591 struct ttm_base_object *base = *p_base;
593 container_of(base, struct vmw_user_fence, base);
640 * The base object holds a reference which is freed in
644 ret = ttm_base_object_init(tfile, &ufence->base, false,
651 * Free the base object's reference
658 *p_handle = ufence->base.handle;
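
Lines 581-658 implement the user-visible wrapper's lifetime. ttm_base_object_init() publishes the object and takes the reference that is later dropped through the release callback at lines 591-593; that is why the error path at line 651 must free the base object's reference rather than kfree() directly, and why userspace gets its handle from base.handle (line 658). A sketch of the release side only, since the init signature has varied across kernel versions; vmw_fence_obj_unreference() is the driver's own put helper:

    static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
    {
    	struct ttm_base_object *base = *p_base;
    	struct vmw_user_fence *ufence =
    		container_of(base, struct vmw_user_fence, base);
    	struct vmw_fence_obj *fence = &ufence->fence;

    	*p_base = NULL;	/* the caller's pointer must not be reused */
    	vmw_fence_obj_unreference(&fence);	/* may free ufence */
    }
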
735 dma_fence_get(&fence->base);
743 dma_fence_signal(&fence->base);
751 dma_fence_put(&fence->base);
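
Lines 735-751 show the reference discipline around manual signaling: pin the fence with dma_fence_get() before signaling, then drop the pin, so a callback that releases the last other reference cannot free the fence mid-signal. In miniature:

    static void my_signal(struct vmw_fence_obj *fence)
    {
    	dma_fence_get(&fence->base);	/* pin across the signal */
    	dma_fence_signal(&fence->base);	/* runs queued dma_fence_cb's */
    	dma_fence_put(&fence->base);	/* may be the final put */
    }
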
770 * @return: A struct vmw_user_fence base ttm object on success or
781 struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
783 if (!base) {
789 if (base->refcount_release != vmw_user_fence_base_release) {
792 ttm_base_object_unref(&base);
796 return base;
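
vmw_fence_obj_lookup() (lines 770-796) resolves a user handle and then type-checks the result, because ttm_base_object_lookup() will return any object the file owns: comparing refcount_release against the fence release function (line 789) serves as the type tag. A sketch, with the error codes assumed:

    #include <linux/err.h>

    static struct ttm_base_object *
    my_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
    {
    	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

    	if (!base)
    		return ERR_PTR(-EINVAL);	/* no such handle */

    	/* The release function pointer doubles as a type tag. */
    	if (base->refcount_release != vmw_user_fence_base_release) {
    		ttm_base_object_unref(&base);
    		return ERR_PTR(-EINVAL);	/* handle of the wrong type */
    	}

    	return base;
    }
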
806 struct ttm_base_object *base;
825 base = vmw_fence_obj_lookup(tfile, arg->handle);
826 if (IS_ERR(base))
827 return PTR_ERR(base);
829 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
843 ttm_base_object_unref(&base);
860 struct ttm_base_object *base;
866 base = vmw_fence_obj_lookup(tfile, arg->handle);
867 if (IS_ERR(base))
868 return PTR_ERR(base);
870 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
880 ttm_base_object_unref(&base);
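
Both ioctls at lines 806-880 follow the same lifetime pattern: resolve the handle, step from the base object to the embedded fence, operate on it, and drop the lookup reference on every exit path. The skeleton, with argument handling elided:

    	struct ttm_base_object *base;
    	struct vmw_fence_obj *fence;

    	base = vmw_fence_obj_lookup(tfile, arg->handle);
    	if (IS_ERR(base))
    		return PTR_ERR(base);

    	fence = &container_of(base, struct vmw_user_fence, base)->fence;

    	/* ... wait on or query the fence ... */

    	ttm_base_object_unref(&base);	/* balances the lookup reference */
    	return ret;
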
971 if (dma_fence_is_signaled_locked(&fence->base)) {
1046 struct drm_pending_event base;
1068 event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
1069 event->event.base.length = sizeof(*event);
1072 ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
1082 &event->base,
1088 &event->base,
1098 drm_event_cancel_free(dev, &event->base);
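
Lines 1046-1098 are the standard DRM pending-event flow: drm_event_reserve_init() charges the event against the file's event space and ties the uAPI struct drm_event to its drm_pending_event wrapper; the event is later delivered through the pending event (lines 1082/1088), and drm_event_cancel_free() returns the space if arming the fence action fails (line 1098). A sketch, with the arming helper assumed:

    #include <drm/drm_file.h>

    struct my_event_fence_pending {
    	struct drm_pending_event base;		/* kernel-side bookkeeping */
    	struct drm_vmw_event_fence event;	/* vmwgfx uAPI payload */
    };

    static int my_queue_event(struct drm_device *dev,
    			      struct drm_file *file_priv,
    			      struct my_event_fence_pending *event)
    {
    	int ret;

    	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
    	event->event.base.length = sizeof(event->event);

    	/* -ENOMEM if the file has exhausted its event space. */
    	ret = drm_event_reserve_init(dev, file_priv, &event->base,
    				     &event->event.base);
    	if (ret)
    		return ret;

    	if (my_arm_fence_action(event)) {	/* assumed helper */
    		drm_event_cancel_free(dev, &event->base);
    		return -ENOMEM;
    	}
    	return 0;
    }
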
1124 struct ttm_base_object *base =
1127 if (IS_ERR(base))
1128 return PTR_ERR(base);
1130 fence = &(container_of(base, struct vmw_user_fence,
1131 base)->fence);
1135 ret = ttm_ref_object_add(vmw_fp->tfile, base,
1142 handle = base->handle;
1144 ttm_base_object_unref(&base);
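
The event ioctl (lines 1124-1144) layers one more reference on top: before handing the handle back to userspace it takes a per-file TTM usage reference on the looked-up object, so the handle remains valid after the lookup reference is dropped at line 1144. The shape of it, noting that ttm_ref_object_add()'s argument list has varied across kernel versions:

    	ret = ttm_ref_object_add(vmw_fp->tfile, base, TTM_REF_USAGE,
    				 NULL, false);	/* pin for userspace */
    	if (unlikely(ret != 0))
    		goto out_no_ref;

    	handle = base->handle;		/* now safe to expose */
    	/* ... copy handle out to the ioctl argument ... */
    	ttm_base_object_unref(&base);	/* drop only the lookup ref */
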