Lines Matching defs:fman
All matches are local definitions and uses of the struct vmw_fence_manager pointer in the vmwgfx fence code (vmwgfx_fence.c); the number on each line is the line number in that file.

128 struct vmw_fence_manager *fman = fman_from_fence(fence);
130 spin_lock(&fman->lock);
132 --fman->num_fence_objects;
133 spin_unlock(&fman->lock);
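
Lines 128-133 are the fence teardown path: the object count drops under fman->lock. A condensed sketch of the surrounding destroy callback, assuming the driver's vmw_fence_obj embeds a struct dma_fence as base and carries a type-specific destroy hook (details not shown in this listing are reconstructed and may differ):

	static void vmw_fence_obj_destroy(struct dma_fence *f)
	{
		struct vmw_fence_obj *fence =
			container_of(f, struct vmw_fence_obj, base);
		struct vmw_fence_manager *fman = fman_from_fence(fence);

		spin_lock(&fman->lock);
		list_del_init(&fence->head);	/* unlink from fman->fence_list */
		--fman->num_fence_objects;	/* bookkeeping only under fman->lock */
		spin_unlock(&fman->lock);
		fence->destroy(fence);		/* assumed type-specific release hook */
	}
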
152 struct vmw_fence_manager *fman = fman_from_fence(fence);
153 struct vmw_private *dev_priv = fman->dev_priv;
176 static void __vmw_fences_update(struct vmw_fence_manager *fman);
183 struct vmw_fence_manager *fman = fman_from_fence(fence);
184 struct vmw_private *dev_priv = fman->dev_priv;
208 __vmw_fences_update(fman);
269 struct vmw_fence_manager *fman =
277 mutex_lock(&fman->goal_irq_mutex);
279 spin_lock(&fman->lock);
280 list_splice_init(&fman->cleanup_list, &list);
281 seqno_valid = fman->seqno_valid;
282 spin_unlock(&fman->lock);
284 if (!seqno_valid && fman->goal_irq_on) {
285 fman->goal_irq_on = false;
286 vmw_goal_waiter_remove(fman->dev_priv);
288 mutex_unlock(&fman->goal_irq_mutex);
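
The work function (lines 269-288) is the driver's deferred-cleanup idiom: steal the entire cleanup list with one O(1) splice while holding the spinlock, then process the stolen entries unlocked. A minimal sketch of that shape; the real function also walks and frees the spliced-off actions, which is elided here:

	static void vmw_fence_work_func(struct work_struct *work)
	{
		struct vmw_fence_manager *fman =
			container_of(work, struct vmw_fence_manager, work);
		struct list_head list;
		bool seqno_valid;

		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);	/* steal, O(1) */
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		/* No fence goal pending: drop the goal-IRQ reference. */
		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		/* ... entries on 'list' are now private; free them unlocked ... */
	}
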
296 * hence fman::lock not held.
309 struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
311 if (unlikely(!fman))
314 fman->dev_priv = dev_priv;
315 spin_lock_init(&fman->lock);
316 INIT_LIST_HEAD(&fman->fence_list);
317 INIT_LIST_HEAD(&fman->cleanup_list);
318 INIT_WORK(&fman->work, &vmw_fence_work_func);
319 fman->fifo_down = true;
320 mutex_init(&fman->goal_irq_mutex);
321 fman->ctx = dma_fence_context_alloc(1);
323 return fman;
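
Taken together, the init at lines 309-321 and the later matches imply this vmw_fence_manager layout. It is a reconstruction from usage, not the driver's declaration; field types, ordering, and the VMW_ACTION_MAX bound are assumptions:

	struct vmw_fence_manager {
		struct vmw_private *dev_priv;	/* back-pointer (314) */
		spinlock_t lock;		/* guards the fields below (315) */
		struct list_head fence_list;	/* unsignaled fences (316) */
		struct list_head cleanup_list;	/* deferred action teardown (317) */
		struct work_struct work;	/* runs vmw_fence_work_func (318) */
		unsigned int num_fence_objects;	/* count (132, 358) */
		bool fifo_down;			/* reject new fences (319, 353) */
		bool seqno_valid;		/* a fence goal is programmed (281) */
		struct mutex goal_irq_mutex;	/* orders goal-IRQ on/off (320) */
		bool goal_irq_on;		/* goal waiter registered (284) */
		u64 ctx;			/* dma_fence context (321) */
		u32 pending_actions[VMW_ACTION_MAX]; /* per-type counts (373); bound assumed */
	};
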
326 void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
330 (void) cancel_work_sync(&fman->work);
332 spin_lock(&fman->lock);
333 lists_empty = list_empty(&fman->fence_list) &&
334 list_empty(&fman->cleanup_list);
335 spin_unlock(&fman->lock);
338 kfree(fman);
341 static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
347 dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
348 fman->ctx, seqno);
352 spin_lock(&fman->lock);
353 if (unlikely(fman->fifo_down)) {
357 list_add_tail(&fence->head, &fman->fence_list);
358 ++fman->num_fence_objects;
361 spin_unlock(&fman->lock);
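
Lines 341-361 gate fence creation on fifo_down: the dma_fence is initialized first (it only borrows fman->lock and fman->ctx), and the object is linked into fence_list only while the fifo is up. A sketch with the missing branches filled in; the -EBUSY return and the destroy parameter are reconstructed, not shown in this listing:

	static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
				      struct vmw_fence_obj *fence, u32 seqno,
				      void (*destroy)(struct vmw_fence_obj *fence))
	{
		int ret = 0;

		dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
			       fman->ctx, seqno);
		fence->destroy = destroy;

		spin_lock(&fman->lock);
		if (unlikely(fman->fifo_down)) {
			ret = -EBUSY;	/* no new fences while the fifo is down */
		} else {
			list_add_tail(&fence->head, &fman->fence_list);
			++fman->num_fence_objects;
		}
		spin_unlock(&fman->lock);
		return ret;
	}
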
366 static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
373 fman->pending_actions[action->type]--;
382 list_add_tail(&action->head, &fman->cleanup_list);
390 * @fman: Pointer to a fence manager.
402 static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
408 if (likely(!fman->seqno_valid))
411 goal_seqno = vmw_fence_goal_read(fman->dev_priv);
415 fman->seqno_valid = false;
416 list_for_each_entry(fence, &fman->fence_list, head) {
418 fman->seqno_valid = true;
419 vmw_fence_goal_write(fman->dev_priv,
446 struct vmw_fence_manager *fman = fman_from_fence(fence);
452 goal_seqno = vmw_fence_goal_read(fman->dev_priv);
453 if (likely(fman->seqno_valid &&
457 vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
458 fman->seqno_valid = true;
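
Lines 446-458 are the per-fence goal check: if the programmed goal already covers this fence (modulo seqno wraparound), nothing changes; otherwise the goal register is rewritten and seqno_valid is set, telling the caller a rescan is needed. A sketch assuming a VMW_FENCE_WRAP wrap window and that the caller holds fman->lock (which is also the dma_fence lock, see line 347):

	static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
	{
		struct vmw_fence_manager *fman = fman_from_fence(fence);
		u32 goal_seqno;

		if (dma_fence_is_signaled_locked(&fence->base))
			return false;

		goal_seqno = vmw_fence_goal_read(fman->dev_priv);
		if (likely(fman->seqno_valid &&
			   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
			return false;	/* current goal already covers us */

		vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
		fman->seqno_valid = true;
		return true;		/* goal moved; caller should rescan */
	}
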
463 static void __vmw_fences_update(struct vmw_fence_manager *fman)
470 seqno = vmw_fence_read(fman->dev_priv);
472 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
479 vmw_fences_perform_actions(fman, &action_list);
490 needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
492 new_seqno = vmw_fence_read(fman->dev_priv);
499 if (!list_empty(&fman->cleanup_list))
500 (void) schedule_work(&fman->work);
503 void vmw_fences_update(struct vmw_fence_manager *fman)
505 spin_lock(&fman->lock);
506 __vmw_fences_update(fman);
507 spin_unlock(&fman->lock);
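
Lines 463-500 are the signal scan and 503-507 its locked wrapper. The scan reads the device seqno once, signals every fence that seqno has passed, splices each fence's pending actions away for execution, then reprograms the fence goal; if the goal moved and the device seqno advanced meanwhile, it rescans rather than miss a signal. A condensed reconstruction; the VMW_FENCE_WRAP test, the seq_passed_actions list, and the dma_fence_signal_locked call are assumed from the surrounding driver code and may differ in detail:

	static void __vmw_fences_update(struct vmw_fence_manager *fman)
	{
		struct vmw_fence_obj *fence, *next_fence;
		struct list_head action_list;
		bool needs_rerun;
		u32 seqno, new_seqno;

		seqno = vmw_fence_read(fman->dev_priv);
	rerun:
		list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
			if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
				list_del_init(&fence->head);
				dma_fence_signal_locked(&fence->base);
				INIT_LIST_HEAD(&action_list);
				list_splice_init(&fence->seq_passed_actions,
						 &action_list);
				vmw_fences_perform_actions(fman, &action_list);
			} else
				break;	/* list is ordered; the rest are newer */
		}

		/* If the goal register was rewritten, the hardware may have
		 * raced past it; re-read and rescan so no fence is missed. */
		needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
		if (unlikely(needs_rerun)) {
			new_seqno = vmw_fence_read(fman->dev_priv);
			if (new_seqno != seqno) {
				seqno = new_seqno;
				goto rerun;
			}
		}

		if (!list_empty(&fman->cleanup_list))
			(void) schedule_work(&fman->work);
	}
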
512 struct vmw_fence_manager *fman = fman_from_fence(fence);
517 vmw_fences_update(fman);
540 int vmw_fence_create(struct vmw_fence_manager *fman,
551 ret = vmw_fence_obj_init(fman, fence, seqno,
585 struct vmw_fence_manager *fman,
601 ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
642 void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
649 * restart when we've released the fman->lock.
652 spin_lock(&fman->lock);
653 fman->fifo_down = true;
654 while (!list_empty(&fman->fence_list)) {
656 list_entry(fman->fence_list.prev, struct vmw_fence_obj,
659 spin_unlock(&fman->lock);
670 vmw_fences_perform_actions(fman, &action_list);
675 spin_lock(&fman->lock);
677 spin_unlock(&fman->lock);
680 void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
682 spin_lock(&fman->lock);
683 fman->fifo_down = false;
684 spin_unlock(&fman->lock);
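
Lines 642-677 drain the fence list at fifo teardown. The wait cannot run under fman->lock, so the loop grabs a reference, drops the lock, waits, and re-takes the lock, restarting from the list tail each iteration as the comment at 649 warns. vmw_fence_fifo_up (680-684) simply clears the flag again. A condensed reconstruction; vmw_fence_obj_wait, VMW_FENCE_WAIT_TIMEOUT, and the timeout-fallback branch are assumed from the surrounding driver code:

	void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
	{
		struct list_head action_list;
		int ret;

		spin_lock(&fman->lock);
		fman->fifo_down = true;		/* vmw_fence_obj_init now fails */
		while (!list_empty(&fman->fence_list)) {
			struct vmw_fence_obj *fence =
				list_entry(fman->fence_list.prev,
					   struct vmw_fence_obj, head);

			dma_fence_get(&fence->base);
			spin_unlock(&fman->lock);

			ret = vmw_fence_obj_wait(fence, false, false,
						 VMW_FENCE_WAIT_TIMEOUT);
			if (unlikely(ret != 0)) {
				/* Wait failed: force-signal and run actions. */
				list_del_init(&fence->head);
				dma_fence_signal(&fence->base);
				INIT_LIST_HEAD(&action_list);
				list_splice_init(&fence->seq_passed_actions,
						 &action_list);
				vmw_fences_perform_actions(fman, &action_list);
			}

			dma_fence_put(&fence->base);
			spin_lock(&fman->lock);	/* restart the scan */
		}
		spin_unlock(&fman->lock);
	}
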
784 struct vmw_fence_manager *fman;
793 fman = fman_from_fence(fence);
798 spin_lock(&fman->lock);
800 spin_unlock(&fman->lock);
885 struct vmw_fence_manager *fman = fman_from_fence(fence);
888 mutex_lock(&fman->goal_irq_mutex);
889 spin_lock(&fman->lock);
891 fman->pending_actions[action->type]++;
897 vmw_fences_perform_actions(fman, &action_list);
902 * This function may set fman::seqno_valid, so it must
908 spin_unlock(&fman->lock);
911 if (!fman->goal_irq_on) {
912 fman->goal_irq_on = true;
913 vmw_goal_waiter_add(fman->dev_priv);
915 vmw_fences_update(fman);
917 mutex_unlock(&fman->goal_irq_mutex);
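
Lines 885-917 attach an action to a fence, taking both locks: the spinlock for the lists and the pending_actions counters, and goal_irq_mutex because the goal check may set fman::seqno_valid (the comment at 902). An already-signaled fence runs the action immediately; otherwise it is queued, and if the goal register had to move, the goal IRQ is enabled and a rescan is kicked. A condensed reconstruction; seq_passed_actions and the signaled test are assumed from the surrounding driver code:

	static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
					     struct vmw_fence_action *action)
	{
		struct vmw_fence_manager *fman = fman_from_fence(fence);
		bool run_update = false;

		mutex_lock(&fman->goal_irq_mutex);
		spin_lock(&fman->lock);

		fman->pending_actions[action->type]++;
		if (dma_fence_is_signaled_locked(&fence->base)) {
			struct list_head action_list;

			INIT_LIST_HEAD(&action_list);
			list_add_tail(&action->head, &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else {
			list_add_tail(&action->head, &fence->seq_passed_actions);
			/* May set fman::seqno_valid; goal_irq_mutex is held. */
			run_update = vmw_fence_goal_check_locked(fence);
		}

		spin_unlock(&fman->lock);

		if (run_update) {
			if (!fman->goal_irq_on) {
				fman->goal_irq_on = true;
				vmw_goal_waiter_add(fman->dev_priv);
			}
			vmw_fences_update(fman);	/* takes fman->lock itself */
		}
		mutex_unlock(&fman->goal_irq_mutex);
	}
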
948 struct vmw_fence_manager *fman = fman_from_fence(fence);
961 eaction->dev = &fman->dev_priv->drm;
982 struct vmw_fence_manager *fman = fman_from_fence(fence);
983 struct drm_device *dev = &fman->dev_priv->drm;