// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

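/**
 * struct vmw_fence_manager - Per-device manager for vmwgfx fence objects.
 *
 * @num_fence_objects: Number of fence objects on @fence_list.
 * @dev_priv: Pointer to the device private structure.
 * @lock: Protects @fence_list, @cleanup_list, @num_fence_objects and the
 * signaling state of the fences created by this manager.
 * @fence_list: List of fence objects not yet signaled.
 * @work: Work item that runs fence action cleanups outside atomic context.
 * @fifo_down: If true, the fifo is down and no new fences may be created.
 * @cleanup_list: Actions with pending cleanup, processed by @work.
 * @pending_actions: Per-type count of attached actions not yet executed.
 * @goal_irq_mutex: Protects @goal_irq_on.
 * @goal_irq_on: Whether the fence goal irq is currently requested.
 * @seqno_valid: Whether the seqno written to the fence goal register
 * corresponds to an unsignaled fence with actions attached.
 * @ctx: The dma_fence context number used by this manager.
 */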
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

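/**
 * struct vmw_user_fence - A fence object exposed to user-space as a
 * ttm base object.
 *
 * @base: The ttm base object carrying the user-space handle.
 * @fence: The embedded fence object.
 */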
struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can deliver the pending event.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time's tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to
 * will be assigned the current time's tv_usec value when the fence
 * signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

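/*
 * The fence manager can be recovered from any of its fences, since
 * dma_fence_init() is handed a pointer to the manager's @lock member.
 */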
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

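/*
 * The fence goal is accessed through a dedicated SVGA register when the
 * device advertises SVGA_CAP2_EXTRA_REGS, and through fifo memory
 * otherwise.
 */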
static u32 vmw_fence_goal_read(struct vmw_private *vmw)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
	else
		return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
}

static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
	else
		vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that irq
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

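/*
 * The dma_fence_ops release callback: unlink the fence from the
 * manager's list and hand it over to its type-specific destructor.
 */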
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

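/*
 * The dma_fence_ops enable_signaling callback, called with the fence
 * spinlock held. Returning false here means the seqno has already
 * passed, and the dma_fence core will signal the fence immediately.
 */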
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 seqno = vmw_fence_read(dev_priv);

	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

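/* Fence callback waking up the task sleeping in vmw_fence_wait(). */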
static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

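/*
 * The dma_fence_ops wait callback. Unlike the default wait, this one
 * re-reads the device seqno via __vmw_fences_update() on every
 * iteration, since vmwgfx fences are only signaled when the fencing
 * subsystem polls for them; see the note on irq usage above.
 */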
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}


static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * so fman->lock need not be held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

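/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Return: The new fence manager, or NULL on allocation failure. The
 * manager starts out with @fifo_down set; fence creation is enabled by
 * a subsequent call to vmw_fence_fifo_up().
 */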
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

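/**
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 *
 * @fman: Pointer to the fence manager.
 *
 * Cancels any pending cleanup work. Both the fence list and the
 * cleanup list must be empty at this point.
 */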
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

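/*
 * Initialize the dma_fence embedded in a vmw_fence_obj and add it to
 * the manager's fence list. Fails with -EBUSY if the fifo is down.
 */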
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */
		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Return: True if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fence_goal_write(fman->dev_priv,
					     fence->base.seqno);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Return: True if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}

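/*
 * Signal all fences whose seqno the device has passed, perform their
 * attached actions and update the fence goal if needed. Called with
 * the fence manager lock held.
 */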
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;

	seqno = vmw_fence_read(fman->dev_priv);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */
	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_fence_read(fman->dev_priv);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

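/**
 * vmw_fence_create - Create a kernel-only fence object.
 *
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence will signal on.
 * @p_fence: Assigned the new fence object on success.
 *
 * Return: Zero on success, negative error code otherwise.
 */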
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);

	ttm_base_object_kfree(ufence, base);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

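/**
 * vmw_user_fence_create - Create a fence object with a user-space handle.
 *
 * @file_priv: The file the fence handle is registered with.
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence will signal on.
 * @p_fence: Assigned the new fence object on success.
 * @p_handle: Assigned the user-space handle of the fence on success.
 *
 * Return: Zero on success, negative error code otherwise.
 */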
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	int ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);

	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release);
	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	return ret;
}

/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */
	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 *
 * Return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

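/*
 * Ioctl handler implementing the fence wait ioctl. On the first call,
 * an absolute timeout cookie is stored in the argument structure, so
 * that a wait restarted after a signal doesn't extend the total wait
 * time.
 */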
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not available on all 32-bit architectures,
	 * so approximate the division by 1000000:
	 * 1/2^20 + 1/2^24 - 1/2^26 ~= 0.99838e-6, an error of less than
	 * 0.2 percent.
	 */
	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */
	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle);
}

/**
 * vmw_event_fence_action_seq_passed - deliver an event when a fence
 * seqno passes.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup - destroy an event fence action.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time's tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to
 * will be assigned the current time's tv_usec value when the fence
 * signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base,
				     &event->event.base);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

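/*
 * Ioctl handler attaching a drm event to a fence object. The fence is
 * either looked up from arg->handle or, if the handle is zero, created
 * for the commands currently submitted to the device.
 */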
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}