/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */

#include "drm-uapi/sync_file.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "intel/common/intel_gem.h"

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"

static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_bufmgr *bufmgr)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

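/**
 * Destroy the wrapper and the underlying kernel syncobj.
 *
 * In this file, references are normally dropped via iris_syncobj_reference()
 * with a NULL source, so this is typically reached only once the last
 * reference goes away.
 */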
void
iris_syncobj_destroy(struct iris_bufmgr *bufmgr, struct iris_syncobj *syncobj)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   gem_syncobj_destroy(fd, syncobj->handle);
   free(syncobj);
}

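/**
 * Signal the syncobj from the CPU, without involving the GPU.
 */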
void
iris_syncobj_signal(struct iris_bufmgr *bufmgr, struct iris_syncobj *syncobj)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   struct drm_syncobj_array args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
   };

   if (intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &args)) {
      fprintf(stderr, "failed to signal syncobj %"PRIu32"\n",
              syncobj->handle);
   }
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags   One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen->bufmgr, store, syncobj);
}
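
/*
 * A minimal usage sketch (illustrative only): this mirrors what
 * iris_fence_await() below does when recording a cross-context dependency:
 *
 *    clear_stale_syncobjs(batch);
 *    iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
 *
 * The fence then rides along in batch->exec_fences until the batch is
 * submitted.
 */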

/**
 * Walk through a batch's dependencies (any I915_EXEC_FENCE_WAIT syncobjs)
 * and unreference any which have already passed.
 *
 * A seldom-used batch (the compute batch, for example) can accumulate
 * references to stale render batches that are no longer of interest;
 * dropping those references lets the stale batches be freed.
 */
static void
clear_stale_syncobjs(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   int n = util_dynarray_num_elements(&batch->syncobjs, struct iris_syncobj *);

   assert(n == util_dynarray_num_elements(&batch->exec_fences,
                                          struct drm_i915_gem_exec_fence));

   /* Skip the first syncobj, as it's the signalling one. */
   for (int i = n - 1; i > 0; i--) {
      struct iris_syncobj **syncobj =
         util_dynarray_element(&batch->syncobjs, struct iris_syncobj *, i);
      struct drm_i915_gem_exec_fence *fence =
         util_dynarray_element(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence, i);
      assert(fence->flags & I915_EXEC_FENCE_WAIT);

      if (iris_wait_syncobj(bufmgr, *syncobj, 0))
         continue;

      /* This sync object has already passed, there's no need to continue
       * marking it as a dependency; we can stop holding on to the reference.
       */
      iris_syncobj_reference(bufmgr, syncobj, NULL);

      /* Remove it from the lists; move the last element here. */
      struct iris_syncobj **nth_syncobj =
         util_dynarray_pop_ptr(&batch->syncobjs, struct iris_syncobj *);
      struct drm_i915_gem_exec_fence *nth_fence =
         util_dynarray_pop_ptr(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence);

      if (syncobj != nth_syncobj) {
         *syncobj = *nth_syncobj;
         memcpy(fence, nth_fence, sizeof(*fence));
      }
   }
}

/* ------------------------------------------------------------------- */

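/**
 * The fence object handed back to Gallium.
 *
 * It holds up to one fine-grained fence per batch.  unflushed_ctx is
 * non-NULL while the fence came from a deferred flush whose batches have
 * not been submitted yet (see iris_fence_flush() / iris_fence_finish()).
 */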
struct pipe_fence_handle {
   struct pipe_reference ref;

   struct pipe_context *unflushed_ctx;

   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};

static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
      iris_fine_fence_reference(screen, &fence->fine[i], NULL);

   free(fence);
}

static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

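/**
 * Wait for a syncobj to be signalled.
 *
 * Note the return convention: this returns the raw ioctl result, i.e.
 * zero (false) when the syncobj signalled within the timeout, and non-zero
 * (true) on timeout or error.  A timeout of 0 acts as a poll, which is how
 * clear_stale_syncobjs() uses it.
 */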
bool
iris_wait_syncobj(struct iris_bufmgr *bufmgr,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   int fd = iris_bufmgr_get_fd(bufmgr);

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

#define CSI "\e["
#define BLUE_HEADER  CSI "0;97;44m"
#define NORMAL       CSI "0m"

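/**
 * Flush the context's batches, implementing pipe_context::flush.
 *
 * With PIPE_FLUSH_DEFERRED (and a kernel supporting WAIT_FOR_SUBMIT), the
 * batches are left unsubmitted and the returned fence remembers its
 * creating context so the flush can be done later in iris_fence_finish().
 */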
static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   /* We require DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (kernel 5.2+) for
    * deferred flushes.  Just ignore the request to defer on older kernels.
    */
   if (!(screen->kernel_features & KERNEL_HAS_WAIT_FOR_SUBMIT))
      flags &= ~PIPE_FLUSH_DEFERRED;

   const bool deferred = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG(DEBUG_SUBMIT)) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 INTEL_DEBUG(DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 INTEL_DEBUG(DEBUG_COLOR) ? NORMAL : "");
      }
   }

   iris_flush_dirty_dmabufs(ice);

   if (!deferred) {
      iris_foreach_batch(ice, batch)
         iris_batch_flush(batch);
   }

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      iris_measure_frame_end(ice);
   }

   u_trace_context_process(&ice->ds.trace_context,
                           flags & PIPE_FLUSH_END_OF_FRAME);

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   if (deferred)
      fence->unflushed_ctx = ctx;

   iris_foreach_batch(ice, batch) {
      unsigned b = batch->name;

      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
         iris_fine_fence_reference(screen, &fence->fine[b], fine);
         iris_fine_fence_reference(screen, &fine, NULL);
      } else {
         /* This batch has no commands queued up (perhaps we just flushed,
          * or all the commands are on the other batch).  Wait for the last
          * syncobj on this engine - unless it's already finished by now.
          */
         if (iris_fine_fence_signaled(batch->last_fence))
            continue;

         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
      }
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}

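/**
 * Make all future work in this context wait for the given fence,
 * implementing pipe_context::fence_server_sync.
 */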
static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Unflushed fences from the same context are no-ops. */
   if (ctx && ctx == fence->unflushed_ctx)
      return;

   /* XXX: We can't safely flush the other context, because it might be
    *      bound to another thread, and poking at its internals wouldn't
    *      be safe.  In the future we should use MI_SEMAPHORE_WAIT and
    *      block until the other job has been submitted, relying on
    *      kernel timeslicing to preempt us until the other job is
    *      actually flushed and the seqno finally passes.
    */
   if (fence->unflushed_ctx) {
      util_debug_message(&ice->dbg, CONFORMANCE, "%s",
                         "glWaitSync on unflushed fence from another context "
                         "is unlikely to work without kernel 5.8+\n");
   }

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      iris_foreach_batch(ice, batch) {
         /* We're going to make any future work in this batch wait for our
          * fence to have gone by.  But any currently queued work doesn't
          * need to wait.  Flush the batch now, so it can happen sooner.
          */
         iris_batch_flush(batch);

         /* Before adding a new reference, clean out any stale ones. */
         clear_stale_syncobjs(batch);

         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
}

#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

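/**
 * Convert the relative timeout used by the Gallium interface into the
 * absolute CLOCK_MONOTONIC deadline expected by DRM_IOCTL_SYNCOBJ_WAIT,
 * clamped so the result never exceeds INT64_MAX.
 */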
static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}

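/**
 * Wait for the fence to complete, implementing pipe_screen::fence_finish.
 *
 * Returns true if every fine fence signalled within the timeout.
 */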
static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   ctx = threaded_context_unwrap_sync(ctx);

   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   /* If we created the fence with PIPE_FLUSH_DEFERRED, we may not have
    * flushed yet.  Check if our syncobj is the current batch's signalling
    * syncobj - if so, we haven't flushed and need to now.
    *
    * The Gallium docs mention that a flush will occur if \p ctx matches
    * the context the fence was created with.  It may be NULL, so we check
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      iris_foreach_batch(ice, batch) {
         struct iris_fine_fence *fine = fence->fine[batch->name];

         if (iris_fine_fence_signaled(fine))
            continue;

         if (fine->syncobj == iris_batch_get_signal_syncobj(batch))
            iris_batch_flush(batch);
      }

      /* The fence is no longer deferred. */
      fence->unflushed_ctx = NULL;
   }

   unsigned int handle_count = 0;
   uint32_t handles[ARRAY_SIZE(fence->fine)];
   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      handles[handle_count++] = fine->syncobj->handle;
   }

   if (handle_count == 0)
      return true;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = handle_count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   if (fence->unflushed_ctx) {
      /* This fence had a deferred flush from another context.  We can't
       * safely flush it here, because the context might be bound to a
       * different thread, and poking at its internals wouldn't be safe.
       *
       * Instead, use the WAIT_FOR_SUBMIT flag to block and hope that
       * another thread submits the work.
       */
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   }

   return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

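/**
 * Merge two sync_file fds into one that signals once both have signalled,
 * using SYNC_IOC_MERGE.  Both inputs are closed; if either fd is -1, the
 * other is returned as-is.
 */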
static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   intel_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}

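/**
 * Export the fence as a sync_file fd, implementing pipe_screen::fence_get_fd.
 *
 * Each unsignalled fine fence's syncobj is exported to a sync_file, and the
 * results are merged into a single fd.
 */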
static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   /* Deferred fences aren't supported. */
   if (fence->unflushed_ctx)
      return -1;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      struct drm_syncobj_handle args = {
         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   if (fd == -1) {
      /* Our fence has no syncobjs recorded.  This means that all of the
       * batches had already completed, their syncobjs had been signalled,
       * and so we didn't bother to record them.  But we're being asked to
       * export such a fence.  So export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);
      return args.fd;
   }

   return fd;
}

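/**
 * Import a fence fd as a pipe_fence_handle, implementing
 * pipe_context::create_fence_fd.
 *
 * The fd may be a sync_file (PIPE_FD_TYPE_NATIVE_SYNC), which gets imported
 * into a freshly created syncobj, or an exported syncobj fd
 * (PIPE_FD_TYPE_SYNCOBJ).  On failure, *out is set to NULL.
 */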
static void
iris_fence_create_fd(struct pipe_context *ctx,
                     struct pipe_fence_handle **out,
                     int fd,
                     enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC || type == PIPE_FD_TYPE_SYNCOBJ);

   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   if (type == PIPE_FD_TYPE_NATIVE_SYNC) {
      args.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
   }

   if (intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
      fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
              strerror(errno));
      if (type == PIPE_FD_TYPE_NATIVE_SYNC)
         gem_syncobj_destroy(screen->fd, args.handle);
      *out = NULL;
      return;
   }

   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
   if (!syncobj) {
      *out = NULL;
      return;
   }
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);

   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
   if (!fine) {
      free(syncobj);
      *out = NULL;
      return;
   }

   static const uint32_t zero = 0;

   /* Fences work in terms of iris_fine_fence, but we don't actually have a
    * seqno for an imported fence.  So, create a fake one which always
    * returns as 'not signaled' so we fall back to using the sync object.
    */
   fine->seqno = UINT32_MAX;
   fine->map = &zero;
   fine->syncobj = syncobj;
   fine->flags = IRIS_FENCE_END;
   pipe_reference_init(&fine->reference, 1);

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence) {
      free(fine);
      free(syncobj);
      *out = NULL;
      return;
   }
   pipe_reference_init(&fence->ref, 1);
   fence->fine[0] = fine;

   *out = fence;
}

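/**
 * Have the GPU signal the fence's syncobjs, implementing
 * pipe_context::fence_server_signal.
 *
 * Any batch that picked up an I915_EXEC_FENCE_SIGNAL is flushed so the
 * signal operation actually gets submitted.
 */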
static void
iris_fence_signal(struct pipe_context *ctx,
                  struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (ctx == fence->unflushed_ctx)
      return;

   iris_foreach_batch(ice, batch) {
      for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         /* Skip fences that have already signaled. */
         if (iris_fine_fence_signaled(fine))
            continue;

         batch->contains_fence_signal = true;
         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_SIGNAL);
      }
      if (batch->contains_fence_signal)
         iris_batch_flush(batch);
   }
}

void
iris_init_screen_fence_functions(struct pipe_screen *screen)
{
   screen->fence_reference = iris_fence_reference;
   screen->fence_finish = iris_fence_finish;
   screen->fence_get_fd = iris_fence_get_fd;
}

void
iris_init_context_fence_functions(struct pipe_context *ctx)
{
   ctx->flush = iris_fence_flush;
   ctx->create_fence_fd = iris_fence_create_fd;
   ctx->fence_server_sync = iris_fence_await;
   ctx->fence_server_signal = iris_fence_signal;
}