/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "util/os_time.h"

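/* Downcast a generic vk_sync to our BO-backed implementation.  Only valid
 * for syncs created with anv_bo_sync_type, which the assert enforces.
 */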
static struct anv_bo_sync *
to_anv_bo_sync(struct vk_sync *sync)
{
   assert(sync->type == &anv_bo_sync_type);
   return container_of(sync, struct anv_bo_sync, sync);
}

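/* vk_sync init hook.  The sync's backing store is a small BO allocated with
 * implicit sync enabled: GPU work that touches the BO implicitly fences it,
 * so waiting on the sync later reduces to waiting on the BO.
 */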
static VkResult
anv_bo_sync_init(struct vk_device *vk_device,
                 struct vk_sync *vk_sync,
                 uint64_t initial_value)
{
   struct anv_device *device = container_of(vk_device, struct anv_device, vk);
   struct anv_bo_sync *sync = to_anv_bo_sync(vk_sync);

   sync->state = initial_value ? ANV_BO_SYNC_STATE_SIGNALED :
                                 ANV_BO_SYNC_STATE_RESET;

   return anv_device_alloc_bo(device, "bo-sync", 4096,
                              ANV_BO_ALLOC_EXTERNAL |
                              ANV_BO_ALLOC_IMPLICIT_SYNC,
                              0 /* explicit_address */,
                              &sync->bo);
}

static void
anv_bo_sync_finish(struct vk_device *vk_device,
                   struct vk_sync *vk_sync)
{
   struct anv_device *device = container_of(vk_device, struct anv_device, vk);
   struct anv_bo_sync *sync = to_anv_bo_sync(vk_sync);

   anv_device_release_bo(device, sync->bo);
}

static VkResult
anv_bo_sync_reset(struct vk_device *vk_device,
                  struct vk_sync *vk_sync)
{
   struct anv_bo_sync *sync = to_anv_bo_sync(vk_sync);

   sync->state = ANV_BO_SYNC_STATE_RESET;

   return VK_SUCCESS;
}

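/* Convert the absolute nanosecond timeout used by the vk_sync API into the
 * relative timeout expected by the kernel's GEM wait.
 */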
static int64_t
anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = os_time_get_nano();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64-bit timeout and is supposed
    * to block indefinitely for timeouts < 0.  Unfortunately, this was
    * broken for a couple of kernel releases.  Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX.  This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}

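/* vk_sync wait_many hook.  BO syncs carry no kernel-visible signal state of
 * their own, so we poll the software state machine: SUBMITTED syncs are
 * waited on through the kernel via anv_device_wait(), SIGNALED syncs are
 * skipped, and RESET (not-yet-submitted) syncs force another pass.  When
 * every remaining sync is still unsubmitted, we block on the device's
 * queue_submit condition variable instead of busy-looping.
 */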
static VkResult
anv_bo_sync_wait(struct vk_device *vk_device,
                 uint32_t wait_count,
                 const struct vk_sync_wait *waits,
                 enum vk_sync_wait_flags wait_flags,
                 uint64_t abs_timeout_ns)
{
   struct anv_device *device = container_of(vk_device, struct anv_device, vk);
   VkResult result;

   uint32_t pending = wait_count;
   while (pending) {
      pending = 0;
      bool signaled = false;
      for (uint32_t i = 0; i < wait_count; i++) {
         struct anv_bo_sync *sync = to_anv_bo_sync(waits[i].sync);
         switch (sync->state) {
         case ANV_BO_SYNC_STATE_RESET:
            /* This fence hasn't been submitted yet, so we'll catch it the
             * next time around.  Yes, this may mean we dead-loop but, short
             * of lots of locking and a condition variable, there's not much
             * that we can do about that.
             */
            assert(!(wait_flags & VK_SYNC_WAIT_PENDING));
            pending++;
            continue;

         case ANV_BO_SYNC_STATE_SIGNALED:
            /* This fence is not pending.  If we only need one fence to
             * signal (VK_SYNC_WAIT_ANY), we can return early.  Otherwise,
             * we have to keep going.
             */
            if (wait_flags & VK_SYNC_WAIT_ANY)
               return VK_SUCCESS;
            continue;

         case ANV_BO_SYNC_STATE_SUBMITTED:
            /* These are the fences we really care about.  Go ahead and wait
             * on them until we hit a timeout.
             */
            if (!(wait_flags & VK_SYNC_WAIT_PENDING)) {
               uint64_t rel_timeout = anv_get_relative_timeout(abs_timeout_ns);
               result = anv_device_wait(device, sync->bo, rel_timeout);
               /* This also covers VK_TIMEOUT */
               if (result != VK_SUCCESS)
                  return result;

               sync->state = ANV_BO_SYNC_STATE_SIGNALED;
               signaled = true;
            }
            if (wait_flags & VK_SYNC_WAIT_ANY)
               return VK_SUCCESS;
            break;

         default:
            unreachable("Invalid BO sync state");
         }
      }

      if (pending && !signaled) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since
          * the last time we checked.  Now that we have the lock, check for
          * pending fences again, and don't block if anything has changed.
          */
         uint32_t now_pending = 0;
         for (uint32_t i = 0; i < wait_count; i++) {
            struct anv_bo_sync *sync = to_anv_bo_sync(waits[i].sync);
            if (sync->state == ANV_BO_SYNC_STATE_RESET)
               now_pending++;
         }
         assert(now_pending <= pending);

         if (now_pending == pending) {
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

            ASSERTED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (os_time_get_nano() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               return VK_TIMEOUT;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

   return VK_SUCCESS;
}

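/* A binary vk_sync implementation backed by a GEM BO.  Signaling is
 * implicit: the kernel tracks GPU work against the BO, so there is no
 * explicit signal hook here; CPU waits land in anv_bo_sync_wait() above.
 */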
const struct vk_sync_type anv_bo_sync_type = {
   .size = sizeof(struct anv_bo_sync),
   .features = VK_SYNC_FEATURE_BINARY |
               VK_SYNC_FEATURE_GPU_WAIT |
               VK_SYNC_FEATURE_GPU_MULTI_WAIT |
               VK_SYNC_FEATURE_CPU_WAIT |
               VK_SYNC_FEATURE_CPU_RESET |
               VK_SYNC_FEATURE_WAIT_ANY |
               VK_SYNC_FEATURE_WAIT_PENDING,
   .init = anv_bo_sync_init,
   .finish = anv_bo_sync_finish,
   .reset = anv_bo_sync_reset,
   .wait_many = anv_bo_sync_wait,
};

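/* Wrap a VkDeviceMemory's BO in a vk_sync so implicit synchronization on
 * externally-visible memory (WSI images, for instance) can be expressed
 * through the vk_sync machinery.  With signal_memory set, the sync starts
 * out RESET and is meant to signal the memory's BO when submitted;
 * otherwise it starts out SUBMITTED so that a wait covers whatever GPU
 * work already owns the BO.
 */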
VkResult
anv_create_sync_for_memory(struct vk_device *device,
                           VkDeviceMemory memory,
                           bool signal_memory,
                           struct vk_sync **sync_out)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, memory);
   struct anv_bo_sync *bo_sync;

   bo_sync = vk_zalloc(&device->alloc, sizeof(*bo_sync), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (bo_sync == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   bo_sync->sync.type = &anv_bo_sync_type;
   bo_sync->state = signal_memory ? ANV_BO_SYNC_STATE_RESET :
                                    ANV_BO_SYNC_STATE_SUBMITTED;
   bo_sync->bo = anv_bo_ref(mem->bo);

   *sync_out = &bo_sync->sync;

   return VK_SUCCESS;
}