/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>
#include <xcb/randr.h>
#include <xcb/shm.h>
#include <xcb/xfixes.h>

#include "util/macros.h"
#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"
#include "util/hash_table.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/u_debug.h"
#include "util/u_thread.h"
#include "util/xmlconfig.h"

#include "vk_instance.h"
#include "vk_physical_device.h"
#include "vk_util.h"
#include "vk_enum_to_str.h"
#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "wsi_common_queue.h"

#ifdef HAVE_SYS_SHM_H
#include <sys/ipc.h>
#include <sys/shm.h>
#endif

struct wsi_x11_connection {
   bool has_dri3;
   bool has_dri3_modifiers;
   bool has_present;
   bool is_proprietary_x11;
   bool is_xwayland;
   bool has_mit_shm;
   bool has_xfixes;
};

struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t                              mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};


/**
 * Wrapper around xcb_dri3_open. Returns the opened fd or -1 on error.
 */
static int
wsi_dri3_open(xcb_connection_t *conn,
              xcb_window_t root,
              uint32_t provider)
{
   xcb_dri3_open_cookie_t       cookie;
   xcb_dri3_open_reply_t        *reply;
   int                          fd;

   cookie = xcb_dri3_open(conn,
                          root,
                          provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   /* According to the DRI3 extension, nfd must equal one. */
   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
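   /* Keep the fd from leaking into any child processes we might spawn. */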
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}

/**
 * Checks compatibility of the device wsi_dev with the device the X server
 * provides via DRI3.
 *
 * This returns true when no device could be retrieved from the X server or
 * when the information for the X server device indicates that it is the same
 * device.
 */
static bool
wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
                              xcb_connection_t *conn)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));
   xcb_screen_t *screen = screen_iter.data;

   /* Open the DRI3 device from the X server. If we do not retrieve one we
    * assume our local device is compatible.
    */
   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
   if (dri3_fd == -1)
      return true;

   bool match = wsi_device_matches_drm_fd(wsi_dev, dri3_fd);

   close(dri3_fd);

   return match;
}

static bool
wsi_x11_detect_xwayland(xcb_connection_t *conn)
{
   xcb_randr_query_version_cookie_t ver_cookie =
      xcb_randr_query_version_unchecked(conn, 1, 3);
   xcb_randr_query_version_reply_t *ver_reply =
      xcb_randr_query_version_reply(conn, ver_cookie, NULL);
   bool has_randr_v1_3 = ver_reply && (ver_reply->major_version > 1 ||
                                       ver_reply->minor_version >= 3);
   free(ver_reply);

   if (!has_randr_v1_3)
      return false;

   const xcb_setup_t *setup = xcb_get_setup(conn);
   xcb_screen_iterator_t iter = xcb_setup_roots_iterator(setup);

   xcb_randr_get_screen_resources_current_cookie_t gsr_cookie =
      xcb_randr_get_screen_resources_current_unchecked(conn, iter.data->root);
   xcb_randr_get_screen_resources_current_reply_t *gsr_reply =
      xcb_randr_get_screen_resources_current_reply(conn, gsr_cookie, NULL);

   if (!gsr_reply || gsr_reply->num_outputs == 0) {
      free(gsr_reply);
      return false;
   }

   xcb_randr_output_t *randr_outputs =
      xcb_randr_get_screen_resources_current_outputs(gsr_reply);
   xcb_randr_get_output_info_cookie_t goi_cookie =
      xcb_randr_get_output_info(conn, randr_outputs[0], gsr_reply->config_timestamp);
   free(gsr_reply);

   xcb_randr_get_output_info_reply_t *goi_reply =
      xcb_randr_get_output_info_reply(conn, goi_cookie, NULL);
   if (!goi_reply) {
      return false;
   }

   char *output_name = (char*)xcb_randr_get_output_info_name(goi_reply);
   bool is_xwayland = output_name && strncmp(output_name, "XWAYLAND", 8) == 0;
   free(goi_reply);

   return is_xwayland;
}

static struct wsi_x11_connection *
wsi_x11_connection_create(struct wsi_device *wsi_dev,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie,
                                amd_cookie, nv_cookie, shm_cookie, sync_cookie,
                                xfixes_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply,
                               *amd_reply, *nv_reply, *shm_reply = NULL,
                               *xfixes_reply;
   bool wants_shm = wsi_dev->sw && !(WSI_DEBUG & WSI_DEBUG_NOSHM) &&
                    wsi_dev->has_import_memory_host;
   bool has_dri3_v1_2 = false;
   bool has_present_v1_2 = false;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   sync_cookie = xcb_query_extension(conn, 4, "SYNC");
   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");
   randr_cookie = xcb_query_extension(conn, 5, "RANDR");
   xfixes_cookie = xcb_query_extension(conn, 6, "XFIXES");

   if (wants_shm)
      shm_cookie = xcb_query_extension(conn, 7, "MIT-SHM");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled.  However, this ends
    * up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers and is
    * running on the discrete card with the proprietary DDX.  In this case, we
    * really don't want to print the warning because it just confuses users.
    * As a heuristic to detect this case, we check for a couple of proprietary
    * X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   xcb_discard_reply(conn, sync_cookie.sequence);
   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   randr_reply = xcb_query_extension_reply(conn, randr_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   xfixes_reply = xcb_query_extension_reply(conn, xfixes_cookie, NULL);
   if (wants_shm)
      shm_reply = xcb_query_extension_reply(conn, shm_cookie, NULL);
   if (!dri3_reply || !pres_reply || !xfixes_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(xfixes_reply);
      free(randr_reply);
      free(amd_reply);
      free(nv_reply);
      if (wants_shm)
         free(shm_reply);
      vk_free(&wsi_dev->instance_alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_dri3) {
      xcb_dri3_query_version_cookie_t ver_cookie;
      xcb_dri3_query_version_reply_t *ver_reply;

      ver_cookie = xcb_dri3_query_version(conn, 1, 2);
      ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
      has_dri3_v1_2 = ver_reply != NULL &&
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_present = pres_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_present) {
      xcb_present_query_version_cookie_t ver_cookie;
      xcb_present_query_version_reply_t *ver_reply;

      ver_cookie = xcb_present_query_version(conn, 1, 2);
      ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
      has_present_v1_2 = ver_reply != NULL &&
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_xfixes = xfixes_reply->present != 0;
   if (wsi_conn->has_xfixes) {
      xcb_xfixes_query_version_cookie_t ver_cookie;
      xcb_xfixes_query_version_reply_t *ver_reply;

      ver_cookie = xcb_xfixes_query_version(conn, 6, 0);
      ver_reply = xcb_xfixes_query_version_reply(conn, ver_cookie, NULL);
      wsi_conn->has_xfixes = ver_reply != NULL &&
                             ver_reply->major_version >= 2;
      free(ver_reply);
   }

   if (randr_reply && randr_reply->present != 0)
      wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn);
   else
      wsi_conn->is_xwayland = false;

   wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   wsi_conn->has_mit_shm = false;
   if (wsi_conn->has_dri3 && wsi_conn->has_present && wants_shm) {
      bool has_mit_shm = shm_reply && shm_reply->present != 0;

      xcb_shm_query_version_cookie_t ver_cookie;
      xcb_shm_query_version_reply_t *ver_reply;

      ver_cookie = xcb_shm_query_version(conn);
      ver_reply = xcb_shm_query_version_reply(conn, ver_cookie, NULL);

      has_mit_shm = has_mit_shm && ver_reply != NULL &&
                    ver_reply->shared_pixmaps;
      free(ver_reply);
      xcb_void_cookie_t cookie;
      xcb_generic_error_t *error;

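      /* An advertised extension does not guarantee the server actually
       * implements the MIT-SHM requests. Probe it by detaching a bogus
       * segment: a working implementation answers with some error other
       * than BadRequest, while BadRequest means the request is unknown.
       */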
      if (has_mit_shm) {
         cookie = xcb_shm_detach_checked(conn, 0);
         if ((error = xcb_request_check(conn, cookie))) {
            if (error->error_code != BadRequest)
               wsi_conn->has_mit_shm = true;
            free(error);
         }
      }
   }

   free(dri3_reply);
   free(pres_reply);
   free(randr_reply);
   free(amd_reply);
   free(nv_reply);
   free(xfixes_reply);
   if (wants_shm)
      free(shm_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
                           struct wsi_x11_connection *conn)
{
   vk_free(&wsi_dev->instance_alloc, conn);
}

static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
  if (wsi_conn->has_dri3)
    return true;
  if (!wsi_conn->is_proprietary_x11) {
    fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                    "Note: you can probably enable DRI3 in your Xorg config\n");
  }
  return false;
}

/**
 * Get the internal struct representing an xcb_connection_t.
 *
 * This may allocate the struct, but the caller does not own it. It is freed
 * in wsi_x11_finish_wsi by the hash table it is inserted into.
 *
 * If the allocation fails, NULL is returned.
 */
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(wsi_dev, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(wsi_dev, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

struct surface_format {
   VkFormat format;
   unsigned bits_per_rgb;
};

static const struct surface_format formats[] = {
   { VK_FORMAT_B8G8R8A8_SRGB,             8 },
   { VK_FORMAT_B8G8R8A8_UNORM,            8 },
   { VK_FORMAT_A2R10G10B10_UNORM_PACK32, 10 },
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
   VK_PRESENT_MODE_FIFO_RELAXED_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator (depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, NULL);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

static bool
visual_supported(xcb_visualtype_t *visual)
{
   if (!visual)
      return false;

   return visual->bits_per_rgb_value == 8 || visual->bits_per_rgb_value == 10;
}

VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                               uint32_t queueFamilyIndex,
                                               xcb_connection_t *connection,
                                               xcb_visualid_t visual_id)
{
   VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
   struct wsi_device *wsi_device = pdevice->wsi_device;
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_device->sw) {
      if (!wsi_x11_check_for_dri3(wsi_conn))
         return false;
   }

   if (!visual_supported(connection_get_visualtype(connection, visual_id)))
      return false;

   return true;
}

VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                uint32_t queueFamilyIndex,
                                                Display *dpy,
                                                VisualID visualID)
{
   return wsi_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
                                                         queueFamilyIndex,
                                                         XGetXCBConnection(dpy),
                                                         visualID);
}

static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_device->sw) {
      if (!wsi_x11_check_for_dri3(wsi_conn)) {
         *pSupported = false;
         return VK_SUCCESS;
      }
   }

   if (!visual_supported(get_visualtype_for_window(conn, window, NULL))) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

static uint32_t
x11_get_min_image_count(const struct wsi_device *wsi_device)
{
   if (wsi_device->x11.override_minImageCount)
      return wsi_device->x11.override_minImageCount;

   /* For IMMEDIATE and FIFO, most games work in a pipelined manner where
    * they can produce frames at a rate of 1/MAX(CPU duration, GPU duration),
    * but the render latency is CPU duration + GPU duration.
    *
    * This means that with scanout from pageflipping we need 3 frames to run
    * full speed:
    * 1) CPU rendering work
    * 2) GPU rendering work
    * 3) scanout
    *
    * Once we have a nonblocking acquire that returns a semaphore we can merge
    * 1 and 3. Hence the ideal implementation needs only 2 images, but games
    * cannot tell that we currently do not have an ideal implementation and
    * that hence they need to allocate 3 images. So let us do it for them.
    *
    * This is a tradeoff as it uses more memory than needed for non-fullscreen
    * and non-performance intensive applications.
    */
   return 3;
}

static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             struct wsi_device *wsi_device,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   if (!visual)
      return VK_ERROR_SURFACE_LOST_KHR;

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   }
   free(err);
   free(geom);
   if (!geom)
       return VK_ERROR_SURFACE_LOST_KHR;

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   caps->minImageCount = x11_get_min_image_count(wsi_device);
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_STORAGE_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
      VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
                              struct wsi_device *wsi_device,
                              const void *info_next,
                              VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   VkResult result =
      x11_surface_get_capabilities(icd_surface, wsi_device,
                                   &caps->surfaceCapabilities);

   if (result != VK_SUCCESS)
      return result;

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}

static bool
get_sorted_vk_formats(VkIcdSurfaceBase *surface, struct wsi_device *wsi_device,
                      VkFormat *sorted_formats, unsigned *count)
{
   xcb_connection_t *conn = x11_surface_get_connection(surface);
   xcb_window_t window = x11_surface_get_window(surface);
   xcb_visualtype_t *visual = get_visualtype_for_window(conn, window, NULL);
   if (!visual)
      return false;

   *count = 0;
   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      if (formats[i].bits_per_rgb == visual->bits_per_rgb_value)
         sorted_formats[(*count)++] = formats[i].format;
   }

   if (wsi_device->force_bgra8_unorm_first) {
      for (unsigned i = 0; i < *count; i++) {
         if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
            sorted_formats[i] = sorted_formats[0];
            sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
            break;
         }
      }
   }

   return true;
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   unsigned count;
   VkFormat sorted_formats[ARRAY_SIZE(formats)];
   if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
      return VK_ERROR_SURFACE_LOST_KHR;

   for (unsigned i = 0; i < count; i++) {
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
         f->format = sorted_formats[i];
         f->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_formats2(VkIcdSurfaceBase *surface,
                         struct wsi_device *wsi_device,
                         const void *info_next,
                         uint32_t *pSurfaceFormatCount,
                         VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   unsigned count;
   VkFormat sorted_formats[ARRAY_SIZE(formats)];
   if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
      return VK_ERROR_SURFACE_LOST_KHR;

   for (unsigned i = 0; i < count; i++) {
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
         assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
         f->surfaceFormat.format = sorted_formats[i];
         f->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}

static VkResult
x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
                                   struct wsi_device *wsi_device,
                                   uint32_t* pRectCount,
                                   VkRect2D* pRects)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);

   vk_outarray_append_typed(VkRect2D, &out, rect) {
      xcb_generic_error_t *err = NULL;
      xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
      xcb_get_geometry_reply_t *geom =
         xcb_get_geometry_reply(conn, geom_cookie, &err);
      free(err);
      if (geom) {
         *rect = (VkRect2D) {
            .offset = { 0, 0 },
            .extent = { geom->width, geom->height },
         };
      }
      free(geom);
      if (!geom)
          return VK_ERROR_SURFACE_LOST_KHR;
   }

   return vk_outarray_status(&out);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateXcbSurfaceKHR(VkInstance _instance,
                        const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkSurfaceKHR *pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   VkIcdSurfaceXcb *surface;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateXlibSurfaceKHR(VkInstance _instance,
                         const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                         const VkAllocationCallbacks *pAllocator,
                         VkSurfaceKHR *pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   VkIcdSurfaceXlib *surface;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

struct x11_image {
   struct wsi_image                          base;
   xcb_pixmap_t                              pixmap;
   xcb_xfixes_region_t                       update_region;  /* long lived XID */
   xcb_xfixes_region_t                       update_area;    /* the above or None */
   bool                                      busy;           /* not available for acquire */
   bool                                      present_queued; /* present sent, CompleteNotify pending */
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
   uint32_t                                  serial;         /* matched against CompleteNotify */
   xcb_shm_seg_t                             shmseg;         /* MIT-SHM state for the sw path */
   int                                       shmid;
   uint8_t *                                 shmaddr;
};

struct x11_swapchain {
   struct wsi_swapchain                        base;

   bool                                         has_dri3_modifiers;
   bool                                         has_mit_shm;

   xcb_connection_t *                           conn;
   xcb_window_t                                 window;
   xcb_gc_t                                     gc;
   uint32_t                                     depth;
   VkExtent2D                                   extent;

   xcb_present_event_t                          event_id;
   xcb_special_event_t *                        special_event;
   uint64_t                                     send_sbc;          /* swap buffer count of the last present sent */
   uint64_t                                     last_present_msc;  /* MSC of the last completed present */
   uint32_t                                     stamp;
   atomic_int                                   sent_image_count;  /* images currently owned by the X server */

   bool                                         has_present_queue;
   bool                                         has_acquire_queue;
   VkResult                                     status;            /* sticky status, see x11_swapchain_result */
   bool                                         copy_is_suboptimal;
   struct wsi_queue                             present_queue;
   struct wsi_queue                             acquire_queue;
   pthread_t                                    queue_manager;

   struct x11_image                             images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
                               VK_OBJECT_TYPE_SWAPCHAIN_KHR)

/**
 * Update the swapchain status with the result of an operation, and return
 * the combined status. The chain status will eventually be returned from
 * AcquireNextImage and QueuePresent.
 *
 * We make sure to 'stick' more pessimistic statuses: an out-of-date error
 * is permanent once seen, and every subsequent call will return this. If
 * this has not been seen, success will be returned.
 */
static VkResult
_x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
                      const char *file, int line)
{
   /* Prioritise returning existing errors for consistency. */
   if (chain->status < 0)
      return chain->status;

   /* If we have a new error, mark it as permanent on the chain and return. */
   if (result < 0) {
#ifndef NDEBUG
      fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
              file, line, vk_Result_to_str(result));
#endif
      chain->status = result;
      return result;
   }

   /* Return temporary errors, but don't persist them. */
   if (result == VK_TIMEOUT || result == VK_NOT_READY)
      return result;

   /* Suboptimal isn't an error, but is a status which sticks to the swapchain
    * and is always returned rather than success.
    */
   if (result == VK_SUBOPTIMAL_KHR) {
#ifndef NDEBUG
      if (chain->status != VK_SUBOPTIMAL_KHR) {
         fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
                 file, line, vk_Result_to_str(result));
      }
#endif
      chain->status = result;
      return result;
   }

   /* No changes, so return the last status. */
   return chain->status;
}
#define x11_swapchain_result(chain, result) \
   _x11_swapchain_result(chain, result, __FILE__, __LINE__)

static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}

/**
 * Process an X11 Present event. Does not update chain->status.
 */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_SUBOPTIMAL_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            chain->sent_image_count--;
            assert(chain->sent_image_count >= 0);
            if (chain->has_acquire_queue)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         unsigned i;
         for (i = 0; i < chain->base.image_count; i++) {
            struct x11_image *image = &chain->images[i];
            if (image->present_queued && image->serial == complete->serial)
               image->present_queued = false;
         }
         chain->last_present_msc = complete->msc;
      }

      VkResult result = VK_SUCCESS;
      switch (complete->mode) {
      case XCB_PRESENT_COMPLETE_MODE_COPY:
         if (chain->copy_is_suboptimal)
            result = VK_SUBOPTIMAL_KHR;
         break;
      case XCB_PRESENT_COMPLETE_MODE_FLIP:
         /* If we ever go from flipping to copying, the odds are very likely
          * that we could reallocate in a more optimal way if we didn't have
          * to care about scanout, so we always do this.
          */
         chain->copy_is_suboptimal = true;
         break;
#ifdef HAVE_DRI3_MODIFIERS
      case XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY:
         /* The winsys is now trying to flip directly and cannot due to our
          * configuration. Request the user reallocate.
          */
         result = VK_SUBOPTIMAL_KHR;
         break;
#endif
      default:
         break;
      }

      return result;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}


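/*
 * Convert a relative timeout in nanoseconds to an absolute deadline,
 * clamping the addition so it cannot overflow UINT64_MAX.
 */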
static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = os_time_get_nano();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}

/**
 * Acquire a ready-to-use image directly from our swapchain. If all images
 * are busy, wait until one becomes free or the timeout expires.
 */
static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;
   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return x11_swapchain_result(chain, VK_SUCCESS);
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return x11_swapchain_result(chain, VK_ERROR_SURFACE_LOST_KHR);
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return x11_swapchain_result(chain, VK_NOT_READY);

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
            /* poll() takes milliseconds; timeout is in nanoseconds. */
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return x11_swapchain_result(chain, VK_TIMEOUT);
            if (ret == -1)
               return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);

            /* If a non-special event arrives, poll() will still wake up even
             * though there is nothing for us to handle. Recalculate the
             * remaining timeout before looping.
             */
            uint64_t current_time = os_time_get_nano();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      /* Update the swapchain status here. We may catch non-fatal errors here,
       * in which case we need to update the status and continue.
       */
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
      result = x11_swapchain_result(chain, result);
      free(event);
      if (result < 0)
         return result;
   }
}

/**
 * Acquire a ready-to-use image from the acquire-queue. Only relevant in fifo
 * presentation mode.
 */
static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->has_acquire_queue);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result < 0 || result == VK_TIMEOUT) {
      /* On error, the thread has shut down, so safe to update chain->status.
       * Calling x11_swapchain_result with VK_TIMEOUT won't modify
       * chain->status so that is also safe.
       */
      return x11_swapchain_result(chain, result);
   } else if (chain->status < 0) {
      return chain->status;
   }

   assert(image_index < chain->base.image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return chain->status;
}

/**
 * Send image to X server via Present extension.
 */
static VkResult
x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
                        uint64_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->base.image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

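   /* XCB_PRESENT_OPTION_ASYNC asks the server to present right away rather
    * than waiting for the target MSC, which is what IMMEDIATE and
    * FIFO_RELAXED want, and what MAILBOX needs under Xwayland's mailbox-like
    * presentation semantics.
    */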
   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
       (chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
        wsi_conn->is_xwayland) ||
       chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

#ifdef HAVE_DRI3_MODIFIERS
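   /* Ask the server to report SUBOPTIMAL_COPY completions, so we can tell
    * the application to reallocate when a flip was not possible with the
    * current pixmap configuration.
    */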
   if (chain->has_dri3_modifiers)
      options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif

   /* Poll for any available event and update the swapchain status. This could
    * update the status of the swapchain to SUBOPTIMAL or OUT_OF_DATE if the
    * associated X11 surface has been resized.
    */
   xcb_generic_event_t *event;
   while ((event = xcb_poll_for_special_event(chain->conn, chain->special_event))) {
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
      result = x11_swapchain_result(chain, result);
      free(event);
      if (result < 0)
         return result;
   }

   xshmfence_reset(image->shm_fence);

   ++chain->sent_image_count;
   assert(chain->sent_image_count <= chain->base.image_count);

   ++chain->send_sbc;
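   /* The Present protocol carries only a 32-bit serial, so the low bits of
    * our 64-bit SBC are what we get back in CompleteNotify events.
    */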
   image->present_queued = true;
   image->serial = (uint32_t) chain->send_sbc;

   xcb_void_cookie_t cookie =
      xcb_present_pixmap_checked(chain->conn,
                                 chain->window,
                                 image->pixmap,
                                 image->serial,
                                 0,                            /* valid */
                                 image->update_area,           /* update */
                                 0,                            /* x_off */
                                 0,                            /* y_off */
                                 XCB_NONE,                     /* target_crtc */
                                 XCB_NONE,
                                 image->sync_fence,
                                 options,
                                 target_msc,
                                 divisor,
                                 remainder, 0, NULL);
   xcb_generic_error_t *error = xcb_request_check(chain->conn, cookie);
   if (error) {
      free(error);
      return x11_swapchain_result(chain, VK_ERROR_SURFACE_LOST_KHR);
   }

   return x11_swapchain_result(chain, VK_SUCCESS);
}

/**
 * Send image to X server unaccelerated (software drivers).
 */
static VkResult
x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index,
                      uint64_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   xcb_void_cookie_t cookie;
   void *myptr = image->base.cpu_map;
   size_t hdr_len = sizeof(xcb_put_image_request_t);
   int stride_b = image->base.row_pitches[0];
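   /* X11 request lengths are measured in 4-byte units, hence the >> 2. If
    * the image does not fit in a single PutImage request, it is sent in
    * batches of rows below.
    */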
   size_t size = (hdr_len + stride_b * chain->extent.height) >> 2;
   uint64_t max_req_len = xcb_get_maximum_request_length(chain->conn);
   chain->images[image_index].busy = false;

   if (size < max_req_len) {
      cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
                             chain->window,
                             chain->gc,
                             image->base.row_pitches[0] / 4,
                             chain->extent.height,
                             0,0,0,24,
                             image->base.row_pitches[0] * chain->extent.height,
                             image->base.cpu_map);
      xcb_discard_reply(chain->conn, cookie.sequence);
   } else {
      int num_lines = ((max_req_len << 2) - hdr_len) / stride_b;
      int y_start = 0;
      int y_todo = chain->extent.height;
      while (y_todo) {
         int this_lines = MIN2(num_lines, y_todo);
         cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
                                chain->window,
                                chain->gc,
                                image->base.row_pitches[0] / 4,
                                this_lines,
                                0,y_start,0,24,
                                this_lines * stride_b,
                                (const uint8_t *)myptr + (y_start * stride_b));
         xcb_discard_reply(chain->conn, cookie.sequence);
         y_start += this_lines;
         y_todo -= this_lines;
      }
   }

   xcb_flush(chain->conn);
   return x11_swapchain_result(chain, VK_SUCCESS);
}

/**
 * Send image to the X server for presentation at target_msc.
 */
static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint64_t target_msc)
{
   if (chain->base.wsi->sw && !chain->has_mit_shm)
      return x11_present_to_x11_sw(chain, image_index, target_msc);
   return x11_present_to_x11_dri3(chain, image_index, target_msc);
}

/**
 * Acquire a ready-to-use image from the swapchain.
 *
 * Usually this means that the image is not waiting on presentation and that
 * it has been released by the X server for reuse by the consumer.
 */
static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       const VkAcquireNextImageInfoKHR *info,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   uint64_t timeout = info->timeout;

   /* If the swapchain is in an error state, don't go any further. */
   if (chain->status < 0)
      return chain->status;

   if (chain->base.wsi->sw && !chain->has_mit_shm) {
      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            *image_index = i;
            chain->images[i].busy = true;
            xcb_generic_error_t *err;

            xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(chain->conn, chain->window);
            xcb_get_geometry_reply_t *geom = xcb_get_geometry_reply(chain->conn, geom_cookie, &err);
            VkResult result = VK_SUCCESS;
            if (geom) {
               if (chain->extent.width != geom->width ||
                   chain->extent.height != geom->height)
                  result = VK_SUBOPTIMAL_KHR;
            } else {
               result = VK_ERROR_SURFACE_LOST_KHR;
            }
            free(err);
            free(geom);
            return result;
         }
      }
      return VK_NOT_READY;
   }

   if (chain->has_acquire_queue) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}

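/* If an application reports more damage rectangles than this, we skip the
 * damage region entirely and present the whole surface (update_area stays
 * None below).
 */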
#define MAX_DAMAGE_RECTS 64

/**
 * Queue a new presentation of an image that was previously acquired by the
 * consumer.
 *
 * Note that in immediate presentation mode this does not really queue the
 * presentation but directly asks the X server to show it.
 */
static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index,
                  const VkPresentRegionKHR *damage)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_xfixes_region_t update_area = 0;

   /* If the swapchain is in an error state, don't go any further. */
   if (chain->status < 0)
      return chain->status;

   if (damage && damage->pRectangles && damage->rectangleCount > 0 &&
       damage->rectangleCount <= MAX_DAMAGE_RECTS) {
      xcb_rectangle_t rects[MAX_DAMAGE_RECTS];

      update_area = chain->images[image_index].update_region;
      for (unsigned i = 0; i < damage->rectangleCount; i++) {
         const VkRectLayerKHR *rect = &damage->pRectangles[i];
         assert(rect->layer == 0);
         rects[i].x = rect->offset.x;
         rects[i].y = rect->offset.y;
         rects[i].width = rect->extent.width;
         rects[i].height = rect->extent.height;
      }
      xcb_xfixes_set_region(chain->conn, update_area, damage->rectangleCount, rects);
   }
   chain->images[image_index].update_area = update_area;

   chain->images[image_index].busy = true;
   if (chain->has_present_queue) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      /* No present queue means immediate mode, so we present immediately. */
      return x11_present_to_x11(chain, image_index, 0);
   }
}

/**
 * Decides if an early wait on buffer fences before buffer submission is required. That is for:
 *   - Mailbox mode, as otherwise the latest image in the queue might not be fully rendered at
 *     present time, which could lead to missing a frame.
 *   - Immediate mode under Xwayland, as it works practically the same as mailbox mode using the
 *     mailbox mechanism of Wayland. Sending a buffer with fences not yet signalled can make the
 *     compositor miss a frame when compositing the final image with this buffer.
 *
 * Note though that early waits can be disabled in general on Xwayland by setting the
 * 'vk_xwayland_wait_ready' DRIConf option to false.
 */
static bool
x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
                          struct wsi_x11_connection *wsi_conn,
                          VkPresentModeKHR present_mode)
{
   if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
      return false;
   }

   switch (present_mode) {
   case VK_PRESENT_MODE_MAILBOX_KHR:
      return true;
   case VK_PRESENT_MODE_IMMEDIATE_KHR:
      return wsi_conn->is_xwayland;
   default:
      return false;
   }
}

/**
 * The number of images that are not owned by X11:
 *  (1) owned by the app, or
 *  (2) waiting for the app to take ownership through an acquire, or
 *  (3) sitting in the present queue waiting for the FIFO thread to present
 *      them to X11.
 */
static unsigned x11_driver_owned_images(const struct x11_swapchain *chain)
{
   return chain->base.image_count - chain->sent_image_count;
}

/**
 * Our queue manager. Although called x11_manage_fifo_queues, it only directly
 * manages the present-queue, and does so in fifo and mailbox presentation
 * modes (there is no present-queue in immediate mode, with the exception of
 * Xwayland).
 *
 * Runs in a separate thread, blocks and reacts to queued images on the
 * present-queue.
 *
 * In mailbox mode the queue management is simplified since we only need to
 * pull new images from the present queue and can directly present them.
 *
 * In fifo mode images can only be presented one after the other. To that end,
 * after sending an image to the X server we wait until it either has been
 * presented or released, and only then pull a new image from the
 * present-queue.
 */
static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
   VkResult result = VK_SUCCESS;

   assert(chain->has_present_queue);

   u_thread_setname("WSI swapchain queue");

   while (chain->status >= 0) {
      /* We can block here unconditionally because after an image was sent to
       * the server (later on in this loop) we ensure at least one image is
       * acquirable by the consumer or wait there on such an event.
       */
      uint32_t image_index = 0;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      assert(result != VK_TIMEOUT);
1526
1527      if (result < 0) {
1528         goto fail;
1529      } else if (chain->status < 0) {
1530         /* The status can change underneath us if the swapchain is destroyed
1531          * from another thread.
1532          */
1533         return NULL;
1534      }
1535
      /* In certain usage scenarios (see x11_needs_wait_for_fences) we must
       * wait for the GPU work on this image to finish before presenting;
       * otherwise the usual wait in wsi_common_queue_present suffices.
       */
      if (x11_needs_wait_for_fences(chain->base.wsi, wsi_conn,
                                    chain->base.present_mode)) {
         result = chain->base.wsi->WaitForFences(chain->base.device, 1,
                                        &chain->base.fences[image_index],
                                        true, UINT64_MAX);
         if (result != VK_SUCCESS) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }
      }

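      /* With an acquire queue (i.e. in FIFO modes) we target the MSC right
       * after the last completed present, so every image stays on screen for
       * at least one refresh cycle; a target MSC of 0 asks the X server to
       * present as soon as possible.
       */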
      uint64_t target_msc = 0;
      if (chain->has_acquire_queue)
         target_msc = chain->last_present_msc + 1;

      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result < 0)
         goto fail;

      if (chain->has_acquire_queue) {
         /* Assume this isn't a swapchain where we force 5 images, because those
          * don't end up with an acquire queue at the moment.
          */
         unsigned min_image_count = x11_get_min_image_count(chain->base.wsi);

         /* With drirc overrides, some games use a swapchain with fewer than
          * the minimum number of images. */
         min_image_count = MIN2(min_image_count, chain->base.image_count);

         /* We always need to ensure that the app can have this number of images
          * acquired concurrently in between presents:
          * "VUID-vkAcquireNextImageKHR-swapchain-01802
          *  If the number of currently acquired images is greater than the difference
          *  between the number of images in swapchain and the value of
          *  VkSurfaceCapabilitiesKHR::minImageCount as returned by a call to
          *  vkGetPhysicalDeviceSurfaceCapabilities2KHR with the surface used to
          *  create swapchain, timeout must not be UINT64_MAX"
          */
         unsigned forward_progress_guaranteed_acquired_images =
            chain->base.image_count - min_image_count + 1;
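         /* Worked example (hypothetical numbers): with image_count = 4 and
          * min_image_count = 3, forward progress is guaranteed for up to
          * 4 - 3 + 1 = 2 acquired images, so the loop below keeps processing
          * events until at least 2 images are driver-owned again.
          */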

         /* Wait for our presentation to occur and ensure we have at least one
          * image that can be acquired by the client afterwards. This ensures
          * we can pull on the present queue in the next loop iteration.
          */
         while (chain->images[image_index].present_queued ||
                /* If we have images in the present queue, the outer loop
                 * won't block, so breaking here would just lead back into
                 * this loop; otherwise a break here satisfies
                 * VUID-vkAcquireNextImageKHR-swapchain-01802. */
                x11_driver_owned_images(chain) < forward_progress_guaranteed_acquired_images) {

            xcb_generic_event_t *event =
               xcb_wait_for_special_event(chain->conn, chain->special_event);
            if (!event) {
               result = VK_ERROR_SURFACE_LOST_KHR;
               goto fail;
            }

            result = x11_handle_dri3_present_event(chain, (void *)event);
            /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
            result = x11_swapchain_result(chain, result);
            free(event);
            if (result < 0)
               goto fail;
         }
      }
   }

fail:
   x11_swapchain_result(chain, result);
   if (chain->has_acquire_queue)
      wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}

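/**
 * Allocate a System V shared-memory segment to back a software swapchain
 * image. The segment is marked for deletion right after attaching, so the
 * kernel cleans it up automatically once it is detached, even on a crash.
 */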
static uint8_t *
alloc_shm(struct wsi_image *imagew, unsigned size)
{
#ifdef HAVE_SYS_SHM_H
   struct x11_image *image = (struct x11_image *)imagew;
   image->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
   if (image->shmid < 0)
      return NULL;

   uint8_t *addr = (uint8_t *)shmat(image->shmid, 0, 0);
   /* mark the segment immediately for deletion to avoid leaks */
   shmctl(image->shmid, IPC_RMID, 0);

   if (addr == (uint8_t *) -1)
      return NULL;

   image->shmaddr = addr;
   return addr;
#else
   return NULL;
#endif
}

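/**
 * Initialize a swapchain image: create the X11 pixmap backing it, either
 * from a MIT-SHM segment (software path) or from the image's dma-buf via
 * DRI3, and set up the shm fence used to track when the X server is done
 * with the pixmap.
 */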
static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t bpp = 32;
   int fence_fd;

   result = wsi_create_image(&chain->base, &chain->base.image_info,
                             &image->base);
   if (result != VK_SUCCESS)
      return result;

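   /* Create an empty XFIXES region now; at present time it can be filled
    * with the damage rectangles supplied by the application before being
    * passed along with the PresentPixmap request.
    */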
   image->update_region = xcb_generate_id(chain->conn);
   xcb_xfixes_create_region(chain->conn, image->update_region, 0, NULL);

   if (chain->base.wsi->sw) {
      if (!chain->has_mit_shm) {
         image->busy = false;
         return VK_SUCCESS;
      }

      image->shmseg = xcb_generate_id(chain->conn);

      xcb_shm_attach(chain->conn,
                     image->shmseg,
                     image->shmid,
                     0);
      image->pixmap = xcb_generate_id(chain->conn);
      cookie = xcb_shm_create_pixmap_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.row_pitches[0] / 4,
                                             pCreateInfo->imageExtent.height,
                                             chain->depth,
                                             image->shmseg, 0);
      xcb_discard_reply(chain->conn, cookie.sequence);
      goto out_fence;
   }
   image->pixmap = xcb_generate_id(chain->conn);

#ifdef HAVE_DRI3_MODIFIERS
   if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
      /* If the image has a modifier, we must have DRI3 v1.2. */
      assert(chain->has_dri3_modifiers);

      /* XCB requires an array of file descriptors but we only have one */
      int fds[4] = { -1, -1, -1, -1 };
      for (int i = 0; i < image->base.num_planes; i++) {
         fds[i] = os_dupfd_cloexec(image->base.dma_buf_fd);
         if (fds[i] == -1) {
            for (int j = 0; j < i; j++)
               close(fds[j]);

            return VK_ERROR_OUT_OF_HOST_MEMORY;
         }
      }

      cookie =
         xcb_dri3_pixmap_from_buffers_checked(chain->conn,
                                              image->pixmap,
                                              chain->window,
                                              image->base.num_planes,
                                              pCreateInfo->imageExtent.width,
                                              pCreateInfo->imageExtent.height,
                                              image->base.row_pitches[0],
                                              image->base.offsets[0],
                                              image->base.row_pitches[1],
                                              image->base.offsets[1],
                                              image->base.row_pitches[2],
                                              image->base.offsets[2],
                                              image->base.row_pitches[3],
                                              image->base.offsets[3],
                                              chain->depth, bpp,
                                              image->base.drm_modifier,
                                              fds);
   } else
#endif
   {
      /* Without passing modifiers, we can't have multi-plane RGB images. */
      assert(image->base.num_planes == 1);

      /* XCB will take ownership of the FD we pass it. */
      int fd = os_dupfd_cloexec(image->base.dma_buf_fd);
      if (fd == -1)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      cookie =
         xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.sizes[0],
                                             pCreateInfo->imageExtent.width,
                                             pCreateInfo->imageExtent.height,
                                             image->base.row_pitches[0],
                                             chain->depth, bpp, fd);
   }

   xcb_discard_reply(chain->conn, cookie.sequence);

out_fence:
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto fail_pixmap;

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL)
      goto fail_shmfence_alloc;

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

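   /* Trigger the fence up-front: a fresh image starts out idle, so the first
    * wait on it must succeed immediately.
    */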
   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);

   return VK_ERROR_INITIALIZATION_FAILED;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   if (!chain->base.wsi->sw || chain->has_mit_shm) {
      cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
      xcb_discard_reply(chain->conn, cookie.sequence);
      xshmfence_unmap_shm(image->shm_fence);

      cookie = xcb_free_pixmap(chain->conn, image->pixmap);
      xcb_discard_reply(chain->conn, cookie.sequence);

      cookie = xcb_xfixes_destroy_region(chain->conn, image->update_region);
      xcb_discard_reply(chain->conn, cookie.sequence);
   }

   wsi_destroy_image(&chain->base, &image->base);
#ifdef HAVE_SYS_SHM_H
   if (image->shmaddr)
      shmdt(image->shmaddr);
#endif
}

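/**
 * Query the DRM format modifiers the X server supports for this window and
 * return them as up to two tranches: window-specific modifiers first,
 * followed by screen-wide modifiers.
 */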
static void
wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
                           xcb_connection_t *conn, xcb_window_t window,
                           uint8_t depth, uint8_t bpp,
                           VkCompositeAlphaFlagsKHR vk_alpha,
                           uint64_t **modifiers_in, uint32_t *num_modifiers_in,
                           uint32_t *num_tranches_in,
                           const VkAllocationCallbacks *pAllocator)
{
   if (!wsi_conn->has_dri3_modifiers)
      goto out;

#ifdef HAVE_DRI3_MODIFIERS
   xcb_generic_error_t *error = NULL;
   xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
      xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
   xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
      xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
   free(error);

   if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
                      mod_reply->num_screen_modifiers == 0)) {
      free(mod_reply);
      goto out;
   }

   uint32_t n = 0;
   uint32_t counts[2];
   uint64_t *modifiers[2];

   if (mod_reply->num_window_modifiers) {
      counts[n] = mod_reply->num_window_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   if (mod_reply->num_screen_modifiers) {
      counts[n] = mod_reply->num_screen_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         if (n > 0)
            vk_free(pAllocator, modifiers[0]);
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   for (int i = 0; i < n; i++) {
      modifiers_in[i] = modifiers[i];
      num_modifiers_in[i] = counts[i];
   }
   *num_tranches_in = n;

   free(mod_reply);
   return;
#endif
out:
   *num_tranches_in = 0;
}

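/**
 * Tear down a swapchain: wake up and join the queue-manager thread, destroy
 * the per-image resources, and stop listening for Present events on the
 * window.
 */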
static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   if (chain->has_present_queue) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);

      if (chain->has_acquire_queue)
         wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   for (uint32_t i = 0; i < chain->base.image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);
   wsi_destroy_image_info(&chain->base, &chain->base.image_info);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

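/**
 * Set or delete the _VARIABLE_REFRESH property on the window. X11 drivers
 * that support adaptive sync (e.g. the amdgpu DDX) watch this property to
 * enable variable refresh rate for the window while it is being flipped.
 */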
static void
wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
                                   xcb_drawable_t drawable,
                                   uint32_t state)
{
   static char const name[] = "_VARIABLE_REFRESH";
   xcb_intern_atom_cookie_t cookie;
   xcb_intern_atom_reply_t* reply;
   xcb_void_cookie_t check;

   cookie = xcb_intern_atom(conn, 0, strlen(name), name);
   reply = xcb_intern_atom_reply(conn, cookie, NULL);
   if (reply == NULL)
      return;

   if (state)
      check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
                                          drawable, reply->atom,
                                          XCB_ATOM_CARDINAL, 32, 1, &state);
   else
      check = xcb_delete_property_checked(conn, drawable, reply->atom);

   xcb_discard_reply(conn, check.sequence);
   free(reply);
}

/**
 * Create the swapchain.
 *
 * Supports immediate, FIFO and mailbox presentation modes.
 */
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;
   VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   /* Get the xcb connection from the icd_surface, and from that our internal
    * struct representing it.
    */
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* Get the number of images for our swapchain. This count depends on:
    * - the requested minimal image count
    * - device characteristics
    * - the presentation mode.
    */
   unsigned num_images = pCreateInfo->minImageCount;
   if (wsi_device->x11.strict_imageCount)
      num_images = pCreateInfo->minImageCount;
   else if (x11_needs_wait_for_fences(wsi_device, wsi_conn, present_mode))
      num_images = MAX2(num_images, 5);
   else if (wsi_device->x11.ensure_minImageCount)
      num_images = MAX2(num_images, x11_get_min_image_count(wsi_device));

   /* Check that we have a window up-front. It is an error not to have one. */
   xcb_window_t window = x11_surface_get_window(icd_surface);

   /* Get the geometry of that window. The swapchain's bit depth will be
    * matched to it, and the chain's image extents should match it as well,
    * since that allows X to present by flipping rather than copying.
    */
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
   if (geometry == NULL)
      return VK_ERROR_SURFACE_LOST_KHR;
   const uint32_t bit_depth = geometry->depth;
   const uint16_t cur_width = geometry->width;
   const uint16_t cur_height = geometry->height;
   free(geometry);

   /* Allocate the actual swapchain. The size depends on image count. */
   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_zalloc(pAllocator, size, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* When our local device is not compatible with the DRI3 device provided by
    * the X server we assume this is a PRIME system.
    */
   bool use_buffer_blit = false;
   if (!wsi_device->sw)
      if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
         use_buffer_blit = true;

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, pAllocator, use_buffer_blit);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_wsi_image = x11_get_wsi_image;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.present_mode = present_mode;
   chain->base.image_count = num_images;
   chain->conn = conn;
   chain->window = window;
   chain->depth = bit_depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->send_sbc = 0;
   chain->sent_image_count = 0;
   chain->last_present_msc = 0;
   chain->has_acquire_queue = false;
   chain->has_present_queue = false;
   chain->status = VK_SUCCESS;
   chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
   chain->has_mit_shm = wsi_conn->has_mit_shm;

   /* When the images in the swapchain don't fit the window, X can still
    * present them, but only by copy, not by flip. Such a copy is suboptimal:
    * if the client changed the chain extents to match the window, X might be
    * able to flip.
    */
   if (chain->extent.width != cur_width || chain->extent.height != cur_height)
       chain->status = VK_SUBOPTIMAL_KHR;

   /* On a new swapchain this helper variable is set to false. It only has an
    * effect once we have done at least one flip and then fall back to
    * copying: in that case there is presumed to be a high likelihood that X
    * could flip again if the client reallocated a new swapchain.
    *
    * Note that we used to inherit this property from
    * 'pCreateInfo->oldSwapchain'. But when it was true, and the next present
    * was completed with copying, we would return VK_SUBOPTIMAL_KHR and hint
    * the app to reallocate again for no good reason. If all following
    * presents on the surface were completed with copying because of some
    * surface state change, we would always return VK_SUBOPTIMAL_KHR no
    * matter how many times the app had reallocated.
    *
    * Note also that it is questionable whether this mechanism is really
    * useful. It is not clear why, on a change from flipping to copying, a
    * reallocation should have a high chance of making flips work again per
    * se. In other words, it is not clear why we need another way to inform
    * clients about suboptimal copies besides forwarding the
    * 'PresentOptionSuboptimal' complete mode.
    */
   chain->copy_is_suboptimal = false;

   /* For our swapchain we need to listen to the following Present extension
    * events:
    * - Configure: The window dimensions changed. Images in the swapchain
    *              might need to be reallocated.
    * - Complete: An image from our swapchain was presented on the output.
    * - Idle: An image from our swapchain is no longer accessed by the X
    *         server and can be reused.
    */
   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue.
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   /* Create the graphics context. */
   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

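   /* Query the format modifiers supported by the server up-front; they
    * determine below whether native images can be allocated with explicit
    * modifiers.
    */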
   uint64_t *modifiers[2] = {NULL, NULL};
   uint32_t num_modifiers[2] = {0, 0};
   uint32_t num_tranches = 0;
   if (wsi_device->supports_modifiers)
      wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
                                 pCreateInfo->compositeAlpha,
                                 modifiers, num_modifiers, &num_tranches,
                                 pAllocator);

   if (wsi_device->sw) {
      result = wsi_configure_cpu_image(&chain->base, pCreateInfo,
                                       chain->has_mit_shm ? &alloc_shm : NULL,
                                       &chain->base.image_info);
   } else if (chain->base.use_buffer_blit) {
      bool use_modifier = num_tranches > 0;
      result = wsi_configure_prime_image(&chain->base, pCreateInfo,
                                         use_modifier,
                                         &chain->base.image_info);
   } else {
      result = wsi_configure_native_image(&chain->base, pCreateInfo,
                                          num_tranches, num_modifiers,
                                          (const uint64_t *const *)modifiers,
                                          &chain->base.image_info);
   }
   if (result != VK_SUCCESS)
      goto fail_modifiers;

   uint32_t image = 0;
   for (; image < chain->base.image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   /* Initialize the queues for the images in our swapchain. Possible queues
    * are:
    * - Present queue: for images sent to the X server but not yet presented.
    * - Acquire queue: for images already presented but not yet released by
    *                  the X server.
    *
    * Queues are not used with software drivers. Otherwise, which queues are
    * used depends on the presentation mode:
    * - FIFO: present and acquire
    * - Mailbox: present only
    * - Immediate: present only, and only when we wait on fences before
    *              buffer submission (Xwayland)
    */
   if ((chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
        chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR ||
        x11_needs_wait_for_fences(wsi_device, wsi_conn,
                                  chain->base.present_mode)) &&
       !chain->base.wsi->sw) {
      chain->has_present_queue = true;

      /* The queues have a length of base.image_count + 1 because we will
       * occasionally use UINT32_MAX to signal the other thread that an error
       * has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
      if (ret) {
         goto fail_init_images;
      }

      if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
          chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
         chain->has_acquire_queue = true;

         ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
         if (ret) {
            wsi_queue_destroy(&chain->present_queue);
            goto fail_init_images;
         }

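         /* All images start out owned by the driver rather than the X
          * server, so seed the acquire queue with every image index.
          */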
         for (unsigned i = 0; i < chain->base.image_count; i++)
            wsi_queue_push(&chain->acquire_queue, i);
      }

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         if (chain->has_acquire_queue)
            wsi_queue_destroy(&chain->acquire_queue);

         goto fail_init_images;
      }
   }

   assert(chain->has_present_queue || !chain->has_acquire_queue);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

   /* It is safe to set it here as only one swapchain can be associated with
    * the window, and swapchain creation does the association. At this point
    * we know the creation is going to succeed. */
   wsi_x11_set_adaptive_sync_property(conn, window,
                                      wsi_device->enable_adaptive_sync);

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

   wsi_destroy_image_info(&chain->base, &chain->base.image_info);

fail_modifiers:
   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   wsi_swapchain_finish(&chain->base);

fail_alloc:
   vk_free(pAllocator, chain);

   return result;
}

VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc,
                 const struct driOptionCache *dri_options)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   if (dri_options) {
      if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
         wsi_device->x11.override_minImageCount =
            driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
      }
      if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
         wsi_device->x11.strict_imageCount =
            driQueryOptionb(dri_options, "vk_x11_strict_image_count");
      }
      if (driCheckOption(dri_options, "vk_x11_ensure_min_image_count", DRI_BOOL)) {
         wsi_device->x11.ensure_minImageCount =
            driQueryOptionb(dri_options, "vk_x11_ensure_min_image_count");
      }
      wsi_device->x11.xwaylandWaitReady = true;
      if (driCheckOption(dri_options, "vk_xwayland_wait_ready", DRI_BOOL)) {
         wsi_device->x11.xwaylandWaitReady =
            driQueryOptionb(dri_options, "vk_xwayland_wait_ready");
      }
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_formats2 = x11_surface_get_formats2;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}

void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(wsi_device, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}