1 /*
2 * Copyright © 2011-2012 Intel Corporation
3 * Copyright © 2012 Collabora, Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
20 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Kristian Høgsberg <krh@bitplanet.net>
27 * Benjamin Franzke <benjaminfranzke@googlemail.com>
28 */
29
30 #include <stdint.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <limits.h>
34 #include <dlfcn.h>
35 #include <errno.h>
36 #include <unistd.h>
37 #include <fcntl.h>
38 #include <xf86drm.h>
39 #include "drm-uapi/drm_fourcc.h"
40 #include <sys/mman.h>
41
42 #include "egl_dri2.h"
43 #include "loader_dri_helper.h"
44 #include "loader.h"
45 #include "util/u_vector.h"
46 #include "util/anon_file.h"
47 #include "eglglobals.h"
48 #include "kopper_interface.h"
49
50 #include <wayland-egl-backend.h>
51 #include <wayland-client.h>
52 #include "wayland-drm-client-protocol.h"
53 #include "linux-dmabuf-unstable-v1-client-protocol.h"
54
55 /*
56 * The index of entries in this table is used as a bitmask in
57 * dri2_dpy->formats.formats_bitmap, which tracks the formats supported
58 * by our server.
59 */
60 static const struct dri2_wl_visual {
61 const char *format_name;
62 uint32_t wl_drm_format;
63 uint32_t wl_shm_format;
64 int dri_image_format;
65 /* alt_dri_image_format is a substitute wl_buffer format to use for a
66 * wl-server unsupported dri_image_format, ie. some other dri_image_format in
67 * the table, of the same precision but with different channel ordering, or
68 * __DRI_IMAGE_FORMAT_NONE if an alternate format is not needed or supported.
69 * The code checks if alt_dri_image_format can be used as a fallback for a
70 * dri_image_format for a given wl-server implementation.
71 */
72 int alt_dri_image_format;
73 int bpp;
74 int rgba_shifts[4];
75 unsigned int rgba_sizes[4];
76 } dri2_wl_visuals[] = {
77 {
78 "ABGR16F",
79 WL_DRM_FORMAT_ABGR16F, WL_SHM_FORMAT_ABGR16161616F,
80 __DRI_IMAGE_FORMAT_ABGR16161616F, 0, 64,
81 { 0, 16, 32, 48 },
82 { 16, 16, 16, 16 },
83 },
84 {
85 "XBGR16F",
86 WL_DRM_FORMAT_XBGR16F, WL_SHM_FORMAT_XBGR16161616F,
87 __DRI_IMAGE_FORMAT_XBGR16161616F, 0, 64,
88 { 0, 16, 32, -1 },
89 { 16, 16, 16, 0 },
90 },
91 {
92 "XRGB2101010",
93 WL_DRM_FORMAT_XRGB2101010, WL_SHM_FORMAT_XRGB2101010,
94 __DRI_IMAGE_FORMAT_XRGB2101010, __DRI_IMAGE_FORMAT_XBGR2101010, 32,
95 { 20, 10, 0, -1 },
96 { 10, 10, 10, 0 },
97 },
98 {
99 "ARGB2101010",
100 WL_DRM_FORMAT_ARGB2101010, WL_SHM_FORMAT_ARGB2101010,
101 __DRI_IMAGE_FORMAT_ARGB2101010, __DRI_IMAGE_FORMAT_ABGR2101010, 32,
102 { 20, 10, 0, 30 },
103 { 10, 10, 10, 2 },
104 },
105 {
106 "XBGR2101010",
107 WL_DRM_FORMAT_XBGR2101010, WL_SHM_FORMAT_XBGR2101010,
108 __DRI_IMAGE_FORMAT_XBGR2101010, __DRI_IMAGE_FORMAT_XRGB2101010, 32,
109 { 0, 10, 20, -1 },
110 { 10, 10, 10, 0 },
111 },
112 {
113 "ABGR2101010",
114 WL_DRM_FORMAT_ABGR2101010, WL_SHM_FORMAT_ABGR2101010,
115 __DRI_IMAGE_FORMAT_ABGR2101010, __DRI_IMAGE_FORMAT_ARGB2101010, 32,
116 { 0, 10, 20, 30 },
117 { 10, 10, 10, 2 },
118 },
119 {
120 "XRGB8888",
121 WL_DRM_FORMAT_XRGB8888, WL_SHM_FORMAT_XRGB8888,
122 __DRI_IMAGE_FORMAT_XRGB8888, __DRI_IMAGE_FORMAT_NONE, 32,
123 { 16, 8, 0, -1 },
124 { 8, 8, 8, 0 },
125 },
126 {
127 "ARGB8888",
128 WL_DRM_FORMAT_ARGB8888, WL_SHM_FORMAT_ARGB8888,
129 __DRI_IMAGE_FORMAT_ARGB8888, __DRI_IMAGE_FORMAT_NONE, 32,
130 { 16, 8, 0, 24 },
131 { 8, 8, 8, 8 },
132 },
133 {
134 "ABGR8888",
135 WL_DRM_FORMAT_ABGR8888, WL_SHM_FORMAT_ABGR8888,
136 __DRI_IMAGE_FORMAT_ABGR8888, __DRI_IMAGE_FORMAT_NONE, 32,
137 { 0, 8, 16, 24 },
138 { 8, 8, 8, 8 },
139 },
140 {
141 "XBGR8888",
142 WL_DRM_FORMAT_XBGR8888, WL_SHM_FORMAT_XBGR8888,
143 __DRI_IMAGE_FORMAT_XBGR8888, __DRI_IMAGE_FORMAT_NONE, 32,
144 { 0, 8, 16, -1 },
145 { 8, 8, 8, 0 },
146 },
147 {
148 "RGB565",
149 WL_DRM_FORMAT_RGB565, WL_SHM_FORMAT_RGB565,
150 __DRI_IMAGE_FORMAT_RGB565, __DRI_IMAGE_FORMAT_NONE, 16,
151 { 11, 5, 0, -1 },
152 { 5, 6, 5, 0 },
153 },
154 };
155
156 static int
dri2_wl_visual_idx_from_config(struct dri2_egl_display *dri2_dpy, const __DRIconfig *config, bool force_opaque)157 dri2_wl_visual_idx_from_config(struct dri2_egl_display *dri2_dpy,
158 const __DRIconfig *config,
159 bool force_opaque)
160 {
161 int shifts[4];
162 unsigned int sizes[4];
163
164 dri2_get_shifts_and_sizes(dri2_dpy->core, config, shifts, sizes);
165
166 for (unsigned int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
167 const struct dri2_wl_visual *wl_visual = &dri2_wl_visuals[i];
168
169 int cmp_rgb_shifts = memcmp(shifts, wl_visual->rgba_shifts,
170 3 * sizeof(shifts[0]));
171 int cmp_rgb_sizes = memcmp(sizes, wl_visual->rgba_sizes,
172 3 * sizeof(sizes[0]));
173
174 if (cmp_rgb_shifts == 0 && cmp_rgb_sizes == 0 &&
175 wl_visual->rgba_shifts[3] == (force_opaque ? -1 : shifts[3]) &&
176 wl_visual->rgba_sizes[3] == (force_opaque ? 0 : sizes[3])) {
177 return i;
178 }
179 }
180
181 return -1;
182 }
183
184 static int
dri2_wl_visual_idx_from_fourcc(uint32_t fourcc)185 dri2_wl_visual_idx_from_fourcc(uint32_t fourcc)
186 {
187 for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
188 /* wl_drm format codes overlap with DRIImage FourCC codes for all formats
189 * we support. */
190 if (dri2_wl_visuals[i].wl_drm_format == fourcc)
191 return i;
192 }
193
194 return -1;
195 }
196
197 static int
dri2_wl_visual_idx_from_dri_image_format(uint32_t dri_image_format)198 dri2_wl_visual_idx_from_dri_image_format(uint32_t dri_image_format)
199 {
200 for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
201 if (dri2_wl_visuals[i].dri_image_format == dri_image_format)
202 return i;
203 }
204
205 return -1;
206 }
207
208 static int
dri2_wl_visual_idx_from_shm_format(uint32_t shm_format)209 dri2_wl_visual_idx_from_shm_format(uint32_t shm_format)
210 {
211 for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
212 if (dri2_wl_visuals[i].wl_shm_format == shm_format)
213 return i;
214 }
215
216 return -1;
217 }
218
219 bool
dri2_wl_is_format_supported(void* user_data, uint32_t format)220 dri2_wl_is_format_supported(void* user_data, uint32_t format)
221 {
222 _EGLDisplay *disp = (_EGLDisplay *) user_data;
223 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
224 int j = dri2_wl_visual_idx_from_fourcc(format);
225
226 if (j == -1)
227 return false;
228
229 for (int i = 0; dri2_dpy->driver_configs[i]; i++)
230 if (j == dri2_wl_visual_idx_from_config(dri2_dpy,
231 dri2_dpy->driver_configs[i],
232 false))
233 return true;
234
235 return false;
236 }
237
238 static int
roundtrip(struct dri2_egl_display *dri2_dpy)239 roundtrip(struct dri2_egl_display *dri2_dpy)
240 {
241 return wl_display_roundtrip_queue(dri2_dpy->wl_dpy, dri2_dpy->wl_queue);
242 }
243
244 static void
wl_buffer_release(void *data, struct wl_buffer *buffer)245 wl_buffer_release(void *data, struct wl_buffer *buffer)
246 {
247 struct dri2_egl_surface *dri2_surf = data;
248 int i;
249
250 for (i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); ++i)
251 if (dri2_surf->color_buffers[i].wl_buffer == buffer)
252 break;
253
254 assert (i < ARRAY_SIZE(dri2_surf->color_buffers));
255
256 if (dri2_surf->color_buffers[i].wl_release) {
257 wl_buffer_destroy(buffer);
258 dri2_surf->color_buffers[i].wl_release = false;
259 dri2_surf->color_buffers[i].wl_buffer = NULL;
260 dri2_surf->color_buffers[i].age = 0;
261 }
262
263 dri2_surf->color_buffers[i].locked = false;
264 }
265
266 static const struct wl_buffer_listener wl_buffer_listener = {
267 .release = wl_buffer_release
268 };
269
270 static void
dri2_wl_formats_fini(struct dri2_wl_formats *formats)271 dri2_wl_formats_fini(struct dri2_wl_formats *formats)
272 {
273 unsigned int i;
274
275 for (i = 0; i < formats->num_formats; i++)
276 u_vector_finish(&formats->modifiers[i]);
277
278 free(formats->modifiers);
279 free(formats->formats_bitmap);
280 }
281
282 static int
dri2_wl_formats_init(struct dri2_wl_formats *formats)283 dri2_wl_formats_init(struct dri2_wl_formats *formats)
284 {
285 unsigned int i, j;
286
287 /* formats->formats_bitmap tells us if a format in dri2_wl_visuals is present
288 * or not. So we must compute the amount of unsigned int's needed to
289 * represent all the formats of dri2_wl_visuals. We use BITSET_WORDS for
290 * this task. */
291 formats->num_formats = ARRAY_SIZE(dri2_wl_visuals);
292 formats->formats_bitmap = calloc(BITSET_WORDS(formats->num_formats),
293 sizeof(*formats->formats_bitmap));
294 if (!formats->formats_bitmap)
295 goto err;
296
297 /* Here we have an array of u_vector's to store the modifiers supported by
298 * each format in the bitmask. */
299 formats->modifiers = calloc(formats->num_formats,
300 sizeof(*formats->modifiers));
301 if (!formats->modifiers)
302 goto err_modifier;
303
304 for (i = 0; i < formats->num_formats; i++)
305 if (!u_vector_init_pow2(&formats->modifiers[i], 4, sizeof(uint64_t))) {
306 j = i;
307 goto err_vector_init;
308 }
309
310 return 0;
311
312 err_vector_init:
313 for (i = 0; i < j; i++)
314 u_vector_finish(&formats->modifiers[i]);
315 free(formats->modifiers);
316 err_modifier:
317 free(formats->formats_bitmap);
318 err:
319 _eglError(EGL_BAD_ALLOC, "dri2_wl_formats_init");
320 return -1;
321 }
322
323 static void
dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table *format_table)324 dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table *format_table)
325 {
326 if (format_table->data && format_table->data != MAP_FAILED)
327 munmap(format_table->data, format_table->size);
328 }
329
330 static void
dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table *format_table)331 dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table *format_table)
332 {
333 memset(format_table, 0, sizeof(*format_table));
334 }
335
336 static void
dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)337 dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
338 {
339 dri2_wl_formats_fini(&tranche->formats);
340 }
341
342 static int
dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)343 dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
344 {
345 memset(tranche, 0, sizeof(*tranche));
346
347 if (dri2_wl_formats_init(&tranche->formats) < 0)
348 return -1;
349
350 return 0;
351 }
352
353 static void
dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)354 dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
355 {
356 dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);
357
358 util_dynarray_foreach(&dmabuf_feedback->tranches,
359 struct dmabuf_feedback_tranche, tranche)
360 dmabuf_feedback_tranche_fini(tranche);
361 util_dynarray_fini(&dmabuf_feedback->tranches);
362
363 dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
364 }
365
366 static int
dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)367 dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
368 {
369 memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));
370
371 if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
372 return -1;
373
374 util_dynarray_init(&dmabuf_feedback->tranches, NULL);
375
376 dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);
377
378 return 0;
379 }
380
381 static void
resize_callback(struct wl_egl_window *wl_win, void *data)382 resize_callback(struct wl_egl_window *wl_win, void *data)
383 {
384 struct dri2_egl_surface *dri2_surf = data;
385 struct dri2_egl_display *dri2_dpy =
386 dri2_egl_display(dri2_surf->base.Resource.Display);
387
388 if (dri2_surf->base.Width == wl_win->width &&
389 dri2_surf->base.Height == wl_win->height)
390 return;
391
392 dri2_surf->resized = true;
393
394 /* Update the surface size as soon as native window is resized; from user
395 * pov, this makes the effect that resize is done immediately after native
396 * window resize, without requiring to wait until the first draw.
397 *
398 * A more detailed and lengthy explanation can be found at
399 * https://lists.freedesktop.org/archives/mesa-dev/2018-June/196474.html
400 */
401 if (!dri2_surf->back) {
402 dri2_surf->base.Width = wl_win->width;
403 dri2_surf->base.Height = wl_win->height;
404 }
405 dri2_dpy->flush->invalidate(dri2_surf->dri_drawable);
406 }
407
408 static void
destroy_window_callback(void *data)409 destroy_window_callback(void *data)
410 {
411 struct dri2_egl_surface *dri2_surf = data;
412 dri2_surf->wl_win = NULL;
413 }
414
415 static struct wl_surface *
get_wl_surface_proxy(struct wl_egl_window *window)416 get_wl_surface_proxy(struct wl_egl_window *window)
417 {
418 /* Version 3 of wl_egl_window introduced a version field at the same
419 * location where a pointer to wl_surface was stored. Thus, if
420 * window->version is dereferenceable, we've been given an older version of
421 * wl_egl_window, and window->version points to wl_surface */
422 if (_eglPointerIsDereferencable((void *)(window->version))) {
423 return wl_proxy_create_wrapper((void *)(window->version));
424 }
425 return wl_proxy_create_wrapper(window->surface);
426 }
427
428 static void
surface_dmabuf_feedback_format_table(void *data, struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1, int32_t fd, uint32_t size)429 surface_dmabuf_feedback_format_table(void *data,
430 struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
431 int32_t fd, uint32_t size)
432 {
433 struct dri2_egl_surface *dri2_surf = data;
434 struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
435
436 feedback->format_table.size = size;
437 feedback->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
438
439 close(fd);
440 }
441
442 static void
surface_dmabuf_feedback_main_device(void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback, struct wl_array *device)443 surface_dmabuf_feedback_main_device(void *data,
444 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
445 struct wl_array *device)
446 {
447 struct dri2_egl_surface *dri2_surf = data;
448 struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
449
450 memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));
451
452 /* Compositors may support switching render devices and change the main
453 * device of the dma-buf feedback. In this case, when we reallocate the
454 * buffers of the surface we must ensure that it is not allocated in memory
455 * that is only visible to the GPU that EGL is using, as the compositor will
456 * have to import them to the render device it is using.
457 *
458 * TODO: we still don't know how to allocate such buffers.
459 */
460 if (dri2_surf->dmabuf_feedback.main_device != 0 &&
461 (feedback->main_device != dri2_surf->dmabuf_feedback.main_device))
462 dri2_surf->compositor_using_another_device = true;
463 else
464 dri2_surf->compositor_using_another_device = false;
465 }
466
467 static void
surface_dmabuf_feedback_tranche_target_device(void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback, struct wl_array *device)468 surface_dmabuf_feedback_tranche_target_device(void *data,
469 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
470 struct wl_array *device)
471 {
472 struct dri2_egl_surface *dri2_surf = data;
473 struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
474
475 memcpy(&feedback->pending_tranche.target_device, device->data,
476 sizeof(feedback->pending_tranche.target_device));
477 }
478
479 static void
surface_dmabuf_feedback_tranche_flags(void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback, uint32_t flags)480 surface_dmabuf_feedback_tranche_flags(void *data,
481 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
482 uint32_t flags)
483 {
484 struct dri2_egl_surface *dri2_surf = data;
485 struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
486
487 feedback->pending_tranche.flags = flags;
488 }
489
490 static void
surface_dmabuf_feedback_tranche_formats(void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback, struct wl_array *indices)491 surface_dmabuf_feedback_tranche_formats(void *data,
492 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
493 struct wl_array *indices)
494 {
495 struct dri2_egl_surface *dri2_surf = data;
496 struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
497 uint64_t *modifier_ptr, modifier;
498 uint32_t format;
499 uint16_t *index;
500 int visual_idx;
501
502 /* Compositor may advertise or not a format table. If it does, we use it.
503 * Otherwise, we steal the most recent advertised format table. If we don't have
504 * a most recent advertised format table, compositor did something wrong. */
505 if (feedback->format_table.data == NULL) {
506 feedback->format_table = dri2_surf->dmabuf_feedback.format_table;
507 dmabuf_feedback_format_table_init(&dri2_surf->dmabuf_feedback.format_table);
508 }
509 if (feedback->format_table.data == MAP_FAILED) {
510 _eglLog(_EGL_WARNING, "wayland-egl: we could not map the format table "
511 "so we won't be able to use this batch of dma-buf "
512 "feedback events.");
513 return;
514 }
515 if (feedback->format_table.data == NULL) {
516 _eglLog(_EGL_WARNING, "wayland-egl: compositor didn't advertise a format "
517 "table, so we won't be able to use this batch of dma-buf "
518 "feedback events.");
519 return;
520 }
521
522 wl_array_for_each(index, indices) {
523 format = feedback->format_table.data[*index].format;
524 modifier = feedback->format_table.data[*index].modifier;
525
526 /* Skip formats that are not the one the surface is already using. We
527 * can't switch to another format. */
528 if (format != dri2_surf->format)
529 continue;
530
531 /* We are sure that the format is supported because of the check above. */
532 visual_idx = dri2_wl_visual_idx_from_fourcc(format);
533 assert(visual_idx != -1);
534
535 BITSET_SET(feedback->pending_tranche.formats.formats_bitmap, visual_idx);
536 modifier_ptr =
537 u_vector_add(&feedback->pending_tranche.formats.modifiers[visual_idx]);
538 if (modifier_ptr)
539 *modifier_ptr = modifier;
540 }
541 }
542
543 static void
surface_dmabuf_feedback_tranche_done(void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)544 surface_dmabuf_feedback_tranche_done(void *data,
545 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
546 {
547 struct dri2_egl_surface *dri2_surf = data;
548 struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
549
550 /* Add tranche to array of tranches. */
551 util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
552 feedback->pending_tranche);
553
554 dmabuf_feedback_tranche_init(&feedback->pending_tranche);
555 }
556
557 static void
surface_dmabuf_feedback_done(void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)558 surface_dmabuf_feedback_done(void *data,
559 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
560 {
561 struct dri2_egl_surface *dri2_surf = data;
562
563 /* The dma-buf feedback protocol states that surface dma-buf feedback should
564 * be sent by the compositor only if its buffers are using a suboptimal pair
565 * of format and modifier. We can't change the buffer format, but we can
566 * reallocate with another modifier. So we raise this flag in order to force
567 * buffer reallocation based on the dma-buf feedback sent. */
568 dri2_surf->received_dmabuf_feedback = true;
569
570 dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
571 dri2_surf->dmabuf_feedback = dri2_surf->pending_dmabuf_feedback;
572 dmabuf_feedback_init(&dri2_surf->pending_dmabuf_feedback);
573 }
574
575 static const struct zwp_linux_dmabuf_feedback_v1_listener
576 surface_dmabuf_feedback_listener = {
577 .format_table = surface_dmabuf_feedback_format_table,
578 .main_device = surface_dmabuf_feedback_main_device,
579 .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
580 .tranche_flags = surface_dmabuf_feedback_tranche_flags,
581 .tranche_formats = surface_dmabuf_feedback_tranche_formats,
582 .tranche_done = surface_dmabuf_feedback_tranche_done,
583 .done = surface_dmabuf_feedback_done,
584 };
585
586 /**
587 * Called via eglCreateWindowSurface(), drv->CreateWindowSurface().
588 */
589 static _EGLSurface *
dri2_wl_create_window_surface(_EGLDisplay *disp, _EGLConfig *conf, void *native_window, const EGLint *attrib_list)590 dri2_wl_create_window_surface(_EGLDisplay *disp, _EGLConfig *conf,
591 void *native_window, const EGLint *attrib_list)
592 {
593 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
594 struct dri2_egl_config *dri2_conf = dri2_egl_config(conf);
595 struct wl_egl_window *window = native_window;
596 struct dri2_egl_surface *dri2_surf;
597 struct zwp_linux_dmabuf_v1 *dmabuf_wrapper;
598 int visual_idx;
599 const __DRIconfig *config;
600
601 if (!window) {
602 _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_create_surface");
603 return NULL;
604 }
605
606 if (window->driver_private) {
607 _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
608 return NULL;
609 }
610
611 dri2_surf = calloc(1, sizeof *dri2_surf);
612 if (!dri2_surf) {
613 _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
614 return NULL;
615 }
616
617 if (!dri2_init_surface(&dri2_surf->base, disp, EGL_WINDOW_BIT, conf,
618 attrib_list, false, native_window))
619 goto cleanup_surf;
620
621 config = dri2_get_dri_config(dri2_conf, EGL_WINDOW_BIT,
622 dri2_surf->base.GLColorspace);
623
624 if (!config) {
625 _eglError(EGL_BAD_MATCH, "Unsupported surfacetype/colorspace configuration");
626 goto cleanup_surf;
627 }
628
629 dri2_surf->base.Width = window->width;
630 dri2_surf->base.Height = window->height;
631
632 #ifndef NDEBUG
633 /* Enforce that every visual has an opaque variant (requirement to support
634 * EGL_EXT_present_opaque)
635 */
636 for (unsigned int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
637 const struct dri2_wl_visual *transparent_visual = &dri2_wl_visuals[i];
638 if (transparent_visual->rgba_sizes[3] == 0) {
639 continue;
640 }
641
642 bool found_opaque_equivalent = false;
643 for (unsigned int j = 0; j < ARRAY_SIZE(dri2_wl_visuals); j++) {
644 const struct dri2_wl_visual *opaque_visual = &dri2_wl_visuals[j];
645 if (opaque_visual->rgba_sizes[3] != 0) {
646 continue;
647 }
648
649 int cmp_rgb_shifts = memcmp(transparent_visual->rgba_shifts,
650 opaque_visual->rgba_shifts,
651 3 * sizeof(opaque_visual->rgba_shifts[0]));
652 int cmp_rgb_sizes = memcmp(transparent_visual->rgba_sizes,
653 opaque_visual->rgba_sizes,
654 3 * sizeof(opaque_visual->rgba_sizes[0]));
655
656 if (cmp_rgb_shifts == 0 && cmp_rgb_sizes == 0) {
657 found_opaque_equivalent = true;
658 break;
659 }
660 }
661
662 assert(found_opaque_equivalent);
663 }
664 #endif
665
666 visual_idx = dri2_wl_visual_idx_from_config(dri2_dpy, config,
667 dri2_surf->base.PresentOpaque);
668 assert(visual_idx != -1);
669
670 if (dri2_dpy->wl_dmabuf || dri2_dpy->wl_drm) {
671 dri2_surf->format = dri2_wl_visuals[visual_idx].wl_drm_format;
672 } else {
673 assert(dri2_dpy->wl_shm);
674 dri2_surf->format = dri2_wl_visuals[visual_idx].wl_shm_format;
675 }
676
677 dri2_surf->wl_queue = wl_display_create_queue(dri2_dpy->wl_dpy);
678 if (!dri2_surf->wl_queue) {
679 _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
680 goto cleanup_surf;
681 }
682
683 if (dri2_dpy->wl_drm) {
684 dri2_surf->wl_drm_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_drm);
685 if (!dri2_surf->wl_drm_wrapper) {
686 _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
687 goto cleanup_queue;
688 }
689 wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_drm_wrapper,
690 dri2_surf->wl_queue);
691 }
692
693 dri2_surf->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
694 if (!dri2_surf->wl_dpy_wrapper) {
695 _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
696 goto cleanup_drm;
697 }
698 wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_dpy_wrapper,
699 dri2_surf->wl_queue);
700
701 dri2_surf->wl_surface_wrapper = get_wl_surface_proxy(window);
702 if (!dri2_surf->wl_surface_wrapper) {
703 _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
704 goto cleanup_dpy_wrapper;
705 }
706 wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_surface_wrapper,
707 dri2_surf->wl_queue);
708
709 if (dri2_dpy->wl_dmabuf && zwp_linux_dmabuf_v1_get_version(dri2_dpy->wl_dmabuf) >=
710 ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
711 dmabuf_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dmabuf);
712 if (!dmabuf_wrapper) {
713 _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
714 goto cleanup_surf_wrapper;
715 }
716 wl_proxy_set_queue((struct wl_proxy *)dmabuf_wrapper,
717 dri2_surf->wl_queue);
718 dri2_surf->wl_dmabuf_feedback =
719 zwp_linux_dmabuf_v1_get_surface_feedback(dmabuf_wrapper,
720 dri2_surf->wl_surface_wrapper);
721 wl_proxy_wrapper_destroy(dmabuf_wrapper);
722
723 zwp_linux_dmabuf_feedback_v1_add_listener(dri2_surf->wl_dmabuf_feedback,
724 &surface_dmabuf_feedback_listener,
725 dri2_surf);
726
727 if (dmabuf_feedback_init(&dri2_surf->pending_dmabuf_feedback) < 0) {
728 zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
729 goto cleanup_surf_wrapper;
730 }
731 if (dmabuf_feedback_init(&dri2_surf->dmabuf_feedback) < 0) {
732 dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
733 zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
734 goto cleanup_surf_wrapper;
735 }
736
737 if (roundtrip(dri2_dpy) < 0)
738 goto cleanup_dmabuf_feedback;
739 }
740
741 dri2_surf->wl_win = window;
742 dri2_surf->wl_win->driver_private = dri2_surf;
743 dri2_surf->wl_win->destroy_window_callback = destroy_window_callback;
744 if (dri2_dpy->flush)
745 dri2_surf->wl_win->resize_callback = resize_callback;
746
747 if (!dri2_create_drawable(dri2_dpy, config, dri2_surf, dri2_surf))
748 goto cleanup_dmabuf_feedback;
749
750 dri2_surf->base.SwapInterval = dri2_dpy->default_swap_interval;
751
752 return &dri2_surf->base;
753
754 cleanup_dmabuf_feedback:
755 if (dri2_surf->wl_dmabuf_feedback) {
756 zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
757 dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
758 dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
759 }
760 cleanup_surf_wrapper:
761 wl_proxy_wrapper_destroy(dri2_surf->wl_surface_wrapper);
762 cleanup_dpy_wrapper:
763 wl_proxy_wrapper_destroy(dri2_surf->wl_dpy_wrapper);
764 cleanup_drm:
765 if (dri2_surf->wl_drm_wrapper)
766 wl_proxy_wrapper_destroy(dri2_surf->wl_drm_wrapper);
767 cleanup_queue:
768 wl_event_queue_destroy(dri2_surf->wl_queue);
769 cleanup_surf:
770 free(dri2_surf);
771
772 return NULL;
773 }
774
775 static _EGLSurface *
dri2_wl_create_pixmap_surface(_EGLDisplay *disp, _EGLConfig *conf, void *native_window, const EGLint *attrib_list)776 dri2_wl_create_pixmap_surface(_EGLDisplay *disp, _EGLConfig *conf,
777 void *native_window, const EGLint *attrib_list)
778 {
779 /* From the EGL_EXT_platform_wayland spec, version 3:
780 *
781 * It is not valid to call eglCreatePlatformPixmapSurfaceEXT with a <dpy>
782 * that belongs to Wayland. Any such call fails and generates
783 * EGL_BAD_PARAMETER.
784 */
785 _eglError(EGL_BAD_PARAMETER, "cannot create EGL pixmap surfaces on "
786 "Wayland");
787 return NULL;
788 }
789
790 /**
791 * Called via eglDestroySurface(), drv->DestroySurface().
792 */
793 static EGLBoolean
dri2_wl_destroy_surface(_EGLDisplay *disp, _EGLSurface *surf)794 dri2_wl_destroy_surface(_EGLDisplay *disp, _EGLSurface *surf)
795 {
796 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
797 struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);
798
799 dri2_dpy->core->destroyDrawable(dri2_surf->dri_drawable);
800
801 for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
802 if (dri2_surf->color_buffers[i].wl_buffer)
803 wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
804 if (dri2_surf->color_buffers[i].dri_image)
805 dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].dri_image);
806 if (dri2_surf->color_buffers[i].linear_copy)
807 dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].linear_copy);
808 if (dri2_surf->color_buffers[i].data)
809 munmap(dri2_surf->color_buffers[i].data,
810 dri2_surf->color_buffers[i].data_size);
811 }
812
813 if (dri2_dpy->dri2)
814 dri2_egl_surface_free_local_buffers(dri2_surf);
815
816 if (dri2_surf->throttle_callback)
817 wl_callback_destroy(dri2_surf->throttle_callback);
818
819 if (dri2_surf->wl_win) {
820 dri2_surf->wl_win->driver_private = NULL;
821 dri2_surf->wl_win->resize_callback = NULL;
822 dri2_surf->wl_win->destroy_window_callback = NULL;
823 }
824
825 wl_proxy_wrapper_destroy(dri2_surf->wl_surface_wrapper);
826 wl_proxy_wrapper_destroy(dri2_surf->wl_dpy_wrapper);
827 if (dri2_surf->wl_drm_wrapper)
828 wl_proxy_wrapper_destroy(dri2_surf->wl_drm_wrapper);
829 if (dri2_surf->wl_dmabuf_feedback) {
830 zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
831 dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
832 dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
833 }
834 wl_event_queue_destroy(dri2_surf->wl_queue);
835
836 dri2_fini_surface(surf);
837 free(surf);
838
839 return EGL_TRUE;
840 }
841
842 static EGLBoolean
dri2_wl_swap_interval(_EGLDisplay *disp, _EGLSurface *surf, EGLint interval)843 dri2_wl_swap_interval(_EGLDisplay *disp, _EGLSurface *surf, EGLint interval)
844 {
845 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
846 struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);
847
848 if (dri2_dpy->kopper)
849 dri2_dpy->kopper->setSwapInterval(dri2_surf->dri_drawable, interval);
850
851 return EGL_TRUE;
852 }
853
854 static void
dri2_wl_release_buffers(struct dri2_egl_surface *dri2_surf)855 dri2_wl_release_buffers(struct dri2_egl_surface *dri2_surf)
856 {
857 struct dri2_egl_display *dri2_dpy =
858 dri2_egl_display(dri2_surf->base.Resource.Display);
859
860 for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
861 if (dri2_surf->color_buffers[i].wl_buffer) {
862 if (dri2_surf->color_buffers[i].locked) {
863 dri2_surf->color_buffers[i].wl_release = true;
864 } else {
865 wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
866 dri2_surf->color_buffers[i].wl_buffer = NULL;
867 }
868 }
869 if (dri2_surf->color_buffers[i].dri_image)
870 dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].dri_image);
871 if (dri2_surf->color_buffers[i].linear_copy)
872 dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].linear_copy);
873 if (dri2_surf->color_buffers[i].data)
874 munmap(dri2_surf->color_buffers[i].data,
875 dri2_surf->color_buffers[i].data_size);
876
877 dri2_surf->color_buffers[i].dri_image = NULL;
878 dri2_surf->color_buffers[i].linear_copy = NULL;
879 dri2_surf->color_buffers[i].data = NULL;
880 dri2_surf->color_buffers[i].age = 0;
881 }
882
883 if (dri2_dpy->dri2)
884 dri2_egl_surface_free_local_buffers(dri2_surf);
885 }
886
887 static void
create_dri_image_diff_gpu(struct dri2_egl_surface *dri2_surf, unsigned int linear_dri_image_format, uint32_t use_flags)888 create_dri_image_diff_gpu(struct dri2_egl_surface *dri2_surf,
889 unsigned int linear_dri_image_format, uint32_t use_flags)
890 {
891 struct dri2_egl_display *dri2_dpy =
892 dri2_egl_display(dri2_surf->base.Resource.Display);
893 uint64_t linear_mod;
894
895 /* The LINEAR modifier should be a perfect alias of the LINEAR use flag */
896 linear_mod = DRM_FORMAT_MOD_LINEAR;
897
898 dri2_surf->back->linear_copy =
899 loader_dri_create_image(dri2_dpy->dri_screen, dri2_dpy->image,
900 dri2_surf->base.Width,
901 dri2_surf->base.Height,
902 linear_dri_image_format,
903 use_flags | __DRI_IMAGE_USE_LINEAR,
904 &linear_mod, 1, NULL);
905 }
906
907 static void
create_dri_image_from_dmabuf_feedback(struct dri2_egl_surface *dri2_surf, unsigned int dri_image_format, uint32_t use_flags)908 create_dri_image_from_dmabuf_feedback(struct dri2_egl_surface *dri2_surf,
909 unsigned int dri_image_format, uint32_t use_flags)
910 {
911 struct dri2_egl_display *dri2_dpy =
912 dri2_egl_display(dri2_surf->base.Resource.Display);
913 int visual_idx;
914 uint64_t *modifiers;
915 unsigned int num_modifiers;
916 uint32_t flags;
917
918 /* We don't have valid dma-buf feedback, so return */
919 if (dri2_surf->dmabuf_feedback.main_device == 0)
920 return;
921
922 visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
923 assert(visual_idx != -1);
924
925 /* Iterates through the dma-buf feedback to pick a new set of modifiers. The
926 * tranches are sent in descending order of preference by the compositor, so
927 * the first set that we can pick is the best one. For now we still can't
928 * specify the target device in order to make the render device try its best
929 * to allocate memory that can be directly scanned out by the KMS device. But
930 * in the future this may change (newer versions of
931 * createImageWithModifiers). Also, we are safe to pick modifiers from
932 * tranches whose target device differs from the main device, as compositors
933 * do not expose (in dma-buf feedback tranches) formats/modifiers that are
934 * incompatible with the main device. */
935 util_dynarray_foreach(&dri2_surf->dmabuf_feedback.tranches,
936 struct dmabuf_feedback_tranche, tranche) {
937 /* Ignore tranches that do not contain dri2_surf->format */
938 if (!BITSET_TEST(tranche->formats.formats_bitmap, visual_idx))
939 continue;
940 modifiers = u_vector_tail(&tranche->formats.modifiers[visual_idx]);
941 num_modifiers = u_vector_length(&tranche->formats.modifiers[visual_idx]);
942
943 /* For the purposes of this function, an INVALID modifier on
944 * its own means the modifiers aren't supported. */
945 if (num_modifiers == 0 ||
946 (num_modifiers == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID)) {
947 num_modifiers = 0;
948 modifiers = NULL;
949 }
950
951 flags = use_flags;
952 if (tranche->flags & ZWP_LINUX_DMABUF_FEEDBACK_V1_TRANCHE_FLAGS_SCANOUT)
953 flags |= __DRI_IMAGE_USE_SCANOUT;
954
955 dri2_surf->back->dri_image =
956 loader_dri_create_image(dri2_dpy->dri_screen, dri2_dpy->image,
957 dri2_surf->base.Width,
958 dri2_surf->base.Height,
959 dri_image_format,
960 dri2_dpy->is_different_gpu ? 0 : flags,
961 modifiers, num_modifiers, NULL);
962
963 if (dri2_surf->back->dri_image)
964 return;
965 }
966 }
967
968 static void
create_dri_image(struct dri2_egl_surface *dri2_surf, unsigned int dri_image_format, uint32_t use_flags)969 create_dri_image(struct dri2_egl_surface *dri2_surf,
970 unsigned int dri_image_format, uint32_t use_flags)
971 {
972 struct dri2_egl_display *dri2_dpy =
973 dri2_egl_display(dri2_surf->base.Resource.Display);
974 int visual_idx;
975 uint64_t *modifiers;
976 unsigned int num_modifiers;
977
978 visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
979 modifiers = u_vector_tail(&dri2_dpy->formats.modifiers[visual_idx]);
980 num_modifiers = u_vector_length(&dri2_dpy->formats.modifiers[visual_idx]);
981
982 /* For the purposes of this function, an INVALID modifier on
983 * its own means the modifiers aren't supported. */
984 if (num_modifiers == 0 ||
985 (num_modifiers == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID)) {
986 num_modifiers = 0;
987 modifiers = NULL;
988 }
989
990 /* If our DRIImage implementation does not support createImageWithModifiers,
991 * then fall back to the old createImage, and hope it allocates an image
992 * which is acceptable to the winsys. */
993 dri2_surf->back->dri_image =
994 loader_dri_create_image(dri2_dpy->dri_screen, dri2_dpy->image,
995 dri2_surf->base.Width,
996 dri2_surf->base.Height,
997 dri_image_format,
998 dri2_dpy->is_different_gpu ? 0 : use_flags,
999 modifiers, num_modifiers, NULL);
1000 }
1001
1002 static int
get_back_bo(struct dri2_egl_surface *dri2_surf)1003 get_back_bo(struct dri2_egl_surface *dri2_surf)
1004 {
1005 struct dri2_egl_display *dri2_dpy =
1006 dri2_egl_display(dri2_surf->base.Resource.Display);
1007 int use_flags;
1008 int visual_idx;
1009 unsigned int dri_image_format;
1010 unsigned int linear_dri_image_format;
1011
1012 visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
1013 assert(visual_idx != -1);
1014 dri_image_format = dri2_wl_visuals[visual_idx].dri_image_format;
1015 linear_dri_image_format = dri_image_format;
1016
1017 /* Substitute dri image format if server does not support original format */
1018 if (!BITSET_TEST(dri2_dpy->formats.formats_bitmap, visual_idx))
1019 linear_dri_image_format = dri2_wl_visuals[visual_idx].alt_dri_image_format;
1020
1021 /* These asserts hold, as long as dri2_wl_visuals[] is self-consistent and
1022 * the PRIME substitution logic in dri2_wl_add_configs_for_visuals() is free
1023 * of bugs.
1024 */
1025 assert(linear_dri_image_format != __DRI_IMAGE_FORMAT_NONE);
1026 assert(BITSET_TEST(dri2_dpy->formats.formats_bitmap,
1027 dri2_wl_visual_idx_from_dri_image_format(linear_dri_image_format)));
1028
1029 /* There might be a buffer release already queued that wasn't processed */
1030 wl_display_dispatch_queue_pending(dri2_dpy->wl_dpy, dri2_surf->wl_queue);
1031
1032 while (dri2_surf->back == NULL) {
1033 for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
1034 /* Get an unlocked buffer, preferably one with a dri_buffer
1035 * already allocated. */
1036 if (dri2_surf->color_buffers[i].locked)
1037 continue;
1038 if (dri2_surf->back == NULL)
1039 dri2_surf->back = &dri2_surf->color_buffers[i];
1040 else if (dri2_surf->back->dri_image == NULL)
1041 dri2_surf->back = &dri2_surf->color_buffers[i];
1042 }
1043
1044 if (dri2_surf->back)
1045 break;
1046
1047 /* If we don't have a buffer, then block on the server to release one for
1048 * us, and try again. wl_display_dispatch_queue will process any pending
1049 * events, however not all servers flush on issuing a buffer release
1050 * event. So, we spam the server with roundtrips as they always cause a
1051 * client flush.
1052 */
1053 if (wl_display_roundtrip_queue(dri2_dpy->wl_dpy,
1054 dri2_surf->wl_queue) < 0)
1055 return -1;
1056 }
1057
1058 if (dri2_surf->back == NULL)
1059 return -1;
1060
1061 use_flags = __DRI_IMAGE_USE_SHARE | __DRI_IMAGE_USE_BACKBUFFER;
1062
1063 if (dri2_surf->base.ProtectedContent) {
1064 /* Protected buffers can't be read from another GPU */
1065 if (dri2_dpy->is_different_gpu)
1066 return -1;
1067 use_flags |= __DRI_IMAGE_USE_PROTECTED;
1068 }
1069
1070 if (dri2_dpy->is_different_gpu && dri2_surf->back->linear_copy == NULL) {
1071 create_dri_image_diff_gpu(dri2_surf, linear_dri_image_format, use_flags);
1072 if (dri2_surf->back->linear_copy == NULL)
1073 return -1;
1074 }
1075
1076 if (dri2_surf->back->dri_image == NULL) {
1077 if (dri2_surf->wl_dmabuf_feedback)
1078 create_dri_image_from_dmabuf_feedback(dri2_surf, dri_image_format, use_flags);
1079 if (dri2_surf->back->dri_image == NULL)
1080 create_dri_image(dri2_surf, dri_image_format, use_flags);
1081 dri2_surf->back->age = 0;
1082 }
1083
1084 if (dri2_surf->back->dri_image == NULL)
1085 return -1;
1086
1087 dri2_surf->back->locked = true;
1088
1089 return 0;
1090 }
1091
1092
1093 static void
back_bo_to_dri_buffer(struct dri2_egl_surface *dri2_surf, __DRIbuffer *buffer)1094 back_bo_to_dri_buffer(struct dri2_egl_surface *dri2_surf, __DRIbuffer *buffer)
1095 {
1096 struct dri2_egl_display *dri2_dpy =
1097 dri2_egl_display(dri2_surf->base.Resource.Display);
1098 __DRIimage *image;
1099 int name, pitch;
1100
1101 image = dri2_surf->back->dri_image;
1102
1103 dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_NAME, &name);
1104 dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE, &pitch);
1105
1106 buffer->attachment = __DRI_BUFFER_BACK_LEFT;
1107 buffer->name = name;
1108 buffer->pitch = pitch;
1109 buffer->cpp = 4;
1110 buffer->flags = 0;
1111 }
1112
1113 /* Value chosen empirically as a compromise between avoiding frequent
1114 * reallocations and extended time of increased memory consumption due to
1115 * unused buffers being kept.
1116 */
1117 #define BUFFER_TRIM_AGE_HYSTERESIS 20
1118
1119 static int
update_buffers(struct dri2_egl_surface *dri2_surf)1120 update_buffers(struct dri2_egl_surface *dri2_surf)
1121 {
1122 struct dri2_egl_display *dri2_dpy =
1123 dri2_egl_display(dri2_surf->base.Resource.Display);
1124
1125 if (dri2_surf->wl_win &&
1126 (dri2_surf->base.Width != dri2_surf->wl_win->width ||
1127 dri2_surf->base.Height != dri2_surf->wl_win->height)) {
1128
1129 dri2_surf->base.Width = dri2_surf->wl_win->width;
1130 dri2_surf->base.Height = dri2_surf->wl_win->height;
1131 dri2_surf->dx = dri2_surf->wl_win->dx;
1132 dri2_surf->dy = dri2_surf->wl_win->dy;
1133 }
1134
1135 if (dri2_surf->resized || dri2_surf->received_dmabuf_feedback) {
1136 dri2_wl_release_buffers(dri2_surf);
1137 dri2_surf->resized = false;
1138 dri2_surf->received_dmabuf_feedback = false;
1139 }
1140
1141 if (get_back_bo(dri2_surf) < 0) {
1142 _eglError(EGL_BAD_ALLOC, "failed to allocate color buffer");
1143 return -1;
1144 }
1145
1146 /* If we have an extra unlocked buffer at this point, we had to do triple
1147 * buffering for a while, but now can go back to just double buffering.
1148 * That means we can free any unlocked buffer now. To avoid toggling between
1149 * going back to double buffering and needing to allocate another buffer too
1150 * fast we let the unneeded buffer sit around for a short while. */
1151 for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
1152 if (!dri2_surf->color_buffers[i].locked &&
1153 dri2_surf->color_buffers[i].wl_buffer &&
1154 dri2_surf->color_buffers[i].age > BUFFER_TRIM_AGE_HYSTERESIS) {
1155 wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
1156 dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].dri_image);
1157 if (dri2_dpy->is_different_gpu)
1158 dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].linear_copy);
1159 dri2_surf->color_buffers[i].wl_buffer = NULL;
1160 dri2_surf->color_buffers[i].dri_image = NULL;
1161 dri2_surf->color_buffers[i].linear_copy = NULL;
1162 dri2_surf->color_buffers[i].age = 0;
1163 }
1164 }
1165
1166 return 0;
1167 }
1168
1169 static int
update_buffers_if_needed(struct dri2_egl_surface *dri2_surf)1170 update_buffers_if_needed(struct dri2_egl_surface *dri2_surf)
1171 {
1172 if (dri2_surf->back != NULL)
1173 return 0;
1174
1175 return update_buffers(dri2_surf);
1176 }
1177
1178 static __DRIbuffer *
dri2_wl_get_buffers_with_format(__DRIdrawable * driDrawable, int *width, int *height, unsigned int *attachments, int count, int *out_count, void *loaderPrivate)1179 dri2_wl_get_buffers_with_format(__DRIdrawable * driDrawable,
1180 int *width, int *height,
1181 unsigned int *attachments, int count,
1182 int *out_count, void *loaderPrivate)
1183 {
1184 struct dri2_egl_surface *dri2_surf = loaderPrivate;
1185 int i, j;
1186
1187 if (update_buffers_if_needed(dri2_surf) < 0)
1188 return NULL;
1189
1190 for (i = 0, j = 0; i < 2 * count; i += 2, j++) {
1191 __DRIbuffer *local;
1192
1193 switch (attachments[i]) {
1194 case __DRI_BUFFER_BACK_LEFT:
1195 back_bo_to_dri_buffer(dri2_surf, &dri2_surf->buffers[j]);
1196 break;
1197 default:
1198 local = dri2_egl_surface_alloc_local_buffer(dri2_surf, attachments[i],
1199 attachments[i + 1]);
1200
1201 if (!local) {
1202 _eglError(EGL_BAD_ALLOC, "failed to allocate local buffer");
1203 return NULL;
1204 }
1205 dri2_surf->buffers[j] = *local;
1206 break;
1207 }
1208 }
1209
1210 *out_count = j;
1211 if (j == 0)
1212 return NULL;
1213
1214 *width = dri2_surf->base.Width;
1215 *height = dri2_surf->base.Height;
1216
1217 return dri2_surf->buffers;
1218 }
1219
1220 static __DRIbuffer *
dri2_wl_get_buffers(__DRIdrawable * driDrawable, int *width, int *height, unsigned int *attachments, int count, int *out_count, void *loaderPrivate)1221 dri2_wl_get_buffers(__DRIdrawable * driDrawable,
1222 int *width, int *height,
1223 unsigned int *attachments, int count,
1224 int *out_count, void *loaderPrivate)
1225 {
1226 struct dri2_egl_surface *dri2_surf = loaderPrivate;
1227 unsigned int *attachments_with_format;
1228 __DRIbuffer *buffer;
1229 int visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
1230
1231 if (visual_idx == -1)
1232 return NULL;
1233
1234 attachments_with_format = calloc(count, 2 * sizeof(unsigned int));
1235 if (!attachments_with_format) {
1236 *out_count = 0;
1237 return NULL;
1238 }
1239
1240 for (int i = 0; i < count; ++i) {
1241 attachments_with_format[2*i] = attachments[i];
1242 attachments_with_format[2*i + 1] = dri2_wl_visuals[visual_idx].bpp;
1243 }
1244
1245 buffer =
1246 dri2_wl_get_buffers_with_format(driDrawable,
1247 width, height,
1248 attachments_with_format, count,
1249 out_count, loaderPrivate);
1250
1251 free(attachments_with_format);
1252
1253 return buffer;
1254 }
1255
1256 static int
image_get_buffers(__DRIdrawable *driDrawable, unsigned int format, uint32_t *stamp, void *loaderPrivate, uint32_t buffer_mask, struct __DRIimageList *buffers)1257 image_get_buffers(__DRIdrawable *driDrawable,
1258 unsigned int format,
1259 uint32_t *stamp,
1260 void *loaderPrivate,
1261 uint32_t buffer_mask,
1262 struct __DRIimageList *buffers)
1263 {
1264 struct dri2_egl_surface *dri2_surf = loaderPrivate;
1265
1266 if (update_buffers_if_needed(dri2_surf) < 0)
1267 return 0;
1268
1269 buffers->image_mask = __DRI_IMAGE_BUFFER_BACK;
1270 buffers->back = dri2_surf->back->dri_image;
1271
1272 return 1;
1273 }
1274
1275 static void
dri2_wl_flush_front_buffer(__DRIdrawable * driDrawable, void *loaderPrivate)1276 dri2_wl_flush_front_buffer(__DRIdrawable * driDrawable, void *loaderPrivate)
1277 {
1278 (void) driDrawable;
1279 (void) loaderPrivate;
1280 }
1281
1282 static unsigned
dri2_wl_get_capability(void *loaderPrivate, enum dri_loader_cap cap)1283 dri2_wl_get_capability(void *loaderPrivate, enum dri_loader_cap cap)
1284 {
1285 switch (cap) {
1286 case DRI_LOADER_CAP_FP16:
1287 return 1;
1288 case DRI_LOADER_CAP_RGBA_ORDERING:
1289 return 1;
1290 default:
1291 return 0;
1292 }
1293 }
1294
1295 static const __DRIdri2LoaderExtension dri2_loader_extension = {
1296 .base = { __DRI_DRI2_LOADER, 4 },
1297
1298 .getBuffers = dri2_wl_get_buffers,
1299 .flushFrontBuffer = dri2_wl_flush_front_buffer,
1300 .getBuffersWithFormat = dri2_wl_get_buffers_with_format,
1301 .getCapability = dri2_wl_get_capability,
1302 };
1303
1304 static const __DRIimageLoaderExtension image_loader_extension = {
1305 .base = { __DRI_IMAGE_LOADER, 2 },
1306
1307 .getBuffers = image_get_buffers,
1308 .flushFrontBuffer = dri2_wl_flush_front_buffer,
1309 .getCapability = dri2_wl_get_capability,
1310 };
1311
1312 static void
wayland_throttle_callback(void *data, struct wl_callback *callback, uint32_t time)1313 wayland_throttle_callback(void *data,
1314 struct wl_callback *callback,
1315 uint32_t time)
1316 {
1317 struct dri2_egl_surface *dri2_surf = data;
1318
1319 dri2_surf->throttle_callback = NULL;
1320 wl_callback_destroy(callback);
1321 }
1322
1323 static const struct wl_callback_listener throttle_listener = {
1324 .done = wayland_throttle_callback
1325 };
1326
1327 static EGLBoolean
get_fourcc(struct dri2_egl_display *dri2_dpy, __DRIimage *image, int *fourcc)1328 get_fourcc(struct dri2_egl_display *dri2_dpy,
1329 __DRIimage *image, int *fourcc)
1330 {
1331 EGLBoolean query;
1332 int dri_format;
1333 int visual_idx;
1334
1335 query = dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_FOURCC,
1336 fourcc);
1337 if (query)
1338 return true;
1339
1340 query = dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_FORMAT,
1341 &dri_format);
1342 if (!query)
1343 return false;
1344
1345 visual_idx = dri2_wl_visual_idx_from_dri_image_format(dri_format);
1346 if (visual_idx == -1)
1347 return false;
1348
1349 *fourcc = dri2_wl_visuals[visual_idx].wl_drm_format;
1350 return true;
1351 }
1352
1353 static struct wl_buffer *
create_wl_buffer(struct dri2_egl_display *dri2_dpy, struct dri2_egl_surface *dri2_surf, __DRIimage *image)1354 create_wl_buffer(struct dri2_egl_display *dri2_dpy,
1355 struct dri2_egl_surface *dri2_surf,
1356 __DRIimage *image)
1357 {
1358 struct wl_buffer *ret = NULL;
1359 EGLBoolean query;
1360 int width, height, fourcc, num_planes;
1361 uint64_t modifier = DRM_FORMAT_MOD_INVALID;
1362
1363 query = dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_WIDTH, &width);
1364 query &= dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_HEIGHT,
1365 &height);
1366 query &= get_fourcc(dri2_dpy, image, &fourcc);
1367 if (!query)
1368 return NULL;
1369
1370 query = dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_NUM_PLANES,
1371 &num_planes);
1372 if (!query)
1373 num_planes = 1;
1374
1375 if (dri2_dpy->image->base.version >= 15) {
1376 int mod_hi, mod_lo;
1377
1378 query = dri2_dpy->image->queryImage(image,
1379 __DRI_IMAGE_ATTRIB_MODIFIER_UPPER,
1380 &mod_hi);
1381 query &= dri2_dpy->image->queryImage(image,
1382 __DRI_IMAGE_ATTRIB_MODIFIER_LOWER,
1383 &mod_lo);
1384 if (query) {
1385 modifier = combine_u32_into_u64(mod_hi, mod_lo);
1386 }
1387 }
1388
1389 bool supported_modifier = false;
1390 bool mod_invalid_supported = false;
1391 int visual_idx = dri2_wl_visual_idx_from_fourcc(fourcc);
1392 assert(visual_idx != -1);
1393
1394 uint64_t *mod;
1395 u_vector_foreach(mod, &dri2_dpy->formats.modifiers[visual_idx]) {
1396 if (*mod == DRM_FORMAT_MOD_INVALID) {
1397 mod_invalid_supported = true;
1398 }
1399 if (*mod == modifier) {
1400 supported_modifier = true;
1401 break;
1402 }
1403 }
1404 if (!supported_modifier && mod_invalid_supported) {
1405 /* If the server has advertised DRM_FORMAT_MOD_INVALID then we trust
1406 * that the client has allocated the buffer with the right implicit
1407 * modifier for the format, even though it's allocated a buffer the
1408 * server hasn't explicitly claimed to support. */
1409 modifier = DRM_FORMAT_MOD_INVALID;
1410 supported_modifier = true;
1411 }
1412
1413 if (dri2_dpy->wl_dmabuf && supported_modifier) {
1414 struct zwp_linux_buffer_params_v1 *params;
1415 int i;
1416
1417 /* We don't need a wrapper for wl_dmabuf objects, because we have to
1418 * create the intermediate params object; we can set the queue on this,
1419 * and the wl_buffer inherits it race-free. */
1420 params = zwp_linux_dmabuf_v1_create_params(dri2_dpy->wl_dmabuf);
1421 if (dri2_surf)
1422 wl_proxy_set_queue((struct wl_proxy *) params, dri2_surf->wl_queue);
1423
1424 for (i = 0; i < num_planes; i++) {
1425 __DRIimage *p_image;
1426 int stride, offset;
1427 int fd = -1;
1428
1429 p_image = dri2_dpy->image->fromPlanar(image, i, NULL);
1430 if (!p_image) {
1431 assert(i == 0);
1432 p_image = image;
1433 }
1434
1435 query = dri2_dpy->image->queryImage(p_image,
1436 __DRI_IMAGE_ATTRIB_FD,
1437 &fd);
1438 query &= dri2_dpy->image->queryImage(p_image,
1439 __DRI_IMAGE_ATTRIB_STRIDE,
1440 &stride);
1441 query &= dri2_dpy->image->queryImage(p_image,
1442 __DRI_IMAGE_ATTRIB_OFFSET,
1443 &offset);
1444 if (image != p_image)
1445 dri2_dpy->image->destroyImage(p_image);
1446
1447 if (!query) {
1448 if (fd >= 0)
1449 close(fd);
1450 zwp_linux_buffer_params_v1_destroy(params);
1451 return NULL;
1452 }
1453
1454 zwp_linux_buffer_params_v1_add(params, fd, i, offset, stride,
1455 modifier >> 32, modifier & 0xffffffff);
1456 close(fd);
1457 }
1458
1459 ret = zwp_linux_buffer_params_v1_create_immed(params, width, height,
1460 fourcc, 0);
1461 zwp_linux_buffer_params_v1_destroy(params);
1462 } else {
1463 struct wl_drm *wl_drm =
1464 dri2_surf ? dri2_surf->wl_drm_wrapper : dri2_dpy->wl_drm;
1465 int fd = -1, stride;
1466
1467 if (num_planes > 1)
1468 return NULL;
1469
1470 query = dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD, &fd);
1471 query &= dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE, &stride);
1472 if (!query) {
1473 if (fd >= 0)
1474 close(fd);
1475 return NULL;
1476 }
1477
1478 ret = wl_drm_create_prime_buffer(wl_drm, fd, width, height, fourcc, 0,
1479 stride, 0, 0, 0, 0);
1480 close(fd);
1481 }
1482
1483 return ret;
1484 }
1485
1486 static EGLBoolean
try_damage_buffer(struct dri2_egl_surface *dri2_surf, const EGLint *rects, EGLint n_rects)1487 try_damage_buffer(struct dri2_egl_surface *dri2_surf,
1488 const EGLint *rects,
1489 EGLint n_rects)
1490 {
1491 if (wl_proxy_get_version((struct wl_proxy *) dri2_surf->wl_surface_wrapper)
1492 < WL_SURFACE_DAMAGE_BUFFER_SINCE_VERSION)
1493 return EGL_FALSE;
1494
1495 for (int i = 0; i < n_rects; i++) {
1496 const int *rect = &rects[i * 4];
1497
1498 wl_surface_damage_buffer(dri2_surf->wl_surface_wrapper,
1499 rect[0],
1500 dri2_surf->base.Height - rect[1] - rect[3],
1501 rect[2], rect[3]);
1502 }
1503 return EGL_TRUE;
1504 }
1505
1506 /**
1507 * Called via eglSwapBuffers(), drv->SwapBuffers().
1508 */
1509 static EGLBoolean
dri2_wl_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw, const EGLint *rects, EGLint n_rects)1510 dri2_wl_swap_buffers_with_damage(_EGLDisplay *disp,
1511 _EGLSurface *draw,
1512 const EGLint *rects,
1513 EGLint n_rects)
1514 {
1515 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1516 struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
1517
1518 if (!dri2_surf->wl_win)
1519 return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
1520
1521 while (dri2_surf->throttle_callback != NULL)
1522 if (wl_display_dispatch_queue(dri2_dpy->wl_dpy,
1523 dri2_surf->wl_queue) == -1)
1524 return -1;
1525
1526 for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++)
1527 if (dri2_surf->color_buffers[i].age > 0)
1528 dri2_surf->color_buffers[i].age++;
1529
1530 /* Make sure we have a back buffer in case we're swapping without ever
1531 * rendering. */
1532 if (update_buffers_if_needed(dri2_surf) < 0)
1533 return _eglError(EGL_BAD_ALLOC, "dri2_swap_buffers");
1534
1535 if (draw->SwapInterval > 0) {
1536 dri2_surf->throttle_callback =
1537 wl_surface_frame(dri2_surf->wl_surface_wrapper);
1538 wl_callback_add_listener(dri2_surf->throttle_callback,
1539 &throttle_listener, dri2_surf);
1540 }
1541
1542 dri2_surf->back->age = 1;
1543 dri2_surf->current = dri2_surf->back;
1544 dri2_surf->back = NULL;
1545
1546 if (!dri2_surf->current->wl_buffer) {
1547 __DRIimage *image;
1548
1549 if (dri2_dpy->is_different_gpu)
1550 image = dri2_surf->current->linear_copy;
1551 else
1552 image = dri2_surf->current->dri_image;
1553
1554 dri2_surf->current->wl_buffer =
1555 create_wl_buffer(dri2_dpy, dri2_surf, image);
1556
1557 dri2_surf->current->wl_release = false;
1558
1559 wl_buffer_add_listener(dri2_surf->current->wl_buffer,
1560 &wl_buffer_listener, dri2_surf);
1561 }
1562
1563 wl_surface_attach(dri2_surf->wl_surface_wrapper,
1564 dri2_surf->current->wl_buffer,
1565 dri2_surf->dx, dri2_surf->dy);
1566
1567 dri2_surf->wl_win->attached_width = dri2_surf->base.Width;
1568 dri2_surf->wl_win->attached_height = dri2_surf->base.Height;
1569 /* reset resize growing parameters */
1570 dri2_surf->dx = 0;
1571 dri2_surf->dy = 0;
1572
1573 /* If the compositor doesn't support damage_buffer, we deliberately
1574 * ignore the damage region and post maximum damage, due to
1575 * https://bugs.freedesktop.org/78190 */
1576 if (!n_rects || !try_damage_buffer(dri2_surf, rects, n_rects))
1577 wl_surface_damage(dri2_surf->wl_surface_wrapper,
1578 0, 0, INT32_MAX, INT32_MAX);
1579
1580 if (dri2_dpy->is_different_gpu) {
1581 _EGLContext *ctx = _eglGetCurrentContext();
1582 struct dri2_egl_context *dri2_ctx = dri2_egl_context(ctx);
1583 dri2_dpy->image->blitImage(dri2_ctx->dri_context,
1584 dri2_surf->current->linear_copy,
1585 dri2_surf->current->dri_image,
1586 0, 0, dri2_surf->base.Width,
1587 dri2_surf->base.Height,
1588 0, 0, dri2_surf->base.Width,
1589 dri2_surf->base.Height, 0);
1590 }
1591
1592 dri2_flush_drawable_for_swapbuffers(disp, draw);
1593 dri2_dpy->flush->invalidate(dri2_surf->dri_drawable);
1594
1595 wl_surface_commit(dri2_surf->wl_surface_wrapper);
1596
1597 /* If we're not waiting for a frame callback then we'll at least throttle
1598 * to a sync callback so that we always give a chance for the compositor to
1599 * handle the commit and send a release event before checking for a free
1600 * buffer */
1601 if (dri2_surf->throttle_callback == NULL) {
1602 dri2_surf->throttle_callback = wl_display_sync(dri2_surf->wl_dpy_wrapper);
1603 wl_callback_add_listener(dri2_surf->throttle_callback,
1604 &throttle_listener, dri2_surf);
1605 }
1606
1607 wl_display_flush(dri2_dpy->wl_dpy);
1608
1609 return EGL_TRUE;
1610 }
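
/*
 * Illustrative sketch (an assumption, not code from this file; the names are
 * hypothetical): the throttle_callback used above follows the standard
 * wl_callback pattern, where the 'done' handler clears the pointer and
 * destroys the callback so the dispatch loop at the top of the swap path can
 * make progress, roughly:
 *
 *    static void
 *    throttle_done(void *data, struct wl_callback *callback, uint32_t time)
 *    {
 *       struct dri2_egl_surface *surf = data;
 *
 *       surf->throttle_callback = NULL;
 *       wl_callback_destroy(callback);
 *    }
 *
 *    static const struct wl_callback_listener example_throttle_listener = {
 *       .done = throttle_done,
 *    };
 */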
1611
1612 static EGLint
1613 dri2_wl_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
1614 {
1615 struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
1616
1617 if (update_buffers_if_needed(dri2_surf) < 0) {
1618 _eglError(EGL_BAD_ALLOC, "dri2_query_buffer_age");
1619 return -1;
1620 }
1621
1622 return dri2_surf->back->age;
1623 }
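
/*
 * Hedged usage sketch (not part of this file): with EGL_EXT_buffer_age an
 * application reads the age through eglQuerySurface() after making the
 * surface current, e.g.:
 *
 *    EGLint age = 0;
 *    eglQuerySurface(egl_dpy, egl_surf, EGL_BUFFER_AGE_EXT, &age);
 *    // age == 0: undefined contents; age == N: contents of the frame
 *    // presented N swaps ago, so only the damaged region needs repainting.
 */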
1624
1625 static EGLBoolean
1626 dri2_wl_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
1627 {
1628 return dri2_wl_swap_buffers_with_damage(disp, draw, NULL, 0);
1629 }
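
/*
 * Hedged usage sketch (not part of this file): the damage path above is what
 * backs eglSwapBuffersWithDamageEXT/KHR, which takes damage rectangles as
 * {x, y, width, height} quadruples, e.g.:
 *
 *    EGLint rect[4] = { 0, 0, 256, 256 };
 *    eglSwapBuffersWithDamageEXT(egl_dpy, egl_surf, rect, 1);
 */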
1630
1631 static struct wl_buffer *
1632 dri2_wl_create_wayland_buffer_from_image(_EGLDisplay *disp, _EGLImage *img)
1633 {
1634 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1635 struct dri2_egl_image *dri2_img = dri2_egl_image(img);
1636 __DRIimage *image = dri2_img->dri_image;
1637 struct wl_buffer *buffer;
1638 int format, visual_idx;
1639
1640 /* Check that the upstream display supports this buffer's format. */
1641 dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_FORMAT, &format);
1642 visual_idx = dri2_wl_visual_idx_from_dri_image_format(format);
1643 if (visual_idx == -1)
1644 goto bad_format;
1645
1646 if (!BITSET_TEST(dri2_dpy->formats.formats_bitmap, visual_idx))
1647 goto bad_format;
1648
1649 buffer = create_wl_buffer(dri2_dpy, NULL, image);
1650
1651 /* The buffer object will have been created with our internal event queue
1652 * because it is using wl_dmabuf/wl_drm as a proxy factory. We want the
1653 * buffer to be used by the application so we'll reset it to the display's
1654 * default event queue. This isn't actually racy, as the only event the
1655 * buffer can get is a buffer release, which doesn't happen with an explicit
1656 * attach. */
1657 if (buffer)
1658 wl_proxy_set_queue((struct wl_proxy *) buffer, NULL);
1659
1660 return buffer;
1661
1662 bad_format:
1663 _eglError(EGL_BAD_MATCH, "unsupported image format");
1664 return NULL;
1665 }
1666
1667 static int
1668 dri2_wl_authenticate(_EGLDisplay *disp, uint32_t id)
1669 {
1670 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1671 int ret = 0;
1672
1673 if (dri2_dpy->is_render_node) {
1674 _eglLog(_EGL_WARNING, "wayland-egl: client asks server to "
1675 "authenticate for render-nodes");
1676 return 0;
1677 }
1678 dri2_dpy->authenticated = false;
1679
1680 wl_drm_authenticate(dri2_dpy->wl_drm, id);
1681 if (roundtrip(dri2_dpy) < 0)
1682 ret = -1;
1683
1684 if (!dri2_dpy->authenticated)
1685 ret = -1;
1686
1687 /* reset authenticated */
1688 dri2_dpy->authenticated = true;
1689
1690 return ret;
1691 }
1692
1693 static void
1694 drm_handle_device(void *data, struct wl_drm *drm, const char *device)
1695 {
1696 struct dri2_egl_display *dri2_dpy = data;
1697 drm_magic_t magic;
1698
1699 dri2_dpy->device_name = strdup(device);
1700 if (!dri2_dpy->device_name)
1701 return;
1702
1703 dri2_dpy->fd = loader_open_device(dri2_dpy->device_name);
1704 if (dri2_dpy->fd == -1) {
1705 _eglLog(_EGL_WARNING, "wayland-egl: could not open %s (%s)",
1706 dri2_dpy->device_name, strerror(errno));
1707 free(dri2_dpy->device_name);
1708 dri2_dpy->device_name = NULL;
1709 return;
1710 }
1711
1712 if (drmGetNodeTypeFromFd(dri2_dpy->fd) == DRM_NODE_RENDER) {
1713 dri2_dpy->authenticated = true;
1714 } else {
1715 if (drmGetMagic(dri2_dpy->fd, &magic)) {
1716 close(dri2_dpy->fd);
1717 dri2_dpy->fd = -1;
1718 free(dri2_dpy->device_name);
1719 dri2_dpy->device_name = NULL;
1720 _eglLog(_EGL_WARNING, "wayland-egl: drmGetMagic failed");
1721 return;
1722 }
1723 wl_drm_authenticate(dri2_dpy->wl_drm, magic);
1724 }
1725 }
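
/*
 * Hedged sketch of the legacy wl_drm authentication handshake driven above
 * (the compositor side is an assumption about a typical implementation, and
 * the helper name is hypothetical): the client sends the magic it obtained
 * from drmGetMagic(), the compositor authenticates it against its DRM master
 * fd and then emits wl_drm.authenticated, roughly:
 *
 *    // compositor side, on receiving wl_drm.authenticate(magic)
 *    if (drmAuthMagic(compositor_drm_fd, magic) == 0)
 *       send_wl_drm_authenticated_event();
 */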
1726
1727 static void
1728 drm_handle_format(void *data, struct wl_drm *drm, uint32_t format)
1729 {
1730 struct dri2_egl_display *dri2_dpy = data;
1731 int visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1732
1733 if (visual_idx == -1)
1734 return;
1735
1736 BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1737 }
1738
1739 static void
1740 drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t value)
1741 {
1742 struct dri2_egl_display *dri2_dpy = data;
1743
1744 dri2_dpy->capabilities = value;
1745 }
1746
1747 static void
1748 drm_handle_authenticated(void *data, struct wl_drm *drm)
1749 {
1750 struct dri2_egl_display *dri2_dpy = data;
1751
1752 dri2_dpy->authenticated = true;
1753 }
1754
1755 static const struct wl_drm_listener drm_listener = {
1756 .device = drm_handle_device,
1757 .format = drm_handle_format,
1758 .authenticated = drm_handle_authenticated,
1759 .capabilities = drm_handle_capabilities
1760 };
1761
1762 static void
1763 dmabuf_ignore_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
1764 uint32_t format)
1765 {
1766 /* formats are implicitly advertised by the 'modifier' event, so ignore */
1767 }
1768
1769 static void
1770 dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
1771 uint32_t format, uint32_t modifier_hi,
1772 uint32_t modifier_lo)
1773 {
1774 struct dri2_egl_display *dri2_dpy = data;
1775 int visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1776 uint64_t *mod;
1777
1778 /* Ignore this if the compositor advertised dma-buf feedback. From version 4
1779 * onwards (when dma-buf feedback was introduced), the compositor should not
1780 * advertise this event anymore, but let's keep this for safety. */
1781 if (dri2_dpy->wl_dmabuf_feedback)
1782 return;
1783
1784 if (visual_idx == -1)
1785 return;
1786
1787 BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1788
1789 mod = u_vector_add(&dri2_dpy->formats.modifiers[visual_idx]);
1790 if (mod)
1791 *mod = combine_u32_into_u64(modifier_hi, modifier_lo);
1792 }
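
/*
 * Assuming combine_u32_into_u64() follows the usual convention for the split
 * modifier advertised by zwp_linux_dmabuf_v1, the stored value is equivalent
 * to:
 *
 *    uint64_t modifier = ((uint64_t) modifier_hi << 32) | modifier_lo;
 */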
1793
1794 static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
1795 .format = dmabuf_ignore_format,
1796 .modifier = dmabuf_handle_modifier,
1797 };
1798
1799 static void
1800 wl_drm_bind(struct dri2_egl_display *dri2_dpy)
1801 {
1802 dri2_dpy->wl_drm = wl_registry_bind(dri2_dpy->wl_registry, dri2_dpy->wl_drm_name,
1803 &wl_drm_interface, dri2_dpy->wl_drm_version);
1804 wl_drm_add_listener(dri2_dpy->wl_drm, &drm_listener, dri2_dpy);
1805 }
1806
1807 static void
1808 default_dmabuf_feedback_format_table(void *data,
1809 struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
1810 int32_t fd, uint32_t size)
1811 {
1812 struct dri2_egl_display *dri2_dpy = data;
1813
1814 dri2_dpy->format_table.size = size;
1815 dri2_dpy->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
1816
1817 close(fd);
1818 }
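
/*
 * Sketch of the expected mapping layout (per the linux-dmabuf feedback
 * protocol; the struct name here is illustrative): the table is an array of
 * 16-byte entries, which tranche_formats indexes into below.
 *
 *    struct example_format_table_entry {
 *       uint32_t format;    // DRM fourcc
 *       uint32_t padding;   // reserved
 *       uint64_t modifier;  // DRM format modifier
 *    };
 */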
1819
1820 static void
1821 default_dmabuf_feedback_main_device(void *data,
1822 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1823 struct wl_array *device)
1824 {
1825 struct dri2_egl_display *dri2_dpy = data;
1826 char *node;
1827 int fd;
1828 dev_t dev;
1829
1830 /* Given the device, look for a render node and try to open it. */
1831 memcpy(&dev, device->data, sizeof(dev));
1832 node = loader_get_render_node(dev);
1833 if (!node)
1834 return;
1835 fd = loader_open_device(node);
1836 if (fd == -1) {
1837 free(node);
1838 return;
1839 }
1840
1841 dri2_dpy->device_name = node;
1842 dri2_dpy->fd = fd;
1843 dri2_dpy->authenticated = true;
1844 }
1845
1846 static void
1847 default_dmabuf_feedback_tranche_target_device(void *data,
1848 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1849 struct wl_array *device)
1850 {
1851 /* ignore this event */
1852 }
1853
1854 static void
1855 default_dmabuf_feedback_tranche_flags(void *data,
1856 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1857 uint32_t flags)
1858 {
1859 /* ignore this event */
1860 }
1861
1862 static void
1863 default_dmabuf_feedback_tranche_formats(void *data,
1864 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1865 struct wl_array *indices)
1866 {
1867 struct dri2_egl_display *dri2_dpy = data;
1868 uint64_t *modifier_ptr, modifier;
1869 uint32_t format;
1870 uint16_t *index;
1871 int visual_idx;
1872
1873 if (dri2_dpy->format_table.data == MAP_FAILED) {
1874 _eglLog(_EGL_WARNING, "wayland-egl: we could not map the format table "
1875 "so we won't be able to use this batch of dma-buf "
1876 "feedback events.");
1877 return;
1878 }
1879 if (dri2_dpy->format_table.data == NULL) {
1880 _eglLog(_EGL_WARNING, "wayland-egl: compositor didn't advertise a format "
1881 "table, so we won't be able to use this batch of dma-buf "
1882 "feedback events.");
1883 return;
1884 }
1885
1886 wl_array_for_each(index, indices) {
1887 format = dri2_dpy->format_table.data[*index].format;
1888 modifier = dri2_dpy->format_table.data[*index].modifier;
1889
1890 /* skip formats that we don't support */
1891 visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1892 if (visual_idx == -1)
1893 continue;
1894
1895 BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1896 modifier_ptr = u_vector_add(&dri2_dpy->formats.modifiers[visual_idx]);
1897 if (modifier_ptr)
1898 *modifier_ptr = modifier;
1899 }
1900 }
1901
1902 static void
1903 default_dmabuf_feedback_tranche_done(void *data,
1904 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
1905 {
1906 /* ignore this event */
1907 }
1908
1909 static void
1910 default_dmabuf_feedback_done(void *data,
1911 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
1912 {
1913 /* ignore this event */
1914 }
1915
1916 static const struct zwp_linux_dmabuf_feedback_v1_listener
1917 dmabuf_feedback_listener = {
1918 .format_table = default_dmabuf_feedback_format_table,
1919 .main_device = default_dmabuf_feedback_main_device,
1920 .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
1921 .tranche_flags = default_dmabuf_feedback_tranche_flags,
1922 .tranche_formats = default_dmabuf_feedback_tranche_formats,
1923 .tranche_done = default_dmabuf_feedback_tranche_done,
1924 .done = default_dmabuf_feedback_done,
1925 };
1926
1927 static void
1928 registry_handle_global_drm(void *data, struct wl_registry *registry,
1929 uint32_t name, const char *interface,
1930 uint32_t version)
1931 {
1932 struct dri2_egl_display *dri2_dpy = data;
1933
1934 if (strcmp(interface, "wl_drm") == 0) {
1935 dri2_dpy->wl_drm_version = MIN2(version, 2);
1936 dri2_dpy->wl_drm_name = name;
1937 } else if (strcmp(interface, "zwp_linux_dmabuf_v1") == 0 && version >= 3) {
1938 dri2_dpy->wl_dmabuf =
1939 wl_registry_bind(registry, name, &zwp_linux_dmabuf_v1_interface,
1940 MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
1941 zwp_linux_dmabuf_v1_add_listener(dri2_dpy->wl_dmabuf, &dmabuf_listener,
1942 dri2_dpy);
1943 }
1944 }
1945
1946 static void
1947 registry_handle_global_remove(void *data, struct wl_registry *registry,
1948 uint32_t name)
1949 {
1950 }
1951
1952 static const struct wl_registry_listener registry_listener_drm = {
1953 .global = registry_handle_global_drm,
1954 .global_remove = registry_handle_global_remove
1955 };
1956
1957 static void
1958 dri2_wl_setup_swap_interval(_EGLDisplay *disp)
1959 {
1960 /* We can't use values greater than 1 on Wayland because we are using the
1961 * frame callback to synchronise the frame, and the only way we can be sure
1962 * to get a frame callback is to attach a new buffer. Therefore we can't just
1963 * sit drawing nothing to wait until the next ‘n’ frame callbacks. */
1964
1965 dri2_setup_swap_interval(disp, 1);
1966 }
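
/*
 * Hedged usage note (an assumption about the clamping done by
 * dri2_setup_swap_interval): with the maximum set to 1 here, a client call
 * such as eglSwapInterval(egl_dpy, 3) is expected to succeed but behave like
 * an interval of 1, per the usual EGL clamping rules.
 */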
1967
1968 static const struct dri2_egl_display_vtbl dri2_wl_display_vtbl = {
1969 .authenticate = dri2_wl_authenticate,
1970 .create_window_surface = dri2_wl_create_window_surface,
1971 .create_pixmap_surface = dri2_wl_create_pixmap_surface,
1972 .destroy_surface = dri2_wl_destroy_surface,
1973 .swap_interval = dri2_wl_swap_interval,
1974 .create_image = dri2_create_image_khr,
1975 .swap_buffers = dri2_wl_swap_buffers,
1976 .swap_buffers_with_damage = dri2_wl_swap_buffers_with_damage,
1977 .query_buffer_age = dri2_wl_query_buffer_age,
1978 .create_wayland_buffer_from_image = dri2_wl_create_wayland_buffer_from_image,
1979 .get_dri_drawable = dri2_surface_get_dri_drawable,
1980 };
1981
1982 static const __DRIextension *dri2_loader_extensions[] = {
1983 &dri2_loader_extension.base,
1984 &image_loader_extension.base,
1985 &image_lookup_extension.base,
1986 &use_invalidate.base,
1987 NULL,
1988 };
1989
1990 static const __DRIextension *image_loader_extensions[] = {
1991 &image_loader_extension.base,
1992 &image_lookup_extension.base,
1993 &use_invalidate.base,
1994 NULL,
1995 };
1996
1997 static EGLBoolean
1998 dri2_wl_add_configs_for_visuals(_EGLDisplay *disp)
1999 {
2000 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2001 unsigned int format_count[ARRAY_SIZE(dri2_wl_visuals)] = { 0 };
2002 unsigned int count = 0;
2003 bool assigned;
2004
2005 for (unsigned i = 0; dri2_dpy->driver_configs[i]; i++) {
2006 assigned = false;
2007
2008 for (unsigned j = 0; j < ARRAY_SIZE(dri2_wl_visuals); j++) {
2009 struct dri2_egl_config *dri2_conf;
2010
2011 if (!BITSET_TEST(dri2_dpy->formats.formats_bitmap, j))
2012 continue;
2013
2014 dri2_conf = dri2_add_config(disp, dri2_dpy->driver_configs[i],
2015 count + 1, EGL_WINDOW_BIT, NULL, dri2_wl_visuals[j].rgba_shifts, dri2_wl_visuals[j].rgba_sizes);
2016 if (dri2_conf) {
2017 if (dri2_conf->base.ConfigID == count + 1)
2018 count++;
2019 format_count[j]++;
2020 assigned = true;
2021 }
2022 }
2023
2024 if (!assigned && dri2_dpy->is_different_gpu) {
2025 struct dri2_egl_config *dri2_conf;
2026 int alt_dri_image_format, c, s;
2027
2028 /* No match for this config. Check whether we can blitImage-convert it to a supported visual. */
2029 c = dri2_wl_visual_idx_from_config(dri2_dpy,
2030 dri2_dpy->driver_configs[i],
2031 false);
2032
2033 if (c == -1)
2034 continue;
2035
2036 /* Find optimal target visual for blitImage conversion, if any. */
2037 alt_dri_image_format = dri2_wl_visuals[c].alt_dri_image_format;
2038 s = dri2_wl_visual_idx_from_dri_image_format(alt_dri_image_format);
2039
2040 if (s == -1 || !BITSET_TEST(dri2_dpy->formats.formats_bitmap, s))
2041 continue;
2042
2043 /* Visual s works for the Wayland server, and c can be converted into s
2044 * by our client gpu during PRIME blitImage conversion to a linear
2045 * wl_buffer, so add visual c as supported by the client renderer.
2046 */
2047 dri2_conf = dri2_add_config(disp, dri2_dpy->driver_configs[i],
2048 count + 1, EGL_WINDOW_BIT, NULL,
2049 dri2_wl_visuals[c].rgba_shifts,
2050 dri2_wl_visuals[c].rgba_sizes);
2051 if (dri2_conf) {
2052 if (dri2_conf->base.ConfigID == count + 1)
2053 count++;
2054 format_count[c]++;
2055 if (format_count[c] == 1)
2056 _eglLog(_EGL_DEBUG, "Client format %s to server format %s via "
2057 "PRIME blitImage.", dri2_wl_visuals[c].format_name,
2058 dri2_wl_visuals[s].format_name);
2059 }
2060 }
2061 }
2062
2063 for (unsigned i = 0; i < ARRAY_SIZE(format_count); i++) {
2064 if (!format_count[i]) {
2065 _eglLog(_EGL_DEBUG, "No DRI config supports native format %s",
2066 dri2_wl_visuals[i].format_name);
2067 }
2068 }
2069
2070 return (count != 0);
2071 }
2072
2073 static EGLBoolean
2074 dri2_initialize_wayland_drm(_EGLDisplay *disp)
2075 {
2076 _EGLDevice *dev;
2077 struct dri2_egl_display *dri2_dpy;
2078
2079 dri2_dpy = calloc(1, sizeof *dri2_dpy);
2080 if (!dri2_dpy)
2081 return _eglError(EGL_BAD_ALLOC, "eglInitialize");
2082
2083 dri2_dpy->fd = -1;
2084 disp->DriverData = (void *) dri2_dpy;
2085
2086 if (dri2_wl_formats_init(&dri2_dpy->formats) < 0)
2087 goto cleanup;
2088
2089 if (disp->PlatformDisplay == NULL) {
2090 dri2_dpy->wl_dpy = wl_display_connect(NULL);
2091 if (dri2_dpy->wl_dpy == NULL)
2092 goto cleanup;
2093 dri2_dpy->own_device = true;
2094 } else {
2095 dri2_dpy->wl_dpy = disp->PlatformDisplay;
2096 }
2097
2098 dri2_dpy->wl_queue = wl_display_create_queue(dri2_dpy->wl_dpy);
2099
2100 dri2_dpy->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
2101 if (dri2_dpy->wl_dpy_wrapper == NULL)
2102 goto cleanup;
2103
2104 wl_proxy_set_queue((struct wl_proxy *) dri2_dpy->wl_dpy_wrapper,
2105 dri2_dpy->wl_queue);
2106
2107 if (dri2_dpy->own_device)
2108 wl_display_dispatch_pending(dri2_dpy->wl_dpy);
2109
2110 dri2_dpy->wl_registry = wl_display_get_registry(dri2_dpy->wl_dpy_wrapper);
2111 wl_registry_add_listener(dri2_dpy->wl_registry,
2112 &registry_listener_drm, dri2_dpy);
2113
2114 if (roundtrip(dri2_dpy) < 0)
2115 goto cleanup;
2116
2117 /* Get default dma-buf feedback */
2118 if (dri2_dpy->wl_dmabuf && zwp_linux_dmabuf_v1_get_version(dri2_dpy->wl_dmabuf) >=
2119 ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
2120 dmabuf_feedback_format_table_init(&dri2_dpy->format_table);
2121 dri2_dpy->wl_dmabuf_feedback =
2122 zwp_linux_dmabuf_v1_get_default_feedback(dri2_dpy->wl_dmabuf);
2123 zwp_linux_dmabuf_feedback_v1_add_listener(dri2_dpy->wl_dmabuf_feedback,
2124 &dmabuf_feedback_listener, dri2_dpy);
2125 }
2126
2127 if (roundtrip(dri2_dpy) < 0)
2128 goto cleanup;
2129
2130 /* Destroy the default dma-buf feedback and the format table. */
2131 if (dri2_dpy->wl_dmabuf_feedback) {
2132 zwp_linux_dmabuf_feedback_v1_destroy(dri2_dpy->wl_dmabuf_feedback);
2133 dri2_dpy->wl_dmabuf_feedback = NULL;
2134 dmabuf_feedback_format_table_fini(&dri2_dpy->format_table);
2135 }
2136
2137 /* We couldn't retrieve a render node from the dma-buf feedback (or the
2138 * feedback was not advertised at all), so we must fall back to wl_drm. */
2139 if (dri2_dpy->fd == -1) {
2140 /* wl_drm not advertised by compositor, so can't continue */
2141 if (dri2_dpy->wl_drm_name == 0)
2142 goto cleanup;
2143 wl_drm_bind(dri2_dpy);
2144
2145 if (dri2_dpy->wl_drm == NULL)
2146 goto cleanup;
2147 if (roundtrip(dri2_dpy) < 0 || dri2_dpy->fd == -1)
2148 goto cleanup;
2149
2150 if (!dri2_dpy->authenticated &&
2151 (roundtrip(dri2_dpy) < 0 || !dri2_dpy->authenticated))
2152 goto cleanup;
2153 }
2154
2155 dri2_dpy->fd = loader_get_user_preferred_fd(dri2_dpy->fd,
2156 &dri2_dpy->is_different_gpu);
2157 dev = _eglAddDevice(dri2_dpy->fd, false);
2158 if (!dev) {
2159 _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to find EGLDevice");
2160 goto cleanup;
2161 }
2162
2163 disp->Device = dev;
2164
2165 if (dri2_dpy->is_different_gpu) {
2166 free(dri2_dpy->device_name);
2167 dri2_dpy->device_name = loader_get_device_name_for_fd(dri2_dpy->fd);
2168 if (!dri2_dpy->device_name) {
2169 _eglError(EGL_BAD_ALLOC, "wayland-egl: failed to get device name "
2170 "for requested GPU");
2171 goto cleanup;
2172 }
2173 }
2174
2175 /* We have to do the check now because loader_get_user_preferred_fd
2176 * will return a render node not only when the requested gpu is different
2177 * from the server's, but also when the client asks for the same gpu as
2178 * the server by requesting its pci-id. */
2179 dri2_dpy->is_render_node = drmGetNodeTypeFromFd(dri2_dpy->fd) == DRM_NODE_RENDER;
2180
2181 dri2_dpy->driver_name = loader_get_driver_for_fd(dri2_dpy->fd);
2182 if (dri2_dpy->driver_name == NULL) {
2183 _eglError(EGL_BAD_ALLOC, "DRI2: failed to get driver name");
2184 goto cleanup;
2185 }
2186
2187 /* render nodes cannot use GEM names, and thus do not support
2188 * the __DRI_DRI2_LOADER extension */
2189 if (!dri2_dpy->is_render_node) {
2190 dri2_dpy->loader_extensions = dri2_loader_extensions;
2191 if (!dri2_load_driver(disp)) {
2192 _eglError(EGL_BAD_ALLOC, "DRI2: failed to load driver");
2193 goto cleanup;
2194 }
2195 } else {
2196 dri2_dpy->loader_extensions = image_loader_extensions;
2197 if (!dri2_load_driver_dri3(disp)) {
2198 _eglError(EGL_BAD_ALLOC, "DRI3: failed to load driver");
2199 goto cleanup;
2200 }
2201 }
2202
2203 if (!dri2_create_screen(disp))
2204 goto cleanup;
2205
2206 if (!dri2_setup_extensions(disp))
2207 goto cleanup;
2208
2209 dri2_setup_screen(disp);
2210
2211 dri2_wl_setup_swap_interval(disp);
2212
2213 if (dri2_dpy->wl_drm) {
2214 /* To use Prime, we must have __DRI_IMAGE v7 at least. createImageFromFds
2215 * support indicates that Prime export/import is supported by the driver.
2216 * Support for the GEM names API has been deprecated, so we bail out if the
2217 * driver does not support Prime. */
2218 if (!(dri2_dpy->capabilities & WL_DRM_CAPABILITY_PRIME) ||
2219 (dri2_dpy->image->base.version < 7) ||
2220 (dri2_dpy->image->createImageFromFds == NULL)) {
2221 _eglLog(_EGL_WARNING, "wayland-egl: display does not support prime");
2222 goto cleanup;
2223 }
2224 }
2225
2226 if (dri2_dpy->is_different_gpu &&
2227 (dri2_dpy->image->base.version < 9 ||
2228 dri2_dpy->image->blitImage == NULL)) {
2229 _eglLog(_EGL_WARNING, "wayland-egl: Different GPU selected, but the "
2230 "Image extension in the driver is not "
2231 "compatible. Version 9 or later and blitImage() "
2232 "are required");
2233 goto cleanup;
2234 }
2235
2236 if (!dri2_wl_add_configs_for_visuals(disp)) {
2237 _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to add configs");
2238 goto cleanup;
2239 }
2240
2241 dri2_set_WL_bind_wayland_display(disp);
2242 /* We cannot convert an EGLImage into a wl_buffer when on a different gpu,
2243 * because the EGLImage's buffer likely has a tiling mode that the server
2244 * gpu won't support. There is no way to check for this at the moment, so do
2245 * not advertise the extension. */
2246 if (!dri2_dpy->is_different_gpu)
2247 disp->Extensions.WL_create_wayland_buffer_from_image = EGL_TRUE;
2248
2249 disp->Extensions.EXT_buffer_age = EGL_TRUE;
2250
2251 disp->Extensions.EXT_swap_buffers_with_damage = EGL_TRUE;
2252
2253 disp->Extensions.EXT_present_opaque = EGL_TRUE;
2254
2255 /* Fill vtbl last to prevent accidentally calling virtual function during
2256 * initialization.
2257 */
2258 dri2_dpy->vtbl = &dri2_wl_display_vtbl;
2259
2260 return EGL_TRUE;
2261
2262 cleanup:
2263 dri2_display_destroy(disp);
2264 return EGL_FALSE;
2265 }
2266
2267 static int
2268 dri2_wl_swrast_get_stride_for_format(int format, int w)
2269 {
2270 int visual_idx = dri2_wl_visual_idx_from_shm_format(format);
2271
2272 assume(visual_idx != -1);
2273
2274 return w * (dri2_wl_visuals[visual_idx].bpp / 8);
2275 }
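
/*
 * Worked example (values assumed from the visual table above): a 32 bpp
 * format such as WL_SHM_FORMAT_XRGB8888 at w = 640 gives
 *
 *    stride = 640 * (32 / 8) = 2560 bytes per row.
 */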
2276
2277 static EGLBoolean
2278 dri2_wl_swrast_allocate_buffer(struct dri2_egl_surface *dri2_surf,
2279 int format, int w, int h,
2280 void **data, int *size,
2281 struct wl_buffer **buffer)
2282 {
2283 struct dri2_egl_display *dri2_dpy =
2284 dri2_egl_display(dri2_surf->base.Resource.Display);
2285 struct wl_shm_pool *pool;
2286 int fd, stride, size_map;
2287 void *data_map;
2288
2289 stride = dri2_wl_swrast_get_stride_for_format(format, w);
2290 size_map = h * stride;
2291
2292 /* Create a shareable buffer */
2293 fd = os_create_anonymous_file(size_map, NULL);
2294 if (fd < 0)
2295 return EGL_FALSE;
2296
2297 data_map = mmap(NULL, size_map, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2298 if (data_map == MAP_FAILED) {
2299 close(fd);
2300 return EGL_FALSE;
2301 }
2302
2303 /* Share it in a wl_buffer */
2304 pool = wl_shm_create_pool(dri2_dpy->wl_shm, fd, size_map);
2305 wl_proxy_set_queue((struct wl_proxy *)pool, dri2_surf->wl_queue);
2306 *buffer = wl_shm_pool_create_buffer(pool, 0, w, h, stride, format);
2307 wl_shm_pool_destroy(pool);
2308 close(fd);
2309
2310 *data = data_map;
2311 *size = size_map;
2312 return EGL_TRUE;
2313 }
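
/*
 * Note (per wl_shm semantics): destroying the pool and closing the fd right
 * away, as done above, is safe because the compositor keeps its own mapping
 * of the pool for as long as buffers created from it exist.
 */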
2314
2315 static int
2316 swrast_update_buffers(struct dri2_egl_surface *dri2_surf)
2317 {
2318 struct dri2_egl_display *dri2_dpy =
2319 dri2_egl_display(dri2_surf->base.Resource.Display);
2320 bool zink = dri2_surf->base.Resource.Display->Options.Zink;
2321
2322 /* we need to do the following operations only once per frame */
2323 if (dri2_surf->back)
2324 return 0;
2325
2326 if (dri2_surf->wl_win &&
2327 (dri2_surf->base.Width != dri2_surf->wl_win->width ||
2328 dri2_surf->base.Height != dri2_surf->wl_win->height)) {
2329
2330 if (!zink)
2331 dri2_wl_release_buffers(dri2_surf);
2332
2333 dri2_surf->base.Width = dri2_surf->wl_win->width;
2334 dri2_surf->base.Height = dri2_surf->wl_win->height;
2335 dri2_surf->dx = dri2_surf->wl_win->dx;
2336 dri2_surf->dy = dri2_surf->wl_win->dy;
2337 dri2_surf->current = NULL;
2338 }
2339
2340 /* find back buffer */
2341
2342 /* There might be a buffer release already queued that wasn't processed */
2343 wl_display_dispatch_queue_pending(dri2_dpy->wl_dpy, dri2_surf->wl_queue);
2344
2345 /* try to get a free buffer that has already been created */
2346 for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
2347 if (!dri2_surf->color_buffers[i].locked &&
2348 dri2_surf->color_buffers[i].wl_buffer) {
2349 dri2_surf->back = &dri2_surf->color_buffers[i];
2350 break;
2351 }
2352 }
2353
2354 /* otherwise choose any other free slot */
2355 if (!dri2_surf->back) {
2356 for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
2357 if (!dri2_surf->color_buffers[i].locked) {
2358 dri2_surf->back = &dri2_surf->color_buffers[i];
2359 if (zink)
2360 continue;
2361 if (!dri2_wl_swrast_allocate_buffer(dri2_surf,
2362 dri2_surf->format,
2363 dri2_surf->base.Width,
2364 dri2_surf->base.Height,
2365 &dri2_surf->back->data,
2366 &dri2_surf->back->data_size,
2367 &dri2_surf->back->wl_buffer)) {
2368 _eglError(EGL_BAD_ALLOC, "failed to allocate color buffer");
2369 return -1;
2370 }
2371 wl_buffer_add_listener(dri2_surf->back->wl_buffer,
2372 &wl_buffer_listener, dri2_surf);
2373 break;
2374 }
2375 }
2376 }
2377
2378 if (!dri2_surf->back) {
2379 _eglError(EGL_BAD_ALLOC, "failed to find free buffer");
2380 return -1;
2381 }
2382
2383 dri2_surf->back->locked = true;
2384
2385 /* If we have an extra unlocked buffer at this point, we had to do triple
2386 * buffering for a while, but now can go back to just double buffering.
2387 * That means we can free any unlocked buffer now. To avoid toggling between
2388 * going back to double buffering and needing to allocate another buffer too
2389 * fast we let the unneeded buffer sit around for a short while. */
2390 for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
2391 if (!dri2_surf->color_buffers[i].locked &&
2392 dri2_surf->color_buffers[i].wl_buffer &&
2393 dri2_surf->color_buffers[i].age > BUFFER_TRIM_AGE_HYSTERESIS) {
2394 if (!zink) {
2395 wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
2396 munmap(dri2_surf->color_buffers[i].data,
2397 dri2_surf->color_buffers[i].data_size);
2398 }
2399 dri2_surf->color_buffers[i].wl_buffer = NULL;
2400 dri2_surf->color_buffers[i].data = NULL;
2401 dri2_surf->color_buffers[i].age = 0;
2402 }
2403 }
2404
2405 return 0;
2406 }
2407
2408 static void*
2409 dri2_wl_swrast_get_frontbuffer_data(struct dri2_egl_surface *dri2_surf)
2410 {
2411 /* if there has been a resize: */
2412 if (!dri2_surf->current)
2413 return NULL;
2414
2415 return dri2_surf->current->data;
2416 }
2417
2418 static void*
2419 dri2_wl_swrast_get_backbuffer_data(struct dri2_egl_surface *dri2_surf)
2420 {
2421 assert(dri2_surf->back);
2422 return dri2_surf->back->data;
2423 }
2424
2425 static void
2426 dri2_wl_swrast_commit_backbuffer(struct dri2_egl_surface *dri2_surf)
2427 {
2428 struct dri2_egl_display *dri2_dpy = dri2_egl_display(dri2_surf->base.Resource.Display);
2429
2430 while (dri2_surf->throttle_callback != NULL)
2431 if (wl_display_dispatch_queue(dri2_dpy->wl_dpy,
2432 dri2_surf->wl_queue) == -1)
2433 return;
2434
2435 if (dri2_surf->base.SwapInterval > 0) {
2436 dri2_surf->throttle_callback =
2437 wl_surface_frame(dri2_surf->wl_surface_wrapper);
2438 wl_callback_add_listener(dri2_surf->throttle_callback,
2439 &throttle_listener, dri2_surf);
2440 }
2441
2442 dri2_surf->current = dri2_surf->back;
2443 dri2_surf->back = NULL;
2444
2445 wl_surface_attach(dri2_surf->wl_surface_wrapper,
2446 dri2_surf->current->wl_buffer,
2447 dri2_surf->dx, dri2_surf->dy);
2448
2449 dri2_surf->wl_win->attached_width = dri2_surf->base.Width;
2450 dri2_surf->wl_win->attached_height = dri2_surf->base.Height;
2451 /* reset resize growing parameters */
2452 dri2_surf->dx = 0;
2453 dri2_surf->dy = 0;
2454
2455 wl_surface_damage(dri2_surf->wl_surface_wrapper,
2456 0, 0, INT32_MAX, INT32_MAX);
2457 wl_surface_commit(dri2_surf->wl_surface_wrapper);
2458
2459 /* If we're not waiting for a frame callback then we'll at least throttle
2460 * to a sync callback so that we always give a chance for the compositor to
2461 * handle the commit and send a release event before checking for a free
2462 * buffer */
2463 if (dri2_surf->throttle_callback == NULL) {
2464 dri2_surf->throttle_callback = wl_display_sync(dri2_surf->wl_dpy_wrapper);
2465 wl_callback_add_listener(dri2_surf->throttle_callback,
2466 &throttle_listener, dri2_surf);
2467 }
2468
2469 wl_display_flush(dri2_dpy->wl_dpy);
2470 }
2471
2472 static void
2473 dri2_wl_swrast_get_drawable_info(__DRIdrawable * draw,
2474 int *x, int *y, int *w, int *h,
2475 void *loaderPrivate)
2476 {
2477 struct dri2_egl_surface *dri2_surf = loaderPrivate;
2478
2479 (void) swrast_update_buffers(dri2_surf);
2480 *x = 0;
2481 *y = 0;
2482 *w = dri2_surf->base.Width;
2483 *h = dri2_surf->base.Height;
2484 }
2485
2486 static void
2487 dri2_wl_swrast_get_image(__DRIdrawable * read,
2488 int x, int y, int w, int h,
2489 char *data, void *loaderPrivate)
2490 {
2491 struct dri2_egl_surface *dri2_surf = loaderPrivate;
2492 int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2493 int x_offset = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, x);
2494 int src_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, dri2_surf->base.Width);
2495 int dst_stride = copy_width;
2496 char *src, *dst;
2497
2498 src = dri2_wl_swrast_get_frontbuffer_data(dri2_surf);
2499 if (!src) {
2500 memset(data, 0, copy_width * h);
2501 return;
2502 }
2503
2504 assert(data != src);
2505 assert(copy_width <= src_stride);
2506
2507 src += x_offset;
2508 src += y * src_stride;
2509 dst = data;
2510
2511 if (copy_width > src_stride-x_offset)
2512 copy_width = src_stride-x_offset;
2513 if (h > dri2_surf->base.Height-y)
2514 h = dri2_surf->base.Height-y;
2515
2516 for (; h>0; h--) {
2517 memcpy(dst, src, copy_width);
2518 src += src_stride;
2519 dst += dst_stride;
2520 }
2521 }
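
/*
 * Worked example of the copy arithmetic above (numbers are illustrative):
 * with a 32 bpp surface of Width = 640 (src_stride = 2560) and a request of
 * x = 10, w = 100, the copy starts at byte offset x_offset = 40 into each
 * row and copies copy_width = 400 bytes per row.
 */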
2522
2523 static void
2524 dri2_wl_swrast_put_image2(__DRIdrawable * draw, int op,
2525 int x, int y, int w, int h, int stride,
2526 char *data, void *loaderPrivate)
2527 {
2528 struct dri2_egl_surface *dri2_surf = loaderPrivate;
2529 int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2530 int dst_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, dri2_surf->base.Width);
2531 int x_offset = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, x);
2532 char *src, *dst;
2533
2534 assert(copy_width <= stride);
2535
2536 (void) swrast_update_buffers(dri2_surf);
2537 dst = dri2_wl_swrast_get_backbuffer_data(dri2_surf);
2538
2539 /* partial copy, copy old content */
2540 if (copy_width < dst_stride)
2541 dri2_wl_swrast_get_image(draw, 0, 0,
2542 dri2_surf->base.Width, dri2_surf->base.Height,
2543 dst, loaderPrivate);
2544
2545 dst += x_offset;
2546 dst += y * dst_stride;
2547
2548 src = data;
2549
2550 /* drivers expect us to do these checks (and some rely on it) */
2551 if (copy_width > dst_stride-x_offset)
2552 copy_width = dst_stride-x_offset;
2553 if (h > dri2_surf->base.Height-y)
2554 h = dri2_surf->base.Height-y;
2555
2556 for (; h>0; h--) {
2557 memcpy(dst, src, copy_width);
2558 src += stride;
2559 dst += dst_stride;
2560 }
2561 dri2_wl_swrast_commit_backbuffer(dri2_surf);
2562 }
2563
2564 static void
2565 dri2_wl_swrast_put_image(__DRIdrawable * draw, int op,
2566 int x, int y, int w, int h,
2567 char *data, void *loaderPrivate)
2568 {
2569 struct dri2_egl_surface *dri2_surf = loaderPrivate;
2570 int stride;
2571
2572 stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2573 dri2_wl_swrast_put_image2(draw, op, x, y, w, h,
2574 stride, data, loaderPrivate);
2575 }
2576
2577 static EGLBoolean
2578 dri2_wl_swrast_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
2579 {
2580 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2581 struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
2582
2583 if (!dri2_surf->wl_win)
2584 return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
2585
2586 dri2_dpy->core->swapBuffers(dri2_surf->dri_drawable);
2587 if (disp->Options.Zink) {
2588 dri2_surf->current = dri2_surf->back;
2589 dri2_surf->back = NULL;
2590 }
2591 return EGL_TRUE;
2592 }
2593
2594 static void
2595 shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
2596 {
2597 struct dri2_egl_display *dri2_dpy = data;
2598 int visual_idx = dri2_wl_visual_idx_from_shm_format(format);
2599
2600 if (visual_idx == -1)
2601 return;
2602
2603 BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
2604 }
2605
2606 static const struct wl_shm_listener shm_listener = {
2607 .format = shm_handle_format
2608 };
2609
2610 static void
2611 registry_handle_global_swrast(void *data, struct wl_registry *registry,
2612 uint32_t name, const char *interface,
2613 uint32_t version)
2614 {
2615 struct dri2_egl_display *dri2_dpy = data;
2616
2617 if (strcmp(interface, "wl_shm") == 0) {
2618 dri2_dpy->wl_shm =
2619 wl_registry_bind(registry, name, &wl_shm_interface, 1);
2620 wl_shm_add_listener(dri2_dpy->wl_shm, &shm_listener, dri2_dpy);
2621 }
2622 }
2623
2624 static const struct wl_registry_listener registry_listener_swrast = {
2625 .global = registry_handle_global_swrast,
2626 .global_remove = registry_handle_global_remove
2627 };
2628
2629 static const struct dri2_egl_display_vtbl dri2_wl_swrast_display_vtbl = {
2630 .authenticate = NULL,
2631 .create_window_surface = dri2_wl_create_window_surface,
2632 .create_pixmap_surface = dri2_wl_create_pixmap_surface,
2633 .destroy_surface = dri2_wl_destroy_surface,
2634 .create_image = dri2_create_image_khr,
2635 .swap_buffers = dri2_wl_swrast_swap_buffers,
2636 .get_dri_drawable = dri2_surface_get_dri_drawable,
2637 };
2638
2639 static const __DRIswrastLoaderExtension swrast_loader_extension = {
2640 .base = { __DRI_SWRAST_LOADER, 2 },
2641
2642 .getDrawableInfo = dri2_wl_swrast_get_drawable_info,
2643 .putImage = dri2_wl_swrast_put_image,
2644 .getImage = dri2_wl_swrast_get_image,
2645 .putImage2 = dri2_wl_swrast_put_image2,
2646 };
2647
2648 static void
2649 kopperSetSurfaceCreateInfo(void *_draw, struct kopper_loader_info *out)
2650 {
2651 struct dri2_egl_surface *dri2_surf = _draw;
2652 struct dri2_egl_display *dri2_dpy = dri2_egl_display(dri2_surf->base.Resource.Display);
2653 VkWaylandSurfaceCreateInfoKHR *wlsci = &out->wl;
2654
2655 wlsci->sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
2656 wlsci->pNext = NULL;
2657 wlsci->flags = 0;
2658 wlsci->display = dri2_dpy->wl_dpy;
2659 wlsci->surface = dri2_surf->wl_surface_wrapper;
2660 }
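
/*
 * Hedged sketch (an assumption about the kopper side, not code from this
 * file; 'instance' and 'info' are hypothetical names): the consumer of this
 * info is expected to hand the filled-in VkWaylandSurfaceCreateInfoKHR to
 * Vulkan more or less verbatim:
 *
 *    VkSurfaceKHR surface;
 *    vkCreateWaylandSurfaceKHR(instance, &info->wl, NULL, &surface);
 */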
2661
2662 static const __DRIkopperLoaderExtension kopper_loader_extension = {
2663 .base = { __DRI_KOPPER_LOADER, 1 },
2664
2665 .SetSurfaceCreateInfo = kopperSetSurfaceCreateInfo,
2666 };
2667 static const __DRIextension *swrast_loader_extensions[] = {
2668 &swrast_loader_extension.base,
2669 &image_lookup_extension.base,
2670 &kopper_loader_extension.base,
2671 NULL,
2672 };
2673
2674 static EGLBoolean
2675 dri2_initialize_wayland_swrast(_EGLDisplay *disp)
2676 {
2677 _EGLDevice *dev;
2678 struct dri2_egl_display *dri2_dpy;
2679
2680 dri2_dpy = calloc(1, sizeof *dri2_dpy);
2681 if (!dri2_dpy)
2682 return _eglError(EGL_BAD_ALLOC, "eglInitialize");
2683
2684 dri2_dpy->fd = -1;
2685 disp->DriverData = (void *) dri2_dpy;
2686
2687 if (dri2_wl_formats_init(&dri2_dpy->formats) < 0)
2688 goto cleanup;
2689
2690 if (disp->PlatformDisplay == NULL) {
2691 dri2_dpy->wl_dpy = wl_display_connect(NULL);
2692 if (dri2_dpy->wl_dpy == NULL)
2693 goto cleanup;
2694 dri2_dpy->own_device = true;
2695 } else {
2696 dri2_dpy->wl_dpy = disp->PlatformDisplay;
2697 }
2698
2699 dev = _eglAddDevice(dri2_dpy->fd, true);
2700 if (!dev) {
2701 _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to find EGLDevice");
2702 goto cleanup;
2703 }
2704
2705 disp->Device = dev;
2706
2707 dri2_dpy->wl_queue = wl_display_create_queue(dri2_dpy->wl_dpy);
2708
2709 dri2_dpy->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
2710 if (dri2_dpy->wl_dpy_wrapper == NULL)
2711 goto cleanup;
2712
2713 wl_proxy_set_queue((struct wl_proxy *) dri2_dpy->wl_dpy_wrapper,
2714 dri2_dpy->wl_queue);
2715
2716 if (dri2_dpy->own_device)
2717 wl_display_dispatch_pending(dri2_dpy->wl_dpy);
2718
2719 dri2_dpy->wl_registry = wl_display_get_registry(dri2_dpy->wl_dpy_wrapper);
2720 wl_registry_add_listener(dri2_dpy->wl_registry,
2721 &registry_listener_swrast, dri2_dpy);
2722
2723 if (roundtrip(dri2_dpy) < 0 || dri2_dpy->wl_shm == NULL)
2724 goto cleanup;
2725
2726 if (roundtrip(dri2_dpy) < 0 || !BITSET_TEST_RANGE(dri2_dpy->formats.formats_bitmap,
2727 0, dri2_dpy->formats.num_formats))
2728 goto cleanup;
2729
2730 dri2_dpy->driver_name = strdup(disp->Options.Zink ? "zink" : "swrast");
2731 if (!dri2_load_driver_swrast(disp))
2732 goto cleanup;
2733
2734 dri2_dpy->loader_extensions = swrast_loader_extensions;
2735
2736 if (!dri2_create_screen(disp))
2737 goto cleanup;
2738
2739 if (!dri2_setup_extensions(disp))
2740 goto cleanup;
2741
2742 dri2_setup_screen(disp);
2743
2744 dri2_wl_setup_swap_interval(disp);
2745
2746 if (!dri2_wl_add_configs_for_visuals(disp)) {
2747 _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to add configs");
2748 goto cleanup;
2749 }
2750
2751 /* Fill vtbl last to prevent accidentally calling virtual function during
2752 * initialization.
2753 */
2754 dri2_dpy->vtbl = &dri2_wl_swrast_display_vtbl;
2755
2756 return EGL_TRUE;
2757
2758 cleanup:
2759 dri2_display_destroy(disp);
2760 return EGL_FALSE;
2761 }
2762
2763 EGLBoolean
2764 dri2_initialize_wayland(_EGLDisplay *disp)
2765 {
2766 if (disp->Options.ForceSoftware)
2767 return dri2_initialize_wayland_swrast(disp);
2768 else
2769 return dri2_initialize_wayland_drm(disp);
2770 }
2771
2772 void
2773 dri2_teardown_wayland(struct dri2_egl_display *dri2_dpy)
2774 {
2775 dri2_wl_formats_fini(&dri2_dpy->formats);
2776 if (dri2_dpy->wl_drm)
2777 wl_drm_destroy(dri2_dpy->wl_drm);
2778 if (dri2_dpy->wl_dmabuf)
2779 zwp_linux_dmabuf_v1_destroy(dri2_dpy->wl_dmabuf);
2780 if (dri2_dpy->wl_shm)
2781 wl_shm_destroy(dri2_dpy->wl_shm);
2782 if (dri2_dpy->wl_registry)
2783 wl_registry_destroy(dri2_dpy->wl_registry);
2784 if (dri2_dpy->wl_queue)
2785 wl_event_queue_destroy(dri2_dpy->wl_queue);
2786 if (dri2_dpy->wl_dpy_wrapper)
2787 wl_proxy_wrapper_destroy(dri2_dpy->wl_dpy_wrapper);
2788
2789 if (dri2_dpy->own_device)
2790 wl_display_disconnect(dri2_dpy->wl_dpy);
2791 }
2792