loader/dri3: Check for window destruction in dri3_wait_for_event_locked
[mesa.git] / src / loader / loader_dri3_helper.c
1 /*
2 * Copyright © 2013 Keith Packard
3 * Copyright © 2015 Boyan Ding
4 *
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that copyright
8 * notice and this permission notice appear in supporting documentation, and
9 * that the name of the copyright holders not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. The copyright holders make no representations
12 * about the suitability of this software for any purpose. It is provided "as
13 * is" without express or implied warranty.
14 *
15 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21 * OF THIS SOFTWARE.
22 */
23
24 #include <fcntl.h>
25 #include <poll.h>
26 #include <stdlib.h>
27 #include <unistd.h>
28 #include <string.h>
29
30 #include <X11/xshmfence.h>
31 #include <xcb/xcb.h>
32 #include <xcb/dri3.h>
33 #include <xcb/present.h>
34
35 #include <X11/Xlib-xcb.h>
36
37 #include "loader_dri3_helper.h"
38 #include "util/macros.h"
39 #include "drm-uapi/drm_fourcc.h"
40
41 /* From driconf.h, user exposed so should be stable */
42 #define DRI_CONF_VBLANK_NEVER 0
43 #define DRI_CONF_VBLANK_DEF_INTERVAL_0 1
44 #define DRI_CONF_VBLANK_DEF_INTERVAL_1 2
45 #define DRI_CONF_VBLANK_ALWAYS_SYNC 3
46
/**
 * A cached blit context.
 *
 * Caches a single __DRIcontext for use with the image extension's
 * blitImage method; see loader_dri3_blit_context_get()/put().
 */
struct loader_dri3_blit_context {
   mtx_t mtx;                      /* Held while the context is in use */
   __DRIcontext *ctx;              /* Cached context, NULL if none yet */
   __DRIscreen *cur_screen;        /* Screen the cached context was made for */
   const __DRIcoreExtension *core; /* Core extension used to destroy ctx */
};
56
/* For simplicity we maintain the cache only for a single screen at a time;
 * a request for a different screen replaces the cached context.
 */
static struct loader_dri3_blit_context blit_context = {
   _MTX_INITIALIZER_NP, NULL
};

/* Forward declarations for helpers used before their definitions. */
static void
dri3_flush_present_events(struct loader_dri3_drawable *draw);

static struct loader_dri3_buffer *
dri3_find_back_alloc(struct loader_dri3_drawable *draw);
67
68 static xcb_screen_t *
69 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
70 {
71 xcb_screen_iterator_t screen_iter =
72 xcb_setup_roots_iterator(xcb_get_setup(conn));
73
74 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
75 if (screen_iter.data->root == root)
76 return screen_iter.data;
77 }
78
79 return NULL;
80 }
81
82 static xcb_visualtype_t *
83 get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
84 {
85 xcb_visualtype_iterator_t visual_iter;
86 xcb_screen_t *screen = draw->screen;
87 xcb_depth_iterator_t depth_iter;
88
89 if (!screen)
90 return NULL;
91
92 depth_iter = xcb_screen_allowed_depths_iterator(screen);
93 for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
94 if (depth_iter.data->depth != depth)
95 continue;
96
97 visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
98 if (visual_iter.rem)
99 return visual_iter.data;
100 }
101
102 return NULL;
103 }
104
105 /* Sets the adaptive sync window property state. */
106 static void
107 set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
108 uint32_t state)
109 {
110 static char const name[] = "_VARIABLE_REFRESH";
111 xcb_intern_atom_cookie_t cookie;
112 xcb_intern_atom_reply_t* reply;
113 xcb_void_cookie_t check;
114
115 cookie = xcb_intern_atom(conn, 0, strlen(name), name);
116 reply = xcb_intern_atom_reply(conn, cookie, NULL);
117 if (reply == NULL)
118 return;
119
120 if (state)
121 check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
122 drawable, reply->atom,
123 XCB_ATOM_CARDINAL, 32, 1, &state);
124 else
125 check = xcb_delete_property_checked(conn, drawable, reply->atom);
126
127 xcb_discard_reply(conn, check.sequence);
128 free(reply);
129 }
130
131 /* Get red channel mask for given drawable at given depth. */
132 static unsigned int
133 dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
134 {
135 xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);
136
137 if (visual)
138 return visual->red_mask;
139
140 return 0;
141 }
142
143 /**
144 * Do we have blit functionality in the image blit extension?
145 *
146 * \param draw[in] The drawable intended to blit from / to.
147 * \return true if we have blit functionality. false otherwise.
148 */
149 static bool loader_dri3_have_image_blit(const struct loader_dri3_drawable *draw)
150 {
151 return draw->ext->image->base.version >= 9 &&
152 draw->ext->image->blitImage != NULL;
153 }
154
/**
 * Get and lock (for use with the current thread) a dri context associated
 * with the drawable's dri screen. The context is intended to be used with
 * the dri image extension's blitImage method.
 *
 * \param draw[in] Pointer to the drawable whose dri screen we want a
 * dri context for.
 * \return A dri context or NULL if context creation failed.
 *
 * When the caller is done with the context (even if the context returned was
 * NULL), the caller must call loader_dri3_blit_context_put.
 */
static __DRIcontext *
loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
{
   mtx_lock(&blit_context.mtx);

   /* The cache holds a context for one screen only; drop a context that
    * was created for a different screen.
    */
   if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen) {
      blit_context.core->destroyContext(blit_context.ctx);
      blit_context.ctx = NULL;
   }

   if (!blit_context.ctx) {
      blit_context.ctx = draw->ext->core->createNewContext(draw->dri_screen,
                                                           NULL, NULL, NULL);
      blit_context.cur_screen = draw->dri_screen;
      blit_context.core = draw->ext->core;
   }

   /* NOTE: the mutex stays held on return (even when ctx is NULL) and is
    * released by loader_dri3_blit_context_put().
    */
   return blit_context.ctx;
}
186
/**
 * Release (for use with other threads) a dri context previously obtained using
 * loader_dri3_blit_context_get.
 *
 * Simply drops the cache mutex taken by loader_dri3_blit_context_get().
 */
static void
loader_dri3_blit_context_put(void)
{
   mtx_unlock(&blit_context.mtx);
}
196
197 /**
198 * Blit (parts of) the contents of a DRI image to another dri image
199 *
200 * \param draw[in] The drawable which owns the images.
201 * \param dst[in] The destination image.
202 * \param src[in] The source image.
203 * \param dstx0[in] Start destination coordinate.
204 * \param dsty0[in] Start destination coordinate.
205 * \param width[in] Blit width.
206 * \param height[in] Blit height.
207 * \param srcx0[in] Start source coordinate.
208 * \param srcy0[in] Start source coordinate.
209 * \param flush_flag[in] Image blit flush flag.
210 * \return true iff successful.
211 */
212 static bool
213 loader_dri3_blit_image(struct loader_dri3_drawable *draw,
214 __DRIimage *dst, __DRIimage *src,
215 int dstx0, int dsty0, int width, int height,
216 int srcx0, int srcy0, int flush_flag)
217 {
218 __DRIcontext *dri_context;
219 bool use_blit_context = false;
220
221 if (!loader_dri3_have_image_blit(draw))
222 return false;
223
224 dri_context = draw->vtable->get_dri_context(draw);
225
226 if (!dri_context || !draw->vtable->in_current_context(draw)) {
227 dri_context = loader_dri3_blit_context_get(draw);
228 use_blit_context = true;
229 flush_flag |= __BLIT_FLAG_FLUSH;
230 }
231
232 if (dri_context)
233 draw->ext->image->blitImage(dri_context, dst, src, dstx0, dsty0,
234 width, height, srcx0, srcy0,
235 width, height, flush_flag);
236
237 if (use_blit_context)
238 loader_dri3_blit_context_put();
239
240 return dri_context != NULL;
241 }
242
/* Reset the buffer's shm fence to the untriggered state.  The connection
 * argument is unused; it is kept for symmetry with the other fence helpers.
 */
static inline void
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xshmfence_reset(buffer->shm_fence);
}
248
/* Trigger the buffer's shm fence directly from this process. */
static inline void
dri3_fence_set(struct loader_dri3_buffer *buffer)
{
   xshmfence_trigger(buffer->shm_fence);
}
254
/* Ask the X server to trigger the buffer's sync fence once preceding
 * requests on the connection have completed.
 */
static inline void
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_sync_trigger_fence(c, buffer->sync_fence);
}
260
/* Flush the connection, then block until the buffer's shm fence has been
 * triggered.  When a drawable is passed, pending Present events are also
 * drained afterwards under the drawable's lock.
 */
static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
                 struct loader_dri3_buffer *buffer)
{
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
   if (draw) {
      mtx_lock(&draw->mtx);
      dri3_flush_present_events(draw);
      mtx_unlock(&draw->mtx);
   }
}
273
274 static void
275 dri3_update_num_back(struct loader_dri3_drawable *draw)
276 {
277 if (draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP)
278 draw->num_back = 3;
279 else
280 draw->num_back = 2;
281 }
282
/* Record the swap interval to use for subsequent presents on this drawable. */
void
loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
{
   draw->swap_interval = interval;
}
288
289 /** dri3_free_render_buffer
290 *
291 * Free everything associated with one render buffer including pixmap, fence
292 * stuff and the driver image
293 */
294 static void
295 dri3_free_render_buffer(struct loader_dri3_drawable *draw,
296 struct loader_dri3_buffer *buffer)
297 {
298 if (buffer->own_pixmap)
299 xcb_free_pixmap(draw->conn, buffer->pixmap);
300 xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
301 xshmfence_unmap_shm(buffer->shm_fence);
302 draw->ext->image->destroyImage(buffer->image);
303 if (buffer->linear_buffer)
304 draw->ext->image->destroyImage(buffer->linear_buffer);
305 free(buffer);
306 }
307
/* Tear down a drawable: destroy the DRI drawable, free all render buffers,
 * stop Present event delivery and destroy the synchronization objects.
 */
void
loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
{
   int i;

   draw->ext->core->destroyDrawable(draw->dri_drawable);

   for (i = 0; i < ARRAY_SIZE(draw->buffers); i++) {
      if (draw->buffers[i])
         dri3_free_render_buffer(draw, draw->buffers[i]);
   }

   if (draw->special_event) {
      /* Deselect Present events before unregistering the special event
       * queue; the error reply (if any) is discarded.
       */
      xcb_void_cookie_t cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_NO_EVENT);

      xcb_discard_reply(draw->conn, cookie.sequence);
      xcb_unregister_for_special_event(draw->conn, draw->special_event);
   }

   cnd_destroy(&draw->event_cnd);
   mtx_destroy(&draw->mtx);
}
332
/* Initialize a loader_dri3_drawable for the given X drawable and DRI screen.
 *
 * Queries driconf options (vblank mode, adaptive sync), creates the DRI
 * drawable and fetches the drawable's geometry from the server.
 *
 * \return 0 on success, 1 on failure (DRI drawable creation or geometry
 *         query failed).
 */
int
loader_dri3_drawable_init(xcb_connection_t *conn,
                          xcb_drawable_t drawable,
                          __DRIscreen *dri_screen,
                          bool is_different_gpu,
                          bool multiplanes_available,
                          const __DRIconfig *dri_config,
                          struct loader_dri3_extensions *ext,
                          const struct loader_dri3_vtable *vtable,
                          struct loader_dri3_drawable *draw)
{
   xcb_get_geometry_cookie_t cookie;
   xcb_get_geometry_reply_t *reply;
   xcb_generic_error_t *error;
   GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1;
   int swap_interval;

   draw->conn = conn;
   draw->ext = ext;
   draw->vtable = vtable;
   draw->drawable = drawable;
   draw->dri_screen = dri_screen;
   draw->is_different_gpu = is_different_gpu;
   draw->multiplanes_available = multiplanes_available;

   draw->have_back = 0;
   draw->have_fake_front = 0;
   draw->first_init = true;
   draw->adaptive_sync = false;
   draw->adaptive_sync_active = false;

   /* -1 means "no blit source"; see loader_dri3_swap_buffers_msc. */
   draw->cur_blit_source = -1;
   draw->back_format = __DRI_IMAGE_FORMAT_NONE;
   mtx_init(&draw->mtx, mtx_plain);
   cnd_init(&draw->event_cnd);

   if (draw->ext->config) {
      unsigned char adaptive_sync = 0;

      draw->ext->config->configQueryi(draw->dri_screen,
                                      "vblank_mode", &vblank_mode);

      draw->ext->config->configQueryb(draw->dri_screen,
                                      "adaptive_sync",
                                      &adaptive_sync);

      draw->adaptive_sync = adaptive_sync;
   }

   /* Make sure a stale _VARIABLE_REFRESH property is removed when the
    * option is disabled.
    */
   if (!draw->adaptive_sync)
      set_adaptive_sync_property(conn, draw->drawable, false);

   switch (vblank_mode) {
   case DRI_CONF_VBLANK_NEVER:
   case DRI_CONF_VBLANK_DEF_INTERVAL_0:
      swap_interval = 0;
      break;
   case DRI_CONF_VBLANK_DEF_INTERVAL_1:
   case DRI_CONF_VBLANK_ALWAYS_SYNC:
   default:
      swap_interval = 1;
      break;
   }
   draw->swap_interval = swap_interval;

   dri3_update_num_back(draw);

   /* Create a new drawable */
   draw->dri_drawable =
      draw->ext->image_driver->createNewDrawable(dri_screen,
                                                 dri_config,
                                                 draw);

   if (!draw->dri_drawable)
      return 1;

   cookie = xcb_get_geometry(draw->conn, draw->drawable);
   reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
   if (reply == NULL || error != NULL) {
      draw->ext->core->destroyDrawable(draw->dri_drawable);
      return 1;
   }

   draw->screen = get_screen_for_root(draw->conn, reply->root);
   draw->width = reply->width;
   draw->height = reply->height;
   draw->depth = reply->depth;
   draw->vtable->set_drawable_size(draw, draw->width, draw->height);
   free(reply);

   draw->swap_method = __DRI_ATTRIB_SWAP_UNDEFINED;
   if (draw->ext->core->base.version >= 2) {
      (void )draw->ext->core->getConfigAttrib(dri_config,
                                              __DRI_ATTRIB_SWAP_METHOD,
                                              &draw->swap_method);
   }

   /*
    * Make sure server has the same swap interval we do for the new
    * drawable.
    */
   loader_dri3_set_swap_interval(draw, swap_interval);

   return 0;
}
438
/*
 * Process one Present event.
 *
 * Takes ownership of @ge and frees it.  Callers hold draw->mtx (the event
 * handlers update fields protected by it).
 */
static void
dri3_handle_present_event(struct loader_dri3_drawable *draw,
                          xcb_present_generic_event_t *ge)
{
   switch (ge->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *ce = (void *) ge;

      /* Window was resized: record the new size and invalidate the DRI
       * drawable so buffers get reallocated.
       */
      draw->width = ce->width;
      draw->height = ce->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      draw->ext->flush->invalidate(draw->dri_drawable);
      break;
   }
   case XCB_PRESENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *ce = (void *) ge;

      /* Compute the processed SBC number from the received 32-bit serial number
       * merged with the upper 32-bits of the sent 64-bit serial number while
       * checking for wrap.
       */
      if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;

         /* Only assume wraparound if that results in exactly the previous
          * SBC + 1, otherwise ignore received SBC > sent SBC (those are
          * probably from a previous loader_dri3_drawable instance) to avoid
          * calculating bogus target MSC values in loader_dri3_swap_buffers_msc
          */
         if (recv_sbc <= draw->send_sbc)
            draw->recv_sbc = recv_sbc;
         else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
            draw->recv_sbc = recv_sbc - 0x100000000ULL;

         /* When moving from flip to copy, we assume that we can allocate in
          * a more optimal way if we don't need to cater for the display
          * controller.
          */
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
             draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }

         /* If the server tells us that our allocation is suboptimal, we
          * reallocate once.
          */
#ifdef HAVE_DRI3_MODIFIERS
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
             draw->last_present_mode != ce->mode) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }
#endif
         draw->last_present_mode = ce->mode;

         if (draw->vtable->show_fps)
            draw->vtable->show_fps(draw, ce->ust);

         draw->ust = ce->ust;
         draw->msc = ce->msc;
      } else if (ce->serial == draw->eid) {
         /* Completion of a NotifyMSC request (see loader_dri3_wait_for_msc). */
         draw->notify_ust = ce->ust;
         draw->notify_msc = ce->msc;
      }
      break;
   }
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *ie = (void *) ge;
      int b;

      /* The server is done with this pixmap; mark the matching buffer
       * as no longer busy.
       */
      for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
         struct loader_dri3_buffer *buf = draw->buffers[b];

         if (buf && buf->pixmap == ie->pixmap)
            buf->busy = 0;
      }
      break;
   }
   }
   free(ge);
}
528
/* Wait for and process one Present special event.
 *
 * Must be called with draw->mtx held; the mutex is temporarily dropped
 * while polling so other threads can use the drawable.  Only one thread
 * waits on the connection at a time; others block on the condition
 * variable and are woken when the waiter finishes.
 *
 * \return false if the window was destroyed or no event could be
 *         retrieved, true after one event was processed (or another
 *         thread updated the protected state on our behalf).
 */
static bool
dri3_wait_for_event_locked(struct loader_dri3_drawable *draw,
                           unsigned *full_sequence)
{
   xcb_generic_event_t *ev;
   xcb_present_generic_event_t *ge;

   if (draw->window_destroyed)
      return false;

   xcb_flush(draw->conn);

   /* Only have one thread waiting for events at a time */
   if (draw->has_event_waiter) {
      cnd_wait(&draw->event_cnd, &draw->mtx);
      if (draw->window_destroyed)
         return false;
      if (full_sequence)
         *full_sequence = draw->last_special_event_sequence;
      /* Another thread has updated the protected info, so retest. */
      return true;
   } else {
      struct pollfd pfds;

      draw->has_event_waiter = true;
      /* Allow other threads access to the drawable while we're waiting. */
      mtx_unlock(&draw->mtx);

      pfds.fd = xcb_get_file_descriptor(draw->conn);
      pfds.events = POLLIN;

      ev = xcb_poll_for_special_event(draw->conn, draw->special_event);
      while (!ev) {
         /* Wait up to ~1s for the XCB FD to become readable */
         if (poll(&pfds, 1, 1000) < 1) {
            xcb_get_window_attributes_cookie_t cookie;
            xcb_get_window_attributes_reply_t *attrib;
            xcb_generic_error_t *error;

            /* Check if the window still exists; without this a client
             * could spin here forever after the window is destroyed.
             */
            cookie = xcb_get_window_attributes(draw->conn, draw->drawable);
            attrib = xcb_get_window_attributes_reply(draw->conn, cookie, &error);
            free(attrib);

            if (error) {
               if (error->error_code == BadWindow)
                  draw->window_destroyed = true;

               free(error);
               break;
            }
         }

         ev = xcb_poll_for_special_event(draw->conn, draw->special_event);
      }

      mtx_lock(&draw->mtx);
      draw->has_event_waiter = false;
      cnd_broadcast(&draw->event_cnd);
   }
   if (!ev)
      return false;
   draw->last_special_event_sequence = ev->full_sequence;
   if (full_sequence)
      *full_sequence = ev->full_sequence;
   ge = (void *) ev;
   dri3_handle_present_event(draw, ge);
   return true;
}
598
/** loader_dri3_wait_for_msc
 *
 * Get the X server to send an event when the target msc/divisor/remainder is
 * reached.
 *
 * Blocks until the NotifyMSC completion matching our request sequence
 * arrives and the notified MSC reaches the target.  Returns false if the
 * event wait fails (e.g. window destroyed).
 */
bool
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
                         int64_t target_msc,
                         int64_t divisor, int64_t remainder,
                         int64_t *ust, int64_t *msc, int64_t *sbc)
{
   xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
                                                     draw->drawable,
                                                     draw->eid,
                                                     target_msc,
                                                     divisor,
                                                     remainder);
   unsigned full_sequence;

   mtx_lock(&draw->mtx);

   /* Wait for the event */
   do {
      if (!dri3_wait_for_event_locked(draw, &full_sequence)) {
         mtx_unlock(&draw->mtx);
         return false;
      }
      /* Keep waiting until the event for *our* request arrives and the
       * notified MSC has reached the target.
       */
   } while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);

   *ust = draw->notify_ust;
   *msc = draw->notify_msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);

   return true;
}
635
/** loader_dri3_wait_for_sbc
 *
 * Wait for the completed swap buffer count to reach the specified
 * target. Presumably the application knows that this will be reached with
 * outstanding complete events, or we're going to be here awhile.
 *
 * \return 1 on success with *ust/*msc/*sbc filled in, 0 if the event
 *         wait failed.
 */
int
loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
                         int64_t target_sbc, int64_t *ust,
                         int64_t *msc, int64_t *sbc)
{
   /* From the GLX_OML_sync_control spec:
    *
    *     "If <target_sbc> = 0, the function will block until all previous
    *      swaps requested with glXSwapBuffersMscOML for that window have
    *      completed."
    */
   mtx_lock(&draw->mtx);
   if (!target_sbc)
      target_sbc = draw->send_sbc;

   while (draw->recv_sbc < target_sbc) {
      if (!dri3_wait_for_event_locked(draw, NULL)) {
         mtx_unlock(&draw->mtx);
         return 0;
      }
   }

   *ust = draw->ust;
   *msc = draw->msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);
   return 1;
}
670
/** loader_dri3_find_back
 *
 * Find an idle back buffer. If there isn't one, then
 * wait for a present idle notify event from the X server
 *
 * \return the buffer id of an idle back buffer, or -1 if the event wait
 *         failed (e.g. window destroyed).
 */
static int
dri3_find_back(struct loader_dri3_drawable *draw)
{
   int b;
   int num_to_consider;

   mtx_lock(&draw->mtx);
   /* Increase the likelyhood of reusing current buffer */
   dri3_flush_present_events(draw);

   /* Check whether we need to reuse the current back buffer as new back.
    * In that case, wait until it's not busy anymore.
    */
   num_to_consider = draw->num_back;
   if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) {
      num_to_consider = 1;
      draw->cur_blit_source = -1;
   }

   for (;;) {
      /* An unallocated slot (NULL buffer) also counts as idle. */
      for (b = 0; b < num_to_consider; b++) {
         int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->num_back);
         struct loader_dri3_buffer *buffer = draw->buffers[id];

         if (!buffer || !buffer->busy) {
            draw->cur_back = id;
            mtx_unlock(&draw->mtx);
            return id;
         }
      }
      /* All candidates busy: block until the server sends more events. */
      if (!dri3_wait_for_event_locked(draw, NULL)) {
         mtx_unlock(&draw->mtx);
         return -1;
      }
   }
}
712
713 static xcb_gcontext_t
714 dri3_drawable_gc(struct loader_dri3_drawable *draw)
715 {
716 if (!draw->gc) {
717 uint32_t v = 0;
718 xcb_create_gc(draw->conn,
719 (draw->gc = xcb_generate_id(draw->conn)),
720 draw->drawable,
721 XCB_GC_GRAPHICS_EXPOSURES,
722 &v);
723 }
724 return draw->gc;
725 }
726
727
/* Return the currently selected back buffer slot (may be NULL if the
 * slot has not been allocated yet).
 */
static struct loader_dri3_buffer *
dri3_back_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
}
733
/* Return the fake front buffer slot (may be NULL if not allocated). */
static struct loader_dri3_buffer *
dri3_fake_front_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_FRONT_ID];
}
739
740 static void
741 dri3_copy_area(xcb_connection_t *c,
742 xcb_drawable_t src_drawable,
743 xcb_drawable_t dst_drawable,
744 xcb_gcontext_t gc,
745 int16_t src_x,
746 int16_t src_y,
747 int16_t dst_x,
748 int16_t dst_y,
749 uint16_t width,
750 uint16_t height)
751 {
752 xcb_void_cookie_t cookie;
753
754 cookie = xcb_copy_area_checked(c,
755 src_drawable,
756 dst_drawable,
757 gc,
758 src_x,
759 src_y,
760 dst_x,
761 dst_y,
762 width,
763 height);
764 xcb_discard_reply(c, cookie.sequence);
765 }
766
/**
 * Asks the driver to flush any queued work necessary for serializing with the
 * X command stream, and optionally the slightly more strict requirement of
 * glFlush() equivalence (which would require flushing even if nothing had
 * been drawn to a window system framebuffer, for example).
 */
void
loader_dri3_flush(struct loader_dri3_drawable *draw,
                  unsigned flags,
                  enum __DRI2throttleReason throttle_reason)
{
   /* The drawable may have no current context; in that case there is
    * nothing to flush and the call is silently skipped.
    */
   __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);

   if (dri_context) {
      draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
                                         flags, throttle_reason);
   }
}
786
/* Copy a sub-rectangle of the back buffer to the window, optionally
 * flushing the context first.  Also refreshes the fake front (if any)
 * with the same region.  No-op for pixmaps or when there is no back
 * buffer.
 */
void
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
                            int x, int y,
                            int width, int height,
                            bool flush)
{
   struct loader_dri3_buffer *back;
   unsigned flags = __DRI2_FLUSH_DRAWABLE;

   /* Check we have the right attachments */
   if (!draw->have_back || draw->is_pixmap)
      return;

   if (flush)
      flags |= __DRI2_FLUSH_CONTEXT;
   loader_dri3_flush(draw, flags, __DRI2_THROTTLE_COPYSUBBUFFER);

   back = dri3_find_back_alloc(draw);
   if (!back)
      return;

   /* Convert from GL's bottom-left origin to X's top-left origin. */
   y = draw->height - y - height;

   if (draw->is_different_gpu) {
      /* Update the linear buffer part of the back buffer
       * for the dri3_copy_area operation
       */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   loader_dri3_swapbuffer_barrier(draw);
   dri3_fence_reset(draw->conn, back);
   dri3_copy_area(draw->conn,
                  back->pixmap,
                  draw->drawable,
                  dri3_drawable_gc(draw),
                  x, y, x, y, width, height);
   dri3_fence_trigger(draw->conn, back);
   /* Refresh the fake front (if present) after we just damaged the real
    * front.
    */
   if (draw->have_fake_front &&
       !loader_dri3_blit_image(draw,
                               dri3_fake_front_buffer(draw)->image,
                               back->image,
                               x, y, width, height,
                               x, y, __BLIT_FLAG_FLUSH) &&
       !draw->is_different_gpu) {
      /* Local blit unavailable: fall back to a server-side copy. */
      dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
      dri3_copy_area(draw->conn,
                     back->pixmap,
                     dri3_fake_front_buffer(draw)->pixmap,
                     dri3_drawable_gc(draw),
                     x, y, x, y, width, height);
      dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
      dri3_fence_await(draw->conn, NULL, dri3_fake_front_buffer(draw));
   }
   dri3_fence_await(draw->conn, draw, back);
}
850
/* Server-side copy of the whole drawable from src to dest, gated by the
 * fake front buffer's fence; blocks until the copy has completed.
 */
void
loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
                          xcb_drawable_t dest,
                          xcb_drawable_t src)
{
   loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, __DRI2_THROTTLE_COPYSUBBUFFER);

   dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
   dri3_copy_area(draw->conn,
                  src, dest,
                  dri3_drawable_gc(draw),
                  0, 0, 0, 0, draw->width, draw->height);
   dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
   dri3_fence_await(draw->conn, draw, dri3_fake_front_buffer(draw));
}
866
/* Bring the fake front buffer up to date with the real front (window)
 * contents.  No-op when there is no fake front.
 */
void
loader_dri3_wait_x(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);

   /* In the psc->is_different_gpu case, the linear buffer has been updated,
    * but not yet the tiled buffer.
    * Copy back to the tiled buffer we use for rendering.
    * Note that we don't need flushing.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->image,
                                    front->linear_buffer,
                                    0, 0, front->width, front->height,
                                    0, 0, 0);
}
891
/* Propagate the fake front buffer's contents to the real front (window).
 * No-op when there is no fake front.
 */
void
loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   /* In the psc->is_different_gpu case, we update the linear_buffer
    * before updating the real front.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->linear_buffer,
                                    front->image,
                                    0, 0, front->width, front->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   loader_dri3_swapbuffer_barrier(draw);
   loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
}
914
915 /** dri3_flush_present_events
916 *
917 * Process any present events that have been received from the X server
918 */
919 static void
920 dri3_flush_present_events(struct loader_dri3_drawable *draw)
921 {
922 /* Check to see if any configuration changes have occurred
923 * since we were last invoked
924 */
925 if (draw->has_event_waiter)
926 return;
927
928 if (draw->special_event) {
929 xcb_generic_event_t *ev;
930
931 while ((ev = xcb_poll_for_special_event(draw->conn,
932 draw->special_event)) != NULL) {
933 xcb_present_generic_event_t *ge = (void *) ev;
934 dri3_handle_present_event(draw, ge);
935 }
936 }
937 }
938
/** loader_dri3_swap_buffers_msc
 *
 * Make the current back buffer visible using the present extension
 *
 * \return the SBC of the issued present request, or 0 if nothing was
 *         presented (no back buffer, or the drawable is a pixmap).
 */
int64_t
loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
                             int64_t target_msc, int64_t divisor,
                             int64_t remainder, unsigned flush_flags,
                             bool force_copy)
{
   struct loader_dri3_buffer *back;
   int64_t ret = 0;
   uint32_t options = XCB_PRESENT_OPTION_NONE;

   draw->vtable->flush_drawable(draw, flush_flags);

   back = dri3_find_back_alloc(draw);

   mtx_lock(&draw->mtx);

   /* Enable adaptive sync on first present if the option is set. */
   if (draw->adaptive_sync && !draw->adaptive_sync_active) {
      set_adaptive_sync_property(draw->conn, draw->drawable, true);
      draw->adaptive_sync_active = true;
   }

   if (draw->is_different_gpu && back) {
      /* Update the linear buffer before presenting the pixmap */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   /* If we need to preload the new back buffer, remember the source.
    * The force_copy parameter is used by EGL to attempt to preserve
    * the back buffer across a call to this function.
    */
   if (draw->swap_method != __DRI_ATTRIB_SWAP_UNDEFINED || force_copy)
      draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);

   /* Exchange the back and fake front. Even though the server knows about these
    * buffers, it has no notion of back and fake front.
    */
   if (back && draw->have_fake_front) {
      struct loader_dri3_buffer *tmp;

      tmp = dri3_fake_front_buffer(draw);
      draw->buffers[LOADER_DRI3_FRONT_ID] = back;
      draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp;

      if (draw->swap_method == __DRI_ATTRIB_SWAP_COPY || force_copy)
         draw->cur_blit_source = LOADER_DRI3_FRONT_ID;
   }

   dri3_flush_present_events(draw);

   if (back && !draw->is_pixmap) {
      dri3_fence_reset(draw->conn, back);

      /* Compute when we want the frame shown by taking the last known
       * successful MSC and adding in a swap interval for each outstanding swap
       * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
       * semantic"
       */
      ++draw->send_sbc;
      if (target_msc == 0 && divisor == 0 && remainder == 0)
         target_msc = draw->msc + draw->swap_interval *
                      (draw->send_sbc - draw->recv_sbc);
      else if (divisor == 0 && remainder > 0) {
         /* From the GLX_OML_sync_control spec:
          *     "If <divisor> = 0, the swap will occur when MSC becomes
          *      greater than or equal to <target_msc>."
          *
          * Note that there's no mention of the remainder.  The Present
          * extension throws BadValue for remainder != 0 with divisor == 0, so
          * just drop the passed in value.
          */
         remainder = 0;
      }

      /* From the GLX_EXT_swap_control spec
       * and the EGL 1.4 spec (page 53):
       *
       *     "If <interval> is set to a value of 0, buffer swaps are not
       *      synchronized to a video frame."
       *
       * Implementation note: It is possible to enable triple buffering
       * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
       * the default.
       */
      if (draw->swap_interval == 0)
         options |= XCB_PRESENT_OPTION_ASYNC;

      /* If we need to populate the new back, but need to reuse the back
       * buffer slot due to lack of local blit capabilities, make sure
       * the server doesn't flip and we deadlock.
       */
      if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1)
         options |= XCB_PRESENT_OPTION_COPY;
#ifdef HAVE_DRI3_MODIFIERS
      if (draw->multiplanes_available)
         options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif
      back->busy = 1;
      back->last_swap = draw->send_sbc;
      xcb_present_pixmap(draw->conn,
                         draw->drawable,
                         back->pixmap,
                         (uint32_t) draw->send_sbc,
                         0,                                    /* valid */
                         0,                                    /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         None,                                 /* target_crtc */
                         None,
                         back->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
      ret = (int64_t) draw->send_sbc;

      /* Schedule a server-side back-preserving blit if necessary.
       * This happens iff all conditions below are satisfied:
       * a) We have a fake front,
       * b) We need to preserve the back buffer,
       * c) We don't have local blit capabilities.
       */
      if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1 &&
          draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) {
         struct loader_dri3_buffer *new_back = dri3_back_buffer(draw);
         struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source];

         dri3_fence_reset(draw->conn, new_back);
         dri3_copy_area(draw->conn, src->pixmap,
                        new_back->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0, draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_back);
         new_back->last_swap = src->last_swap;
      }

      xcb_flush(draw->conn);
      if (draw->stamp)
         ++(*draw->stamp);
   }
   mtx_unlock(&draw->mtx);

   draw->ext->flush->invalidate(draw->dri_drawable);

   return ret;
}
1092
1093 int
1094 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
1095 {
1096 struct loader_dri3_buffer *back = dri3_find_back_alloc(draw);
1097 int ret;
1098
1099 mtx_lock(&draw->mtx);
1100 ret = (!back || back->last_swap == 0) ? 0 :
1101 draw->send_sbc - back->last_swap + 1;
1102 mtx_unlock(&draw->mtx);
1103
1104 return ret;
1105 }
1106
1107 /** loader_dri3_open
1108 *
1109 * Wrapper around xcb_dri3_open
1110 */
1111 int
1112 loader_dri3_open(xcb_connection_t *conn,
1113 xcb_window_t root,
1114 uint32_t provider)
1115 {
1116 xcb_dri3_open_cookie_t cookie;
1117 xcb_dri3_open_reply_t *reply;
1118 int fd;
1119
1120 cookie = xcb_dri3_open(conn,
1121 root,
1122 provider);
1123
1124 reply = xcb_dri3_open_reply(conn, cookie, NULL);
1125 if (!reply)
1126 return -1;
1127
1128 if (reply->nfd != 1) {
1129 free(reply);
1130 return -1;
1131 }
1132
1133 fd = xcb_dri3_open_reply_fds(conn, reply)[0];
1134 free(reply);
1135 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
1136
1137 return fd;
1138 }
1139
1140 static uint32_t
1141 dri3_cpp_for_format(uint32_t format) {
1142 switch (format) {
1143 case __DRI_IMAGE_FORMAT_R8:
1144 return 1;
1145 case __DRI_IMAGE_FORMAT_RGB565:
1146 case __DRI_IMAGE_FORMAT_GR88:
1147 return 2;
1148 case __DRI_IMAGE_FORMAT_XRGB8888:
1149 case __DRI_IMAGE_FORMAT_ARGB8888:
1150 case __DRI_IMAGE_FORMAT_ABGR8888:
1151 case __DRI_IMAGE_FORMAT_XBGR8888:
1152 case __DRI_IMAGE_FORMAT_XRGB2101010:
1153 case __DRI_IMAGE_FORMAT_ARGB2101010:
1154 case __DRI_IMAGE_FORMAT_XBGR2101010:
1155 case __DRI_IMAGE_FORMAT_ABGR2101010:
1156 case __DRI_IMAGE_FORMAT_SARGB8:
1157 case __DRI_IMAGE_FORMAT_SABGR8:
1158 case __DRI_IMAGE_FORMAT_SXRGB8:
1159 return 4;
1160 case __DRI_IMAGE_FORMAT_XBGR16161616F:
1161 case __DRI_IMAGE_FORMAT_ABGR16161616F:
1162 return 8;
1163 case __DRI_IMAGE_FORMAT_NONE:
1164 default:
1165 return 0;
1166 }
1167 }
1168
1169 /* Map format of render buffer to corresponding format for the linear_buffer
1170 * used for sharing with the display gpu of a Prime setup (== is_different_gpu).
1171 * Usually linear_format == format, except for depth >= 30 formats, where
1172 * different gpu vendors have different preferences wrt. color channel ordering.
1173 */
1174 static uint32_t
1175 dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
1176 {
1177 switch (format) {
1178 case __DRI_IMAGE_FORMAT_XRGB2101010:
1179 case __DRI_IMAGE_FORMAT_XBGR2101010:
1180 /* Different preferred formats for different hw */
1181 if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1182 return __DRI_IMAGE_FORMAT_XBGR2101010;
1183 else
1184 return __DRI_IMAGE_FORMAT_XRGB2101010;
1185
1186 case __DRI_IMAGE_FORMAT_ARGB2101010:
1187 case __DRI_IMAGE_FORMAT_ABGR2101010:
1188 /* Different preferred formats for different hw */
1189 if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1190 return __DRI_IMAGE_FORMAT_ABGR2101010;
1191 else
1192 return __DRI_IMAGE_FORMAT_ARGB2101010;
1193
1194 default:
1195 return format;
1196 }
1197 }
1198
1199 /* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
1200 * the createImageFromFds call takes DRM_FORMAT codes. To avoid
1201 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
1202 * translate to DRM_FORMAT codes in the call to createImageFromFds
1203 */
1204 static int
1205 image_format_to_fourcc(int format)
1206 {
1207
1208 /* Convert from __DRI_IMAGE_FORMAT to DRM_FORMAT (sigh) */
1209 switch (format) {
1210 case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
1211 case __DRI_IMAGE_FORMAT_SABGR8: return __DRI_IMAGE_FOURCC_SABGR8888;
1212 case __DRI_IMAGE_FORMAT_SXRGB8: return __DRI_IMAGE_FOURCC_SXRGB8888;
1213 case __DRI_IMAGE_FORMAT_RGB565: return DRM_FORMAT_RGB565;
1214 case __DRI_IMAGE_FORMAT_XRGB8888: return DRM_FORMAT_XRGB8888;
1215 case __DRI_IMAGE_FORMAT_ARGB8888: return DRM_FORMAT_ARGB8888;
1216 case __DRI_IMAGE_FORMAT_ABGR8888: return DRM_FORMAT_ABGR8888;
1217 case __DRI_IMAGE_FORMAT_XBGR8888: return DRM_FORMAT_XBGR8888;
1218 case __DRI_IMAGE_FORMAT_XRGB2101010: return DRM_FORMAT_XRGB2101010;
1219 case __DRI_IMAGE_FORMAT_ARGB2101010: return DRM_FORMAT_ARGB2101010;
1220 case __DRI_IMAGE_FORMAT_XBGR2101010: return DRM_FORMAT_XBGR2101010;
1221 case __DRI_IMAGE_FORMAT_ABGR2101010: return DRM_FORMAT_ABGR2101010;
1222 case __DRI_IMAGE_FORMAT_XBGR16161616F: return DRM_FORMAT_XBGR16161616F;
1223 case __DRI_IMAGE_FORMAT_ABGR16161616F: return DRM_FORMAT_ABGR16161616F;
1224 }
1225 return 0;
1226 }
1227
1228 #ifdef HAVE_DRI3_MODIFIERS
1229 static bool
1230 has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
1231 uint64_t *modifiers, uint32_t count)
1232 {
1233 uint64_t *supported_modifiers;
1234 int32_t supported_modifiers_count;
1235 bool found = false;
1236 int i, j;
1237
1238 if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen,
1239 format, 0, NULL, NULL,
1240 &supported_modifiers_count) ||
1241 supported_modifiers_count == 0)
1242 return false;
1243
1244 supported_modifiers = malloc(supported_modifiers_count * sizeof(uint64_t));
1245 if (!supported_modifiers)
1246 return false;
1247
1248 draw->ext->image->queryDmaBufModifiers(draw->dri_screen, format,
1249 supported_modifiers_count,
1250 supported_modifiers, NULL,
1251 &supported_modifiers_count);
1252
1253 for (i = 0; !found && i < supported_modifiers_count; i++) {
1254 for (j = 0; !found && j < count; j++) {
1255 if (supported_modifiers[i] == modifiers[j])
1256 found = true;
1257 }
1258 }
1259
1260 free(supported_modifiers);
1261 return found;
1262 }
1263 #endif
1264
1265 /** loader_dri3_alloc_render_buffer
1266 *
1267 * Use the driver createImage function to construct a __DRIimage, then
1268 * get a file descriptor for that and create an X pixmap from that
1269 *
1270 * Allocate an xshmfence for synchronization
1271 */
/* Allocate a render buffer of width x height for 'format', export it as an
 * X pixmap, and attach an xshmfence for client/server synchronization.
 * Returns NULL on failure; on success the buffer is marked idle.
 */
static struct loader_dri3_buffer *
dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
                         int width, int height, int depth)
{
   struct loader_dri3_buffer *buffer;
   __DRIimage *pixmap_buffer;
   xcb_pixmap_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int buffer_fds[4], fence_fd;
   int num_planes = 0;
   int i, mod;
   int ret;

   /* Create an xshmfence object and
    * prepare to send that to the X server
    */

   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      return NULL;

   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL)
      goto no_shm_fence;

   /* Allocate the image from the driver
    */
   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   /* Bytes per pixel; zero means the format is unknown and we can't
    * describe the buffer to the server. */
   buffer->cpp = dri3_cpp_for_format(format);
   if (!buffer->cpp)
      goto no_image;

   if (!draw->is_different_gpu) {
#ifdef HAVE_DRI3_MODIFIERS
      if (draw->multiplanes_available &&
          draw->ext->image->base.version >= 15 &&
          draw->ext->image->queryDmaBufModifiers &&
          draw->ext->image->createImageWithModifiers) {
         xcb_dri3_get_supported_modifiers_cookie_t mod_cookie;
         xcb_dri3_get_supported_modifiers_reply_t *mod_reply;
         xcb_generic_error_t *error = NULL;
         uint64_t *modifiers = NULL;
         uint32_t count = 0;

         /* Ask the server which modifiers are usable: per-window list is
          * preferred, whole-screen list is the fallback. */
         mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn,
                                                       draw->window,
                                                       depth, buffer->cpp * 8);
         mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn,
                                                            mod_cookie,
                                                            &error);
         if (!mod_reply)
            goto no_image;

         if (mod_reply->num_window_modifiers) {
            count = mod_reply->num_window_modifiers;
            modifiers = malloc(count * sizeof(uint64_t));
            if (!modifiers) {
               free(mod_reply);
               goto no_image;
            }

            memcpy(modifiers,
                   xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
                   count * sizeof(uint64_t));

            /* Drop the window list when the driver can't render with any
             * of its modifiers; the screen list below is tried instead. */
            if (!has_supported_modifier(draw, image_format_to_fourcc(format),
                                        modifiers, count)) {
               free(modifiers);
               count = 0;
               modifiers = NULL;
            }
         }

         if (mod_reply->num_screen_modifiers && modifiers == NULL) {
            count = mod_reply->num_screen_modifiers;
            modifiers = malloc(count * sizeof(uint64_t));
            if (!modifiers) {
               /* NOTE(review): modifiers is NULL here, so this free() is a
                * no-op; presumably a copy/paste leftover. */
               free(modifiers);
               free(mod_reply);
               goto no_image;
            }

            memcpy(modifiers,
                   xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
                   count * sizeof(uint64_t));
         }

         free(mod_reply);

         /* don't use createImageWithModifiers() if we have no
          * modifiers, other things depend on the use flags when
          * there are no modifiers to know that a buffer can be
          * shared.
          */
         if (modifiers) {
            buffer->image = draw->ext->image->createImageWithModifiers(draw->dri_screen,
                                                                       width, height,
                                                                       format,
                                                                       modifiers,
                                                                       count,
                                                                       buffer);
         }

         free(modifiers);
      }
#endif
      /* Fall back to a plain allocation when the modifier path is
       * unavailable or createImageWithModifiers() failed. */
      if (!buffer->image)
         buffer->image = draw->ext->image->createImage(draw->dri_screen,
                                                       width, height,
                                                       format,
                                                       __DRI_IMAGE_USE_SHARE |
                                                       __DRI_IMAGE_USE_SCANOUT |
                                                       __DRI_IMAGE_USE_BACKBUFFER,
                                                       buffer);

      pixmap_buffer = buffer->image;

      if (!buffer->image)
         goto no_image;
   } else {
      /* Prime setup (render and display on different gpus): render into a
       * driver-optimal image and share a separate linear image with the
       * display gpu; the linear one backs the pixmap. */
      buffer->image = draw->ext->image->createImage(draw->dri_screen,
                                                    width, height,
                                                    format,
                                                    0,
                                                    buffer);

      if (!buffer->image)
         goto no_image;

      buffer->linear_buffer =
         draw->ext->image->createImage(draw->dri_screen,
                                       width, height,
                                       dri3_linear_format_for_format(draw, format),
                                       __DRI_IMAGE_USE_SHARE |
                                       __DRI_IMAGE_USE_LINEAR |
                                       __DRI_IMAGE_USE_BACKBUFFER,
                                       buffer);
      pixmap_buffer = buffer->linear_buffer;

      if (!buffer->linear_buffer)
         goto no_linear_buffer;
   }

   /* X want some information about the planes, so ask the image for it
    */
   if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
                                     &num_planes))
      num_planes = 1;

   /* Export an fd, stride and offset for every plane. */
   for (i = 0; i < num_planes; i++) {
      __DRIimage *image = draw->ext->image->fromPlanar(pixmap_buffer, i, NULL);

      if (!image) {
         assert(i == 0);
         image = pixmap_buffer;
      }

      buffer_fds[i] = -1;

      ret = draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD,
                                         &buffer_fds[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE,
                                          &buffer->strides[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET,
                                          &buffer->offsets[i]);
      if (image != pixmap_buffer)
         draw->ext->image->destroyImage(image);

      if (!ret)
         goto no_buffer_attrib;
   }

   /* Reassemble the 64-bit format modifier from its two 32-bit halves. */
   ret = draw->ext->image->queryImage(pixmap_buffer,
                                      __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
   buffer->modifier = (uint64_t) mod << 32;
   ret &= draw->ext->image->queryImage(pixmap_buffer,
                                       __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
   buffer->modifier |= (uint64_t)(mod & 0xffffffff);

   if (!ret)
      buffer->modifier = DRM_FORMAT_MOD_INVALID;

   /* Wrap the exported buffer(s) in an X pixmap; ownership of the fds
    * passes to the X server with the request. */
   pixmap = xcb_generate_id(draw->conn);
#ifdef HAVE_DRI3_MODIFIERS
   if (draw->multiplanes_available &&
       buffer->modifier != DRM_FORMAT_MOD_INVALID) {
      xcb_dri3_pixmap_from_buffers(draw->conn,
                                   pixmap,
                                   draw->window,
                                   num_planes,
                                   width, height,
                                   buffer->strides[0], buffer->offsets[0],
                                   buffer->strides[1], buffer->offsets[1],
                                   buffer->strides[2], buffer->offsets[2],
                                   buffer->strides[3], buffer->offsets[3],
                                   depth, buffer->cpp * 8,
                                   buffer->modifier,
                                   buffer_fds);
   } else
#endif
   {
      /* NOTE(review): buffer->size is never assigned in this function
       * (calloc leaves it 0) — confirm it is set elsewhere or that the
       * server tolerates a zero size for this request. */
      xcb_dri3_pixmap_from_buffer(draw->conn,
                                  pixmap,
                                  draw->drawable,
                                  buffer->size,
                                  width, height, buffer->strides[0],
                                  depth, buffer->cpp * 8,
                                  buffer_fds[0]);
   }

   /* Import our xshmfence into the server so both sides can signal and
    * wait on it; fence_fd is consumed by this request. */
   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);

   buffer->pixmap = pixmap;
   buffer->own_pixmap = true;
   buffer->sync_fence = sync_fence;
   buffer->shm_fence = shm_fence;
   buffer->width = width;
   buffer->height = height;

   /* Mark the buffer as idle
    */
   dri3_fence_set(buffer);

   return buffer;

   /* Error unwinding: each label releases everything acquired before the
    * failing step, in reverse order of acquisition. */
no_buffer_attrib:
   /* Close every plane fd exported so far; i is the failing plane index. */
   do {
      if (buffer_fds[i] != -1)
         close(buffer_fds[i]);
   } while (--i >= 0);
   draw->ext->image->destroyImage(pixmap_buffer);
no_linear_buffer:
   if (draw->is_different_gpu)
      draw->ext->image->destroyImage(buffer->image);
no_image:
   free(buffer);
no_buffer:
   xshmfence_unmap_shm(shm_fence);
no_shm_fence:
   close(fence_fd);
   return NULL;
}
1522
1523 /** loader_dri3_update_drawable
1524 *
1525 * Called the first time we use the drawable and then
1526 * after we receive present configure notify events to
1527 * track the geometry of the drawable
1528 */
/* One-time drawable setup plus event-queue maintenance.  Returns true (as
 * int) on success, false when a server round-trip failed.  Takes draw->mtx
 * for the whole body.
 */
static int
dri3_update_drawable(struct loader_dri3_drawable *draw)
{
   mtx_lock(&draw->mtx);
   if (draw->first_init) {
      xcb_get_geometry_cookie_t geom_cookie;
      xcb_get_geometry_reply_t *geom_reply;
      xcb_void_cookie_t cookie;
      xcb_generic_error_t *error;
      xcb_present_query_capabilities_cookie_t present_capabilities_cookie;
      xcb_present_query_capabilities_reply_t *present_capabilities_reply;
      xcb_window_t root_win;

      draw->first_init = false;

      /* Try to select for input on the window.
       *
       * If the drawable is a window, this will get our events
       * delivered.
       *
       * Otherwise, we'll get a BadWindow error back from this request which
       * will let us know that the drawable is a pixmap instead.
       */

      draw->eid = xcb_generate_id(draw->conn);
      cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

      present_capabilities_cookie =
         xcb_present_query_capabilities(draw->conn, draw->drawable);

      /* Create an XCB event queue to hold present events outside of the usual
       * application event queue
       */
      draw->special_event = xcb_register_for_special_xge(draw->conn,
                                                         &xcb_present_id,
                                                         draw->eid,
                                                         draw->stamp);
      geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);

      geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);

      if (!geom_reply) {
         mtx_unlock(&draw->mtx);
         return false;
      }
      /* Cache the drawable geometry and tell the driver about it. */
      draw->width = geom_reply->width;
      draw->height = geom_reply->height;
      draw->depth = geom_reply->depth;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      /* Remember the root window: pixmaps use it as their "window" for
       * requests that need one (see below). */
      root_win = geom_reply->root;

      free(geom_reply);

      draw->is_pixmap = false;

      /* Check to see if our select input call failed. If it failed with a
       * BadWindow error, then assume the drawable is a pixmap. Destroy the
       * special event queue created above and mark the drawable as a pixmap
       */

      error = xcb_request_check(draw->conn, cookie);

      present_capabilities_reply =
          xcb_present_query_capabilities_reply(draw->conn,
                                               present_capabilities_cookie,
                                               NULL);

      if (present_capabilities_reply) {
         draw->present_capabilities = present_capabilities_reply->capabilities;
         free(present_capabilities_reply);
      } else
         draw->present_capabilities = 0;

      if (error) {
         /* Any error other than BadWindow is a genuine failure. */
         if (error->error_code != BadWindow) {
            free(error);
            mtx_unlock(&draw->mtx);
            return false;
         }
         free(error);
         draw->is_pixmap = true;
         xcb_unregister_for_special_event(draw->conn, draw->special_event);
         draw->special_event = NULL;
      }

      if (draw->is_pixmap)
         draw->window = root_win;
      else
         draw->window = draw->drawable;
   }
   /* Drain any Present events that arrived since the last call. */
   dri3_flush_present_events(draw);
   mtx_unlock(&draw->mtx);
   return true;
}
1627
1628 __DRIimage *
1629 loader_dri3_create_image(xcb_connection_t *c,
1630 xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1631 unsigned int format,
1632 __DRIscreen *dri_screen,
1633 const __DRIimageExtension *image,
1634 void *loaderPrivate)
1635 {
1636 int *fds;
1637 __DRIimage *image_planar, *ret;
1638 int stride, offset;
1639
1640 /* Get an FD for the pixmap object
1641 */
1642 fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1643
1644 stride = bp_reply->stride;
1645 offset = 0;
1646
1647 /* createImageFromFds creates a wrapper __DRIimage structure which
1648 * can deal with multiple planes for things like Yuv images. So, once
1649 * we've gotten the planar wrapper, pull the single plane out of it and
1650 * discard the wrapper.
1651 */
1652 image_planar = image->createImageFromFds(dri_screen,
1653 bp_reply->width,
1654 bp_reply->height,
1655 image_format_to_fourcc(format),
1656 fds, 1,
1657 &stride, &offset, loaderPrivate);
1658 close(fds[0]);
1659 if (!image_planar)
1660 return NULL;
1661
1662 ret = image->fromPlanar(image_planar, 0, loaderPrivate);
1663
1664 if (!ret)
1665 ret = image_planar;
1666 else
1667 image->destroyImage(image_planar);
1668
1669 return ret;
1670 }
1671
1672 #ifdef HAVE_DRI3_MODIFIERS
1673 __DRIimage *
1674 loader_dri3_create_image_from_buffers(xcb_connection_t *c,
1675 xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
1676 unsigned int format,
1677 __DRIscreen *dri_screen,
1678 const __DRIimageExtension *image,
1679 void *loaderPrivate)
1680 {
1681 __DRIimage *ret;
1682 int *fds;
1683 uint32_t *strides_in, *offsets_in;
1684 int strides[4], offsets[4];
1685 unsigned error;
1686 int i;
1687
1688 if (bp_reply->nfd > 4)
1689 return NULL;
1690
1691 fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
1692 strides_in = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
1693 offsets_in = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);
1694 for (i = 0; i < bp_reply->nfd; i++) {
1695 strides[i] = strides_in[i];
1696 offsets[i] = offsets_in[i];
1697 }
1698
1699 ret = image->createImageFromDmaBufs2(dri_screen,
1700 bp_reply->width,
1701 bp_reply->height,
1702 image_format_to_fourcc(format),
1703 bp_reply->modifier,
1704 fds, bp_reply->nfd,
1705 strides, offsets,
1706 0, 0, 0, 0, /* UNDEFINED */
1707 &error, loaderPrivate);
1708
1709 for (i = 0; i < bp_reply->nfd; i++)
1710 close(fds[i]);
1711
1712 return ret;
1713 }
1714 #endif
1715
1716 /** dri3_get_pixmap_buffer
1717 *
1718 * Get the DRM object for a pixmap from the X server and
1719 * wrap that with a __DRIimage structure using createImageFromFds
1720 */
static struct loader_dri3_buffer *
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
                       enum loader_dri3_buffer_type buffer_type,
                       struct loader_dri3_drawable *draw)
{
   int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
   struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
   xcb_drawable_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int width;
   int height;
   int fence_fd;
   __DRIscreen *cur_screen;

   /* Cached from a previous call: reuse it. */
   if (buffer)
      return buffer;

   pixmap = draw->drawable;

   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   /* Allocate and map an xshmfence for client/server synchronization. */
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto no_fence;
   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL) {
      close (fence_fd);
      goto no_fence;
   }

   /* Get the currently-bound screen or revert to using the drawable's screen if
    * no contexts are currently bound. The latter case is at least necessary for
    * obs-studio, when using Window Capture (Xcomposite) as a Source.
    */
   cur_screen = draw->vtable->get_dri_screen();
   if (!cur_screen) {
      cur_screen = draw->dri_screen;
   }

   /* Import the fence into the server; fence_fd is consumed by this
    * request. */
   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);
#ifdef HAVE_DRI3_MODIFIERS
   /* Multi-plane path: fetch all plane buffers plus the format modifier. */
   if (draw->multiplanes_available &&
       draw->ext->image->base.version >= 15 &&
       draw->ext->image->createImageFromDmaBufs2) {
      xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
      xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;

      bps_cookie = xcb_dri3_buffers_from_pixmap(draw->conn, pixmap);
      bps_reply = xcb_dri3_buffers_from_pixmap_reply(draw->conn, bps_cookie,
                                                     NULL);
      if (!bps_reply)
         goto no_image;
      buffer->image =
         loader_dri3_create_image_from_buffers(draw->conn, bps_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bps_reply->width;
      height = bps_reply->height;
      free(bps_reply);
   } else
#endif
   {
      /* Single-buffer fallback path. */
      xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
      xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;

      bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
      bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
      if (!bp_reply)
         goto no_image;

      buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bp_reply->width;
      height = bp_reply->height;
      free(bp_reply);
   }

   if (!buffer->image)
      goto no_image;

   buffer->pixmap = pixmap;
   /* The pixmap belongs to the application; don't free it on teardown. */
   buffer->own_pixmap = false;
   buffer->width = width;
   buffer->height = height;
   buffer->shm_fence = shm_fence;
   buffer->sync_fence = sync_fence;

   /* Cache for subsequent calls. */
   draw->buffers[buf_id] = buffer;

   return buffer;

no_image:
   /* The server-side fence was already created above; tear it down along
    * with our mapping. */
   xcb_sync_destroy_fence(draw->conn, sync_fence);
   xshmfence_unmap_shm(shm_fence);
no_fence:
   free(buffer);
no_buffer:
   return NULL;
}
1828
1829 /** dri3_get_buffer
1830 *
1831 * Find a front or back buffer, allocating new ones as necessary
1832 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int format,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   /* Back buffers must always be idle before we hand them out; fronts only
    * need a wait when we had to copy into them below. */
   bool fence_await = buffer_type == loader_dri3_buffer_back;
   int buf_id;

   if (buffer_type == loader_dri3_buffer_back) {
      draw->back_format = format;

      buf_id = dri3_find_back(draw);

      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, if that
    * old one is the wrong size, or if it's suboptimal
    */
   if (!buffer || buffer->width != draw->width ||
       buffer->height != draw->height ||
       buffer->reallocate) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new buffers
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                            format,
                                            draw->width,
                                            draw->height,
                                            draw->depth);
      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      if ((buffer_type == loader_dri3_buffer_back ||
           (buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
          && buffer) {

         /* Fill the new buffer with data from an old buffer */
         if (!loader_dri3_blit_image(draw,
                                     new_buffer->image,
                                     buffer->image,
                                     0, 0,
                                     MIN2(buffer->width, new_buffer->width),
                                     MIN2(buffer->height, new_buffer->height),
                                     0, 0, 0) &&
             !buffer->linear_buffer) {
            /* Local blit unavailable: fall back to a server-side CopyArea
             * bracketed by our fence so we can wait for it below. */
            dri3_fence_reset(draw->conn, new_buffer);
            dri3_copy_area(draw->conn,
                           buffer->pixmap,
                           new_buffer->pixmap,
                           dri3_drawable_gc(draw),
                           0, 0, 0, 0,
                           draw->width, draw->height);
            dri3_fence_trigger(draw->conn, new_buffer);
            fence_await = true;
         }
         dri3_free_render_buffer(draw, buffer);
      } else if (buffer_type == loader_dri3_buffer_front) {
         /* Fill the new fake front with data from a real front */
         loader_dri3_swapbuffer_barrier(draw);
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         if (new_buffer->linear_buffer) {
            /* Prime: wait for the server copy, then sync the linear
             * staging buffer into the render-gpu image. */
            dri3_fence_await(draw->conn, draw, new_buffer);
            (void) loader_dri3_blit_image(draw,
                                          new_buffer->image,
                                          new_buffer->linear_buffer,
                                          0, 0, draw->width, draw->height,
                                          0, 0, 0);
         } else
            fence_await = true;
      }
      buffer = new_buffer;
      draw->buffers[buf_id] = buffer;
   }

   if (fence_await)
      dri3_fence_await(draw->conn, draw, buffer);

   /*
    * Do we need to preserve the content of a previous buffer?
    *
    * Note that this blit is needed only to avoid a wait for a buffer that
    * is currently in the flip chain or being scanned out from. That's really
    * a tradeoff. If we're ok with the wait we can reduce the number of back
    * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
    * but in the latter case we must disallow page-flipping.
    */
   if (buffer_type == loader_dri3_buffer_back &&
       draw->cur_blit_source != -1 &&
       draw->buffers[draw->cur_blit_source] &&
       buffer != draw->buffers[draw->cur_blit_source]) {

      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];

      /* Avoid flushing here. Will probably do good for tiling hardware. */
      (void) loader_dri3_blit_image(draw,
                                    buffer->image,
                                    source->image,
                                    0, 0, draw->width, draw->height,
                                    0, 0, 0);
      buffer->last_swap = source->last_swap;
      draw->cur_blit_source = -1;
   }
   /* Return the requested buffer */
   return buffer;
}
1958
1959 /** dri3_free_buffers
1960 *
1961 * Free the front bufffer or all of the back buffers. Used
1962 * when the application changes which buffers it needs
1963 */
1964 static void
1965 dri3_free_buffers(__DRIdrawable *driDrawable,
1966 enum loader_dri3_buffer_type buffer_type,
1967 struct loader_dri3_drawable *draw)
1968 {
1969 struct loader_dri3_buffer *buffer;
1970 int first_id;
1971 int n_id;
1972 int buf_id;
1973
1974 switch (buffer_type) {
1975 case loader_dri3_buffer_back:
1976 first_id = LOADER_DRI3_BACK_ID(0);
1977 n_id = LOADER_DRI3_MAX_BACK;
1978 draw->cur_blit_source = -1;
1979 break;
1980 case loader_dri3_buffer_front:
1981 first_id = LOADER_DRI3_FRONT_ID;
1982 /* Don't free a fake front holding new backbuffer content. */
1983 n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1;
1984 }
1985
1986 for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) {
1987 buffer = draw->buffers[buf_id];
1988 if (buffer) {
1989 dri3_free_render_buffer(draw, buffer);
1990 draw->buffers[buf_id] = NULL;
1991 }
1992 }
1993 }
1994
1995 /** loader_dri3_get_buffers
1996 *
1997 * The published buffer allocation API.
1998 * Returns all of the necessary buffers, allocating
1999 * as needed.
2000 */
int
loader_dri3_get_buffers(__DRIdrawable *driDrawable,
                        unsigned int format,
                        uint32_t *stamp,
                        void *loaderPrivate,
                        uint32_t buffer_mask,
                        struct __DRIimageList *buffers)
{
   struct loader_dri3_drawable *draw = loaderPrivate;
   struct loader_dri3_buffer *front, *back;
   int buf_id;

   /* Start from an empty result; filled in below as buffers are found. */
   buffers->image_mask = 0;
   buffers->front = NULL;
   buffers->back = NULL;

   front = NULL;
   back = NULL;

   if (!dri3_update_drawable(draw))
      return false;

   dri3_update_num_back(draw);

   /* Free no longer needed back buffers */
   for (buf_id = draw->num_back; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) {
      /* Keep a back that still serves as the preserve-blit source. */
      if (draw->cur_blit_source != buf_id && draw->buffers[buf_id]) {
         dri3_free_render_buffer(draw, draw->buffers[buf_id]);
         draw->buffers[buf_id] = NULL;
      }
   }

   /* pixmaps always have front buffers.
    * Exchange swaps also mandate fake front buffers.
    */
   if (draw->is_pixmap || draw->swap_method == __DRI_ATTRIB_SWAP_EXCHANGE)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
      /* All pixmaps are owned by the server gpu.
       * When we use a different gpu, we can't use the pixmap
       * as buffer since it is potentially tiled a way
       * our device can't understand. In this case, use
       * a fake front buffer. Hopefully the pixmap
       * content will get synced with the fake front
       * buffer.
       */
      if (draw->is_pixmap && !draw->is_different_gpu)
         front = dri3_get_pixmap_buffer(driDrawable,
                                        format,
                                        loader_dri3_buffer_front,
                                        draw);
      else
         front = dri3_get_buffer(driDrawable,
                                 format,
                                 loader_dri3_buffer_front,
                                 draw);

      if (!front)
         return false;
   } else {
      /* Front no longer requested: release it and clear the flag. */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
      draw->have_fake_front = 0;
   }

   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
      back = dri3_get_buffer(driDrawable,
                             format,
                             loader_dri3_buffer_back,
                             draw);
      if (!back)
         return false;
      draw->have_back = 1;
   } else {
      /* Back no longer requested: release all backs and clear the flag. */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
      draw->have_back = 0;
   }

   if (front) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
      buffers->front = front->image;
      /* A real pixmap front on the same gpu is not a "fake" front. */
      draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap;
   }

   if (back) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
      buffers->back = back->image;
   }

   /* Remember where the driver keeps its stamp so swaps can bump it. */
   draw->stamp = stamp;

   return true;
}
2094
2095 /** loader_dri3_update_drawable_geometry
2096 *
2097 * Get the current drawable geometry.
2098 */
2099 void
2100 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
2101 {
2102 xcb_get_geometry_cookie_t geom_cookie;
2103 xcb_get_geometry_reply_t *geom_reply;
2104
2105 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
2106
2107 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
2108
2109 if (geom_reply) {
2110 draw->width = geom_reply->width;
2111 draw->height = geom_reply->height;
2112 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
2113 draw->ext->flush->invalidate(draw->dri_drawable);
2114
2115 free(geom_reply);
2116 }
2117 }
2118
2119
2120 /**
2121 * Make sure the server has flushed all pending swap buffers to hardware
2122 * for this drawable. Ideally we'd want to send an X protocol request to
2123 * have the server block our connection until the swaps are complete. That
2124 * would avoid the potential round-trip here.
2125 */
void
loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
{
   int64_t ust = 0, msc = 0, sbc = 0;

   /* Waiting for sbc 0 blocks until every queued swap has completed; the
    * returned counters are not needed here. */
   (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
}
2133
2134 /**
2135 * Perform any cleanup associated with a close screen operation.
2136 * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
2137 *
2138 * This function destroys the screen's cached swap context if any.
2139 */
2140 void
2141 loader_dri3_close_screen(__DRIscreen *dri_screen)
2142 {
2143 mtx_lock(&blit_context.mtx);
2144 if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
2145 blit_context.core->destroyContext(blit_context.ctx);
2146 blit_context.ctx = NULL;
2147 }
2148 mtx_unlock(&blit_context.mtx);
2149 }
2150
2151 /**
2152 * Find a backbuffer slot - potentially allocating a back buffer
2153 *
2154 * \param draw[in,out] Pointer to the drawable for which to find back.
2155 * \return Pointer to a new back buffer or NULL if allocation failed or was
2156 * not mandated.
2157 *
2158 * Find a potentially new back buffer, and if it's not been allocated yet and
2159 * in addition needs initializing, then try to allocate and initialize it.
2160 */
2161 #include <stdio.h>
2162 static struct loader_dri3_buffer *
2163 dri3_find_back_alloc(struct loader_dri3_drawable *draw)
2164 {
2165 struct loader_dri3_buffer *back;
2166 int id;
2167
2168 id = dri3_find_back(draw);
2169 if (id < 0)
2170 return NULL;
2171
2172 back = draw->buffers[id];
2173 /* Allocate a new back if we haven't got one */
2174 if (!back && draw->back_format != __DRI_IMAGE_FORMAT_NONE &&
2175 dri3_update_drawable(draw))
2176 back = dri3_alloc_render_buffer(draw, draw->back_format,
2177 draw->width, draw->height, draw->depth);
2178
2179 if (!back)
2180 return NULL;
2181
2182 draw->buffers[id] = back;
2183
2184 /* If necessary, prefill the back with data according to swap_method mode. */
2185 if (draw->cur_blit_source != -1 &&
2186 draw->buffers[draw->cur_blit_source] &&
2187 back != draw->buffers[draw->cur_blit_source]) {
2188 struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2189
2190 dri3_fence_await(draw->conn, draw, source);
2191 dri3_fence_await(draw->conn, draw, back);
2192 (void) loader_dri3_blit_image(draw,
2193 back->image,
2194 source->image,
2195 0, 0, draw->width, draw->height,
2196 0, 0, 0);
2197 back->last_swap = source->last_swap;
2198 draw->cur_blit_source = -1;
2199 }
2200
2201 return back;
2202 }