src/loader/loader_dri3_helper.c
1 /*
2 * Copyright © 2013 Keith Packard
3 * Copyright © 2015 Boyan Ding
4 *
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that copyright
8 * notice and this permission notice appear in supporting documentation, and
9 * that the name of the copyright holders not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. The copyright holders make no representations
12 * about the suitability of this software for any purpose. It is provided "as
13 * is" without express or implied warranty.
14 *
15 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21 * OF THIS SOFTWARE.
22 */
23
24 #include <fcntl.h>
25 #include <stdlib.h>
26 #include <unistd.h>
27
28 #include <X11/xshmfence.h>
29 #include <xcb/xcb.h>
30 #include <xcb/dri3.h>
31 #include <xcb/present.h>
32
33 #include <X11/Xlib-xcb.h>
34
35 #include "loader_dri3_helper.h"
36
37 /* From xmlpool/options.h, user-exposed so should be stable */
38 #define DRI_CONF_VBLANK_NEVER 0
39 #define DRI_CONF_VBLANK_DEF_INTERVAL_0 1
40 #define DRI_CONF_VBLANK_DEF_INTERVAL_1 2
41 #define DRI_CONF_VBLANK_ALWAYS_SYNC 3
42
43 static inline void
44 dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
45 {
46 xshmfence_reset(buffer->shm_fence);
47 }
48
49 static inline void
50 dri3_fence_set(struct loader_dri3_buffer *buffer)
51 {
52 xshmfence_trigger(buffer->shm_fence);
53 }
54
55 static inline void
56 dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
57 {
58 xcb_sync_trigger_fence(c, buffer->sync_fence);
59 }
60
61 static inline void
62 dri3_fence_await(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
63 {
64 xcb_flush(c);
65 xshmfence_await(buffer->shm_fence);
66 }
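
/* A minimal sketch of how these fence helpers are combined around a
 * server-side operation elsewhere in this file: reset the shared-memory
 * fence, queue the work, ask the server to trigger the SYNC fence once the
 * work is processed, then block on the shared-memory side. "buffer" stands
 * for any loader_dri3_buffer whose sync_fence/shm_fence pair was created
 * with xcb_dri3_fence_from_fd() and xshmfence_map_shm().
 *
 *    dri3_fence_reset(draw->conn, buffer);
 *    ... xcb_copy_area()/xcb_present_pixmap() touching buffer->pixmap ...
 *    dri3_fence_trigger(draw->conn, buffer);  // server sets fence when done
 *    dri3_fence_await(draw->conn, buffer);    // client blocks until it is set
 */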
67
68 static void
69 dri3_update_num_back(struct loader_dri3_drawable *draw)
70 {
71 if (draw->flipping)
72 draw->num_back = 3;
73 else
74 draw->num_back = 2;
75 }
76
77 void
78 loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
79 {
80 draw->vtable->set_swap_interval(draw, interval);
81 dri3_update_num_back(draw);
82 }
83
84 /** dri3_free_render_buffer
85 *
86 * Free everything associated with one render buffer, including its pixmap,
87 * fences and the driver image
88 */
89 static void
90 dri3_free_render_buffer(struct loader_dri3_drawable *draw,
91 struct loader_dri3_buffer *buffer)
92 {
93 if (buffer->own_pixmap)
94 xcb_free_pixmap(draw->conn, buffer->pixmap);
95 xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
96 xshmfence_unmap_shm(buffer->shm_fence);
97 draw->ext->image->destroyImage(buffer->image);
98 if (buffer->linear_buffer)
99 draw->ext->image->destroyImage(buffer->linear_buffer);
100 free(buffer);
101 }
102
103 void
104 loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
105 {
106 int i;
107
108 draw->ext->core->destroyDrawable(draw->dri_drawable);
109
110 for (i = 0; i < LOADER_DRI3_NUM_BUFFERS; i++) {
111 if (draw->buffers[i])
112 dri3_free_render_buffer(draw, draw->buffers[i]);
113 }
114
115 if (draw->special_event) {
116 xcb_void_cookie_t cookie =
117 xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
118 XCB_PRESENT_EVENT_MASK_NO_EVENT);
119
120 xcb_discard_reply(draw->conn, cookie.sequence);
121 xcb_unregister_for_special_event(draw->conn, draw->special_event);
122 }
123 }
124
125 int
126 loader_dri3_drawable_init(xcb_connection_t *conn,
127 xcb_drawable_t drawable,
128 __DRIscreen *dri_screen,
129 bool is_different_gpu,
130 const __DRIconfig *dri_config,
131 struct loader_dri3_extensions *ext,
132 const struct loader_dri3_vtable *vtable,
133 struct loader_dri3_drawable *draw)
134 {
135 xcb_get_geometry_cookie_t cookie;
136 xcb_get_geometry_reply_t *reply;
137 xcb_generic_error_t *error;
138 GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1;
139 int swap_interval;
140
141 draw->conn = conn;
142 draw->ext = ext;
143 draw->vtable = vtable;
144 draw->drawable = drawable;
145 draw->dri_screen = dri_screen;
146 draw->is_different_gpu = is_different_gpu;
147
148 draw->have_back = 0;
149 draw->have_fake_front = 0;
150 draw->first_init = true;
151
152 if (draw->ext->config)
153 draw->ext->config->configQueryi(draw->dri_screen,
154 "vblank_mode", &vblank_mode);
155
156 switch (vblank_mode) {
157 case DRI_CONF_VBLANK_NEVER:
158 case DRI_CONF_VBLANK_DEF_INTERVAL_0:
159 swap_interval = 0;
160 break;
161 case DRI_CONF_VBLANK_DEF_INTERVAL_1:
162 case DRI_CONF_VBLANK_ALWAYS_SYNC:
163 default:
164 swap_interval = 1;
165 break;
166 }
167 draw->vtable->set_swap_interval(draw, swap_interval);
168
169 dri3_update_num_back(draw);
170
171 /* Create a new drawable */
172 draw->dri_drawable =
173 draw->ext->image_driver->createNewDrawable(dri_screen,
174 dri_config,
175 draw);
176
177 if (!draw->dri_drawable)
178 return 1;
179
180 cookie = xcb_get_geometry(draw->conn, draw->drawable);
181 reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
182 if (reply == NULL || error != NULL) {
183 draw->ext->core->destroyDrawable(draw->dri_drawable);
184 return 1;
185 }
186
187 draw->width = reply->width;
188 draw->height = reply->height;
189 draw->depth = reply->depth;
190 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
191 free(reply);
192
193 /*
194 * Make sure server has the same swap interval we do for the new
195 * drawable.
196 */
197 loader_dri3_set_swap_interval(draw, swap_interval);
198
199 return 0;
200 }
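
/* Hypothetical caller sketch (the real wiring lives in the GLX and EGL
 * loaders): the backend fills a loader_dri3_vtable with its callbacks and
 * passes a zero-initialized loader_dri3_drawable to
 * loader_dri3_drawable_init(), treating a non-zero return as failure.
 * Every my_* name below is a placeholder, not a symbol from this file.
 *
 *    static const struct loader_dri3_vtable my_vtable = {
 *       .set_drawable_size  = my_set_drawable_size,
 *       .in_current_context = my_in_current_context,
 *       .get_dri_context    = my_get_dri_context,
 *       .get_dri_screen     = my_get_dri_screen,
 *       .flush_drawable     = my_flush_drawable,
 *       .set_swap_interval  = my_set_swap_interval,
 *       .get_swap_interval  = my_get_swap_interval,
 *       .show_fps           = NULL,
 *    };
 *
 *    if (loader_dri3_drawable_init(conn, xid, dri_screen, is_different_gpu,
 *                                  dri_config, &my_screen->ext, &my_vtable,
 *                                  &my_drawable->loader) != 0)
 *       ... creation failed ...
 */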
201
202 /*
203 * Process one Present event
204 */
205 static void
206 dri3_handle_present_event(struct loader_dri3_drawable *draw,
207 xcb_present_generic_event_t *ge)
208 {
209 switch (ge->evtype) {
210 case XCB_PRESENT_CONFIGURE_NOTIFY: {
211 xcb_present_configure_notify_event_t *ce = (void *) ge;
212
213 draw->width = ce->width;
214 draw->height = ce->height;
215 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
216 break;
217 }
218 case XCB_PRESENT_COMPLETE_NOTIFY: {
219 xcb_present_complete_notify_event_t *ce = (void *) ge;
220
221 /* Compute the processed SBC number from the received 32-bit serial number
222 * merged with the upper 32-bits of the sent 64-bit serial number while
223 * checking for wrap.
224 */
225 if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
226 draw->recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;
227 if (draw->recv_sbc > draw->send_sbc)
228 draw->recv_sbc -= 0x100000000;
229 switch (ce->mode) {
230 case XCB_PRESENT_COMPLETE_MODE_FLIP:
231 draw->flipping = true;
232 break;
233 case XCB_PRESENT_COMPLETE_MODE_COPY:
234 draw->flipping = false;
235 break;
236 }
237 dri3_update_num_back(draw);
238
239 if (draw->vtable->show_fps)
240 draw->vtable->show_fps(draw, ce->ust);
241
242 draw->ust = ce->ust;
243 draw->msc = ce->msc;
244 } else {
245 draw->recv_msc_serial = ce->serial;
246 draw->notify_ust = ce->ust;
247 draw->notify_msc = ce->msc;
248 }
249 break;
250 }
251 case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
252 xcb_present_idle_notify_event_t *ie = (void *) ge;
253 int b;
254
255 for (b = 0; b < sizeof(draw->buffers) / sizeof(draw->buffers[0]); b++) {
256 struct loader_dri3_buffer *buf = draw->buffers[b];
257
258 if (buf && buf->pixmap == ie->pixmap) {
259 buf->busy = 0;
260 if (draw->num_back <= b && b < LOADER_DRI3_MAX_BACK) {
261 dri3_free_render_buffer(draw, buf);
262 draw->buffers[b] = NULL;
263 }
264 break;
265 }
266 }
267 break;
268 }
269 }
270 free(ge);
271 }
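
/* Worked example of the SBC reconstruction above, with purely illustrative
 * numbers: suppose send_sbc has already crossed a 32-bit boundary while the
 * completion event still carries a serial from before the wrap.
 *
 *    send_sbc   = 0x100000002, ce->serial = 0xfffffffe
 *    recv_sbc   = (0x100000000 | 0xfffffffe) = 0x1fffffffe  (> send_sbc)
 *    recv_sbc  -= 0x100000000                -> 0xfffffffe
 *
 * so the completion is attributed to a swap requested before the 32-bit
 * serial wrapped, as intended.
 */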
272
273 static bool
274 dri3_wait_for_event(struct loader_dri3_drawable *draw)
275 {
276 xcb_generic_event_t *ev;
277 xcb_present_generic_event_t *ge;
278
279 xcb_flush(draw->conn);
280 ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
281 if (!ev)
282 return false;
283 ge = (void *) ev;
284 dri3_handle_present_event(draw, ge);
285 return true;
286 }
287
288 /** loader_dri3_wait_for_msc
289 *
290 * Get the X server to send an event when the target msc/divisor/remainder is
291 * reached.
292 */
293 bool
294 loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
295 int64_t target_msc,
296 int64_t divisor, int64_t remainder,
297 int64_t *ust, int64_t *msc, int64_t *sbc)
298 {
299 uint32_t msc_serial;
300
301 msc_serial = ++draw->send_msc_serial;
302 xcb_present_notify_msc(draw->conn,
303 draw->drawable,
304 msc_serial,
305 target_msc,
306 divisor,
307 remainder);
308
309 xcb_flush(draw->conn);
310
311 /* Wait for the event */
312 if (draw->special_event) {
313 while ((int32_t) (msc_serial - draw->recv_msc_serial) > 0) {
314 if (!dri3_wait_for_event(draw))
315 return false;
316 }
317 }
318
319 *ust = draw->notify_ust;
320 *msc = draw->notify_msc;
321 *sbc = draw->recv_sbc;
322
323 return true;
324 }
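
/* Usage sketch: this is the kind of call that backs glXWaitForMscOML(). A
 * hypothetical caller waiting for the next frame boundary after msc_now
 * (an assumed local variable) could do:
 *
 *    int64_t ust, msc, sbc;
 *    if (!loader_dri3_wait_for_msc(draw, msc_now + 1, 0, 0, &ust, &msc, &sbc))
 *       ... X connection error ...
 *    // ust/msc describe when the notify fired; sbc is the last completed swap
 */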
325
326 /** loader_dri3_wait_for_sbc
327 *
328 * Wait for the completed swap buffer count to reach the specified
329 * target. Presumably the application knows that this will be reached with
330 * outstanding complete events, or we're going to be here a while.
331 */
332 int
333 loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
334 int64_t target_sbc, int64_t *ust,
335 int64_t *msc, int64_t *sbc)
336 {
337 /* From the GLX_OML_sync_control spec:
338 *
339 * "If <target_sbc> = 0, the function will block until all previous
340 * swaps requested with glXSwapBuffersMscOML for that window have
341 * completed."
342 */
343 if (!target_sbc)
344 target_sbc = draw->send_sbc;
345
346 while (draw->recv_sbc < target_sbc) {
347 if (!dri3_wait_for_event(draw))
348 return 0;
349 }
350
351 *ust = draw->ust;
352 *msc = draw->msc;
353 *sbc = draw->recv_sbc;
354 return 1;
355 }
356
357 /** dri3_find_back
358 *
359 * Find an idle back buffer. If there isn't one, then
360 * wait for a present idle notify event from the X server
361 */
362 static int
363 dri3_find_back(struct loader_dri3_drawable *draw)
364 {
365 int b;
366 xcb_generic_event_t *ev;
367 xcb_present_generic_event_t *ge;
368
369 for (;;) {
370 for (b = 0; b < draw->num_back; b++) {
371 int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->num_back);
372 struct loader_dri3_buffer *buffer = draw->buffers[id];
373
374 if (!buffer || !buffer->busy) {
375 draw->cur_back = id;
376 return id;
377 }
378 }
379 xcb_flush(draw->conn);
380 ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
381 if (!ev)
382 return -1;
383 ge = (void *) ev;
384 dri3_handle_present_event(draw, ge);
385 }
386 }
387
388 static xcb_gcontext_t
389 dri3_drawable_gc(struct loader_dri3_drawable *draw)
390 {
391 if (!draw->gc) {
392 uint32_t v = 0;
393 xcb_create_gc(draw->conn,
394 (draw->gc = xcb_generate_id(draw->conn)),
395 draw->drawable,
396 XCB_GC_GRAPHICS_EXPOSURES,
397 &v);
398 }
399 return draw->gc;
400 }
401
402
403 static struct loader_dri3_buffer *
404 dri3_back_buffer(struct loader_dri3_drawable *draw)
405 {
406 return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
407 }
408
409 static struct loader_dri3_buffer *
410 dri3_fake_front_buffer(struct loader_dri3_drawable *draw)
411 {
412 return draw->buffers[LOADER_DRI3_FRONT_ID];
413 }
414
415 static void
416 dri3_copy_area(xcb_connection_t *c,
417 xcb_drawable_t src_drawable,
418 xcb_drawable_t dst_drawable,
419 xcb_gcontext_t gc,
420 int16_t src_x,
421 int16_t src_y,
422 int16_t dst_x,
423 int16_t dst_y,
424 uint16_t width,
425 uint16_t height)
426 {
427 xcb_void_cookie_t cookie;
428
429 cookie = xcb_copy_area_checked(c,
430 src_drawable,
431 dst_drawable,
432 gc,
433 src_x,
434 src_y,
435 dst_x,
436 dst_y,
437 width,
438 height);
439 xcb_discard_reply(c, cookie.sequence);
440 }
441
442 /**
443 * Asks the driver to flush any queued work necessary for serializing with the
444 * X command stream, and optionally the slightly more strict requirement of
445 * glFlush() equivalence (which would require flushing even if nothing had
446 * been drawn to a window system framebuffer, for example).
447 */
448 void
449 loader_dri3_flush(struct loader_dri3_drawable *draw,
450 unsigned flags,
451 enum __DRI2throttleReason throttle_reason)
452 {
453 /* The context may be NULL, in which case there is nothing to flush */
454 __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);
455
456 if (dri_context) {
457 draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
458 flags, throttle_reason);
459 }
460 }
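
/* Usage sketch, mirroring the callers later in this file: the flags say what
 * to flush and the throttle reason says why, e.g. before presenting:
 *
 *    loader_dri3_flush(draw,
 *                      __DRI2_FLUSH_DRAWABLE | __DRI2_FLUSH_CONTEXT,
 *                      __DRI2_THROTTLE_SWAPBUFFER);
 */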
461
462 void
463 loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
464 int x, int y,
465 int width, int height,
466 bool flush)
467 {
468 struct loader_dri3_buffer *back;
469 unsigned flags = __DRI2_FLUSH_DRAWABLE;
470 __DRIcontext *dri_context;
471
472 dri_context = draw->vtable->get_dri_context(draw);
473
474 /* Check we have the right attachments */
475 if (!draw->have_back || draw->is_pixmap)
476 return;
477
478 if (flush)
479 flags |= __DRI2_FLUSH_CONTEXT;
480 loader_dri3_flush(draw, flags, __DRI2_THROTTLE_SWAPBUFFER);
481
482 back = dri3_back_buffer(draw);
483 y = draw->height - y - height;
484
485 if (draw->is_different_gpu && draw->vtable->in_current_context(draw)) {
486 /* Update the linear buffer part of the back buffer
487 * for the dri3_copy_area operation
488 */
489 draw->ext->image->blitImage(dri_context,
490 back->linear_buffer,
491 back->image,
492 0, 0, back->width,
493 back->height,
494 0, 0, back->width,
495 back->height, __BLIT_FLAG_FLUSH);
496 /* We use blitImage to update our fake front as well.
497 */
498 if (draw->have_fake_front)
499 draw->ext->image->blitImage(dri_context,
500 dri3_fake_front_buffer(draw)->image,
501 back->image,
502 x, y, width, height,
503 x, y, width, height, __BLIT_FLAG_FLUSH);
504 }
505
506 loader_dri3_swapbuffer_barrier(draw);
507 dri3_fence_reset(draw->conn, back);
508 dri3_copy_area(draw->conn,
509 dri3_back_buffer(draw)->pixmap,
510 draw->drawable,
511 dri3_drawable_gc(draw),
512 x, y, x, y, width, height);
513 dri3_fence_trigger(draw->conn, back);
514 /* Refresh the fake front (if present) after we just damaged the real
515 * front.
516 */
517 if (draw->have_fake_front && !draw->is_different_gpu) {
518 dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
519 dri3_copy_area(draw->conn,
520 dri3_back_buffer(draw)->pixmap,
521 dri3_fake_front_buffer(draw)->pixmap,
522 dri3_drawable_gc(draw),
523 x, y, x, y, width, height);
524 dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
525 dri3_fence_await(draw->conn, dri3_fake_front_buffer(draw));
526 }
527 dri3_fence_await(draw->conn, back);
528 }
529
530 void
531 loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
532 xcb_drawable_t dest,
533 xcb_drawable_t src)
534 {
535 loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, 0);
536
537 dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
538 dri3_copy_area(draw->conn,
539 src, dest,
540 dri3_drawable_gc(draw),
541 0, 0, 0, 0, draw->width, draw->height);
542 dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
543 dri3_fence_await(draw->conn, dri3_fake_front_buffer(draw));
544 }
545
546 void
547 loader_dri3_wait_x(struct loader_dri3_drawable *draw)
548 {
549 struct loader_dri3_buffer *front;
550 __DRIcontext *dri_context;
551
552 if (draw == NULL || !draw->have_fake_front)
553 return;
554
555 front = dri3_fake_front_buffer(draw);
556 dri_context = draw->vtable->get_dri_context(draw);
557
558 loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);
559
560 /* In the draw->is_different_gpu case, the linear buffer has been updated,
561 * but not yet the tiled buffer.
562 * Copy back to the tiled buffer we use for rendering.
563 * Note that we don't need flushing.
564 */
565 if (draw->is_different_gpu && draw->vtable->in_current_context(draw))
566 draw->ext->image->blitImage(dri_context,
567 front->image,
568 front->linear_buffer,
569 0, 0, front->width,
570 front->height,
571 0, 0, front->width,
572 front->height, 0);
573 }
574
575 void
576 loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
577 {
578 struct loader_dri3_buffer *front;
579 __DRIcontext *dri_context;
580
581 if (draw == NULL || !draw->have_fake_front)
582 return;
583
584 front = dri3_fake_front_buffer(draw);
585 dri_context = draw->vtable->get_dri_context(draw);
586
587 /* In the draw->is_different_gpu case, we update the linear_buffer
588 * before updating the real front.
589 */
590 if (draw->is_different_gpu && draw->vtable->in_current_context(draw))
591 draw->ext->image->blitImage(dri_context,
592 front->linear_buffer,
593 front->image,
594 0, 0, front->width,
595 front->height,
596 0, 0, front->width,
597 front->height, __BLIT_FLAG_FLUSH);
598 loader_dri3_swapbuffer_barrier(draw);
599 loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
600 }
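
/* Sketch of the fake-front synchronization these two helpers provide,
 * assuming the usual mapping in the GLX layer (glXWaitX ->
 * loader_dri3_wait_x, glXWaitGL -> loader_dri3_wait_gl):
 *
 *    loader_dri3_wait_x(draw);   // real front (window) -> fake front pixmap
 *    loader_dri3_wait_gl(draw);  // fake front pixmap   -> real front (window)
 *
 * Both return immediately when the drawable has no fake front buffer.
 */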
601
602 /** dri3_flush_present_events
603 *
604 * Process any present events that have been received from the X server
605 */
606 static void
607 dri3_flush_present_events(struct loader_dri3_drawable *draw)
608 {
609 /* Check to see if any configuration changes have occurred
610 * since we were last invoked
611 */
612 if (draw->special_event) {
613 xcb_generic_event_t *ev;
614
615 while ((ev = xcb_poll_for_special_event(draw->conn,
616 draw->special_event)) != NULL) {
617 xcb_present_generic_event_t *ge = (void *) ev;
618 dri3_handle_present_event(draw, ge);
619 }
620 }
621 }
622
623 /** loader_dri3_swap_buffers_msc
624 *
625 * Make the current back buffer visible using the present extension
626 */
627 int64_t
628 loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
629 int64_t target_msc, int64_t divisor,
630 int64_t remainder, unsigned flush_flags,
631 bool force_copy)
632 {
633 struct loader_dri3_buffer *back;
634 __DRIcontext *dri_context;
635 int64_t ret = 0;
636 uint32_t options = XCB_PRESENT_OPTION_NONE;
637 int swap_interval;
638
639 dri_context = draw->vtable->get_dri_context(draw);
640 swap_interval = draw->vtable->get_swap_interval(draw);
641
642 draw->vtable->flush_drawable(draw, flush_flags);
643
644 back = draw->buffers[dri3_find_back(draw)];
645 if (draw->is_different_gpu && back) {
646 /* Update the linear buffer before presenting the pixmap */
647 draw->ext->image->blitImage(dri_context,
648 back->linear_buffer,
649 back->image,
650 0, 0, back->width,
651 back->height,
652 0, 0, back->width,
653 back->height, __BLIT_FLAG_FLUSH);
654 /* Update the fake front */
655 if (draw->have_fake_front)
656 draw->ext->image->blitImage(dri_context,
657 draw->buffers[LOADER_DRI3_FRONT_ID]->image,
658 back->image,
659 0, 0, draw->width, draw->height,
660 0, 0, draw->width, draw->height,
661 __BLIT_FLAG_FLUSH);
662 }
663
664 dri3_flush_present_events(draw);
665
666 if (back && !draw->is_pixmap) {
667 dri3_fence_reset(draw->conn, back);
668
669 /* Compute when we want the frame shown by taking the last known
670 * successful MSC and adding in a swap interval for each outstanding swap
671 * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
672 * semantic"
673 */
674 ++draw->send_sbc;
675 if (target_msc == 0 && divisor == 0 && remainder == 0)
676 target_msc = draw->msc + swap_interval *
677 (draw->send_sbc - draw->recv_sbc);
678 else if (divisor == 0 && remainder > 0) {
679 /* From the GLX_OML_sync_control spec:
680 * "If <divisor> = 0, the swap will occur when MSC becomes
681 * greater than or equal to <target_msc>."
682 *
683 * Note that there's no mention of the remainder. The Present
684 * extension throws BadValue for remainder != 0 with divisor == 0, so
685 * just drop the passed in value.
686 */
687 remainder = 0;
688 }
689
690 /* From the GLX_EXT_swap_control spec
691 * and the EGL 1.4 spec (page 53):
692 *
693 * "If <interval> is set to a value of 0, buffer swaps are not
694 * synchronized to a video frame."
695 *
696 * Implementation note: It is possible to enable triple buffering
697 * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
698 * the default.
699 */
700 if (swap_interval == 0)
701 options |= XCB_PRESENT_OPTION_ASYNC;
702 if (force_copy)
703 options |= XCB_PRESENT_OPTION_COPY;
704
705 back->busy = 1;
706 back->last_swap = draw->send_sbc;
707 xcb_present_pixmap(draw->conn,
708 draw->drawable,
709 back->pixmap,
710 (uint32_t) draw->send_sbc,
711 0, /* valid */
712 0, /* update */
713 0, /* x_off */
714 0, /* y_off */
715 None, /* target_crtc */
716 None,
717 back->sync_fence,
718 options,
719 target_msc,
720 divisor,
721 remainder, 0, NULL);
722 ret = (int64_t) draw->send_sbc;
723
724 /* If there's a fake front, then copy the source back buffer
725 * to the fake front to keep it up to date. This needs
726 * to reset the fence and make future users block until
727 * the X server is done copying the bits
728 */
729 if (draw->have_fake_front && !draw->is_different_gpu) {
730 dri3_fence_reset(draw->conn, draw->buffers[LOADER_DRI3_FRONT_ID]);
731 dri3_copy_area(draw->conn,
732 back->pixmap,
733 draw->buffers[LOADER_DRI3_FRONT_ID]->pixmap,
734 dri3_drawable_gc(draw),
735 0, 0, 0, 0,
736 draw->width, draw->height);
737 dri3_fence_trigger(draw->conn, draw->buffers[LOADER_DRI3_FRONT_ID]);
738 }
739 xcb_flush(draw->conn);
740 if (draw->stamp)
741 ++(*draw->stamp);
742 }
743
744 draw->ext->flush->invalidate(draw->dri_drawable);
745
746 return ret;
747 }
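
/* Usage sketch plus a worked example of the target MSC computation above.
 * A plain SwapBuffers request maps to target_msc = divisor = remainder = 0;
 * the return value is the SBC assigned to the queued swap (0 if nothing was
 * presented):
 *
 *    int64_t sbc = loader_dri3_swap_buffers_msc(draw, 0, 0, 0,
 *                                               __DRI2_FLUSH_DRAWABLE |
 *                                               __DRI2_FLUSH_CONTEXT,
 *                                               false);
 *
 * With illustrative numbers swap_interval = 1, msc = 1000, send_sbc = 12
 * (after the increment) and recv_sbc = 10, the computed target is
 * 1000 + 1 * (12 - 10) = 1002: the new frame is scheduled two vblanks after
 * the last completed one, leaving room for the two outstanding swaps.
 */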
748
749 int
750 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
751 {
752 int back_id = LOADER_DRI3_BACK_ID(dri3_find_back(draw));
753
754 if (back_id < 0 || !draw->buffers[back_id])
755 return 0;
756
757 if (draw->buffers[back_id]->last_swap != 0)
758 return draw->send_sbc - draw->buffers[back_id]->last_swap + 1;
759 else
760 return 0;
761 }
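
/* Worked example of the age computation, with illustrative numbers: if the
 * back buffer picked by dri3_find_back() was last presented at sbc 7 and
 * swaps have since been requested up to send_sbc 9, its reported age is
 * 9 - 7 + 1 = 3 frames; a buffer that was never presented reports 0
 * (undefined content), matching GLX_EXT_buffer_age/EGL_EXT_buffer_age.
 */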
762
763 /** loader_dri3_open
764 *
765 * Wrapper around xcb_dri3_open
766 */
767 int
768 loader_dri3_open(xcb_connection_t *conn,
769 xcb_window_t root,
770 uint32_t provider)
771 {
772 xcb_dri3_open_cookie_t cookie;
773 xcb_dri3_open_reply_t *reply;
774 int fd;
775
776 cookie = xcb_dri3_open(conn,
777 root,
778 provider);
779
780 reply = xcb_dri3_open_reply(conn, cookie, NULL);
781 if (!reply)
782 return -1;
783
784 if (reply->nfd != 1) {
785 free(reply);
786 return -1;
787 }
788
789 fd = xcb_dri3_open_reply_fds(conn, reply)[0];
790 free(reply);
791 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
792
793 return fd;
794 }
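
/* Usage sketch (hypothetical caller): open the DRM device backing the
 * screen's root window and hand the fd to the driver; the GLX/EGL loaders
 * pass None to let the server choose the default provider.
 *
 *    int fd = loader_dri3_open(conn, root_window, None);
 *    if (fd < 0)
 *       ... DRI3 unavailable, fall back to DRI2 or software ...
 *    // the helper already sets FD_CLOEXEC on the returned fd
 */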
795
796 static uint32_t
797 dri3_cpp_for_format(uint32_t format) {
798 switch (format) {
799 case __DRI_IMAGE_FORMAT_R8:
800 return 1;
801 case __DRI_IMAGE_FORMAT_RGB565:
802 case __DRI_IMAGE_FORMAT_GR88:
803 return 2;
804 case __DRI_IMAGE_FORMAT_XRGB8888:
805 case __DRI_IMAGE_FORMAT_ARGB8888:
806 case __DRI_IMAGE_FORMAT_ABGR8888:
807 case __DRI_IMAGE_FORMAT_XBGR8888:
808 case __DRI_IMAGE_FORMAT_XRGB2101010:
809 case __DRI_IMAGE_FORMAT_ARGB2101010:
810 case __DRI_IMAGE_FORMAT_SARGB8:
811 return 4;
812 case __DRI_IMAGE_FORMAT_NONE:
813 default:
814 return 0;
815 }
816 }
817
818 /** dri3_alloc_render_buffer
819 *
820 * Use the driver createImage function to construct a __DRIimage, then
821 * get a file descriptor for that and create an X pixmap from that
822 *
823 * Allocate an xshmfence for synchronization
824 */
825 static struct loader_dri3_buffer *
826 dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
827 int width, int height, int depth)
828 {
829 struct loader_dri3_buffer *buffer;
830 __DRIimage *pixmap_buffer;
831 xcb_pixmap_t pixmap;
832 xcb_sync_fence_t sync_fence;
833 struct xshmfence *shm_fence;
834 int buffer_fd, fence_fd;
835 int stride;
836
837 /* Create an xshmfence object and
838 * prepare to send that to the X server
839 */
840
841 fence_fd = xshmfence_alloc_shm();
842 if (fence_fd < 0)
843 return NULL;
844
845 shm_fence = xshmfence_map_shm(fence_fd);
846 if (shm_fence == NULL)
847 goto no_shm_fence;
848
849 /* Allocate the image from the driver
850 */
851 buffer = calloc(1, sizeof *buffer);
852 if (!buffer)
853 goto no_buffer;
854
855 buffer->cpp = dri3_cpp_for_format(format);
856 if (!buffer->cpp)
857 goto no_image;
858
859 if (!draw->is_different_gpu) {
860 buffer->image = draw->ext->image->createImage(draw->dri_screen,
861 width, height,
862 format,
863 __DRI_IMAGE_USE_SHARE |
864 __DRI_IMAGE_USE_SCANOUT |
865 __DRI_IMAGE_USE_BACKBUFFER,
866 buffer);
867 pixmap_buffer = buffer->image;
868
869 if (!buffer->image)
870 goto no_image;
871 } else {
872 buffer->image = draw->ext->image->createImage(draw->dri_screen,
873 width, height,
874 format,
875 0,
876 buffer);
877
878 if (!buffer->image)
879 goto no_image;
880
881 buffer->linear_buffer =
882 draw->ext->image->createImage(draw->dri_screen,
883 width, height, format,
884 __DRI_IMAGE_USE_SHARE |
885 __DRI_IMAGE_USE_LINEAR |
886 __DRI_IMAGE_USE_BACKBUFFER,
887 buffer);
888 pixmap_buffer = buffer->linear_buffer;
889
890 if (!buffer->linear_buffer)
891 goto no_linear_buffer;
892 }
893
894 /* X wants the stride, so ask the image for it
895 */
896 if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_STRIDE,
897 &stride))
898 goto no_buffer_attrib;
899
900 buffer->pitch = stride;
901
902 if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_FD,
903 &buffer_fd))
904 goto no_buffer_attrib;
905
906 xcb_dri3_pixmap_from_buffer(draw->conn,
907 (pixmap = xcb_generate_id(draw->conn)),
908 draw->drawable,
909 buffer->size,
910 width, height, buffer->pitch,
911 depth, buffer->cpp * 8,
912 buffer_fd);
913
914 xcb_dri3_fence_from_fd(draw->conn,
915 pixmap,
916 (sync_fence = xcb_generate_id(draw->conn)),
917 false,
918 fence_fd);
919
920 buffer->pixmap = pixmap;
921 buffer->own_pixmap = true;
922 buffer->sync_fence = sync_fence;
923 buffer->shm_fence = shm_fence;
924 buffer->width = width;
925 buffer->height = height;
926
927 /* Mark the buffer as idle
928 */
929 dri3_fence_set(buffer);
930
931 return buffer;
932
933 no_buffer_attrib:
934 draw->ext->image->destroyImage(pixmap_buffer);
935 no_linear_buffer:
936 if (draw->is_different_gpu)
937 draw->ext->image->destroyImage(buffer->image);
938 no_image:
939 free(buffer);
940 no_buffer:
941 xshmfence_unmap_shm(shm_fence);
942 no_shm_fence:
943 close(fence_fd);
944 return NULL;
945 }
946
947 /** dri3_update_drawable
948 *
949 * Called the first time we use the drawable and then
950 * after we receive present configure notify events to
951 * track the geometry of the drawable
952 */
953 static int
954 dri3_update_drawable(__DRIdrawable *driDrawable,
955 struct loader_dri3_drawable *draw)
956 {
957 if (draw->first_init) {
958 xcb_get_geometry_cookie_t geom_cookie;
959 xcb_get_geometry_reply_t *geom_reply;
960 xcb_void_cookie_t cookie;
961 xcb_generic_error_t *error;
962 xcb_present_query_capabilities_cookie_t present_capabilities_cookie;
963 xcb_present_query_capabilities_reply_t *present_capabilities_reply;
964
965 draw->first_init = false;
966
967 /* Try to select for input on the window.
968 *
969 * If the drawable is a window, this will get our events
970 * delivered.
971 *
972 * Otherwise, we'll get a BadWindow error back from this request which
973 * will let us know that the drawable is a pixmap instead.
974 */
975
976 draw->eid = xcb_generate_id(draw->conn);
977 cookie =
978 xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
979 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
980 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
981 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
982
983 present_capabilities_cookie =
984 xcb_present_query_capabilities(draw->conn, draw->drawable);
985
986 /* Create an XCB event queue to hold present events outside of the usual
987 * application event queue
988 */
989 draw->special_event = xcb_register_for_special_xge(draw->conn,
990 &xcb_present_id,
991 draw->eid,
992 draw->stamp);
993 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
994
995 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
996
997 if (!geom_reply)
998 return false;
999
1000 draw->width = geom_reply->width;
1001 draw->height = geom_reply->height;
1002 draw->depth = geom_reply->depth;
1003 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1004
1005 free(geom_reply);
1006
1007 draw->is_pixmap = false;
1008
1009 /* Check to see if our select input call failed. If it failed with a
1010 * BadWindow error, then assume the drawable is a pixmap. Destroy the
1011 * special event queue created above and mark the drawable as a pixmap
1012 */
1013
1014 error = xcb_request_check(draw->conn, cookie);
1015
1016 present_capabilities_reply =
1017 xcb_present_query_capabilities_reply(draw->conn,
1018 present_capabilities_cookie,
1019 NULL);
1020
1021 if (present_capabilities_reply) {
1022 draw->present_capabilities = present_capabilities_reply->capabilities;
1023 free(present_capabilities_reply);
1024 } else
1025 draw->present_capabilities = 0;
1026
1027 if (error) {
1028 if (error->error_code != BadWindow) {
1029 free(error);
1030 return false;
1031 }
1032 draw->is_pixmap = true;
1033 xcb_unregister_for_special_event(draw->conn, draw->special_event);
1034 draw->special_event = NULL;
1035 }
1036 }
1037 dri3_flush_present_events(draw);
1038 return true;
1039 }
1040
1041 /* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
1042 * the createImageFromFds call takes __DRI_IMAGE_FOURCC codes. To avoid
1043 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
1044 * translate to __DRI_IMAGE_FOURCC codes in the call to createImageFromFds
1045 */
1046 static int
1047 image_format_to_fourcc(int format)
1048 {
1049
1050 /* Convert from __DRI_IMAGE_FORMAT to __DRI_IMAGE_FOURCC (sigh) */
1051 switch (format) {
1052 case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
1053 case __DRI_IMAGE_FORMAT_RGB565: return __DRI_IMAGE_FOURCC_RGB565;
1054 case __DRI_IMAGE_FORMAT_XRGB8888: return __DRI_IMAGE_FOURCC_XRGB8888;
1055 case __DRI_IMAGE_FORMAT_ARGB8888: return __DRI_IMAGE_FOURCC_ARGB8888;
1056 case __DRI_IMAGE_FORMAT_ABGR8888: return __DRI_IMAGE_FOURCC_ABGR8888;
1057 case __DRI_IMAGE_FORMAT_XBGR8888: return __DRI_IMAGE_FOURCC_XBGR8888;
1058 }
1059 return 0;
1060 }
1061
1062 __DRIimage *
1063 loader_dri3_create_image(xcb_connection_t *c,
1064 xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1065 unsigned int format,
1066 __DRIscreen *dri_screen,
1067 const __DRIimageExtension *image,
1068 void *loaderPrivate)
1069 {
1070 int *fds;
1071 __DRIimage *image_planar, *ret;
1072 int stride, offset;
1073
1074 /* Get an FD for the pixmap object
1075 */
1076 fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1077
1078 stride = bp_reply->stride;
1079 offset = 0;
1080
1081 /* createImageFromFds creates a wrapper __DRIimage structure which
1082 * can deal with multiple planes for things like YUV images. So, once
1083 * we've gotten the planar wrapper, pull the single plane out of it and
1084 * discard the wrapper.
1085 */
1086 image_planar = image->createImageFromFds(dri_screen,
1087 bp_reply->width,
1088 bp_reply->height,
1089 image_format_to_fourcc(format),
1090 fds, 1,
1091 &stride, &offset, loaderPrivate);
1092 close(fds[0]);
1093 if (!image_planar)
1094 return NULL;
1095
1096 ret = image->fromPlanar(image_planar, 0, loaderPrivate);
1097
1098 image->destroyImage(image_planar);
1099
1100 return ret;
1101 }
1102
1103 /** dri3_get_pixmap_buffer
1104 *
1105 * Get the DRM object for a pixmap from the X server and
1106 * wrap that with a __DRIimage structure using createImageFromFds
1107 */
1108 static struct loader_dri3_buffer *
1109 dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
1110 enum loader_dri3_buffer_type buffer_type,
1111 struct loader_dri3_drawable *draw)
1112 {
1113 int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
1114 struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
1115 xcb_drawable_t pixmap;
1116 xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
1117 xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;
1118 xcb_sync_fence_t sync_fence;
1119 struct xshmfence *shm_fence;
1120 int fence_fd;
1121 __DRIscreen *cur_screen;
1122
1123 if (buffer)
1124 return buffer;
1125
1126 pixmap = draw->drawable;
1127
1128 buffer = calloc(1, sizeof *buffer);
1129 if (!buffer)
1130 goto no_buffer;
1131
1132 fence_fd = xshmfence_alloc_shm();
1133 if (fence_fd < 0)
1134 goto no_fence;
1135 shm_fence = xshmfence_map_shm(fence_fd);
1136 if (shm_fence == NULL) {
1137 close (fence_fd);
1138 goto no_fence;
1139 }
1140
1141 xcb_dri3_fence_from_fd(draw->conn,
1142 pixmap,
1143 (sync_fence = xcb_generate_id(draw->conn)),
1144 false,
1145 fence_fd);
1146
1147 bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
1148 bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
1149 if (!bp_reply)
1150 goto no_image;
1151
1152 /* Get the currently-bound screen or revert to using the drawable's screen if
1153 * no contexts are currently bound. The latter case is at least necessary for
1154 * obs-studio, when using Window Capture (Xcomposite) as a Source.
1155 */
1156 cur_screen = draw->vtable->get_dri_screen(draw);
1157 if (!cur_screen) {
1158 cur_screen = draw->dri_screen;
1159 }
1160
1161 buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
1162 cur_screen, draw->ext->image,
1163 buffer);
1164 if (!buffer->image)
1165 goto no_image;
1166
1167 buffer->pixmap = pixmap;
1168 buffer->own_pixmap = false;
1169 buffer->width = bp_reply->width;
1170 buffer->height = bp_reply->height;
1171 buffer->buffer_type = buffer_type;
1172 buffer->shm_fence = shm_fence;
1173 buffer->sync_fence = sync_fence;
1174
1175 draw->buffers[buf_id] = buffer;
1176
1177 free(bp_reply);
1178
1179 return buffer;
1180
1181 no_image:
1182 free(bp_reply);
1183 xcb_sync_destroy_fence(draw->conn, sync_fence);
1184 xshmfence_unmap_shm(shm_fence);
1185 no_fence:
1186 free(buffer);
1187 no_buffer:
1188 return NULL;
1189 }
1190
1191 /** dri3_get_buffer
1192 *
1193 * Find a front or back buffer, allocating new ones as necessary
1194 */
1195 static struct loader_dri3_buffer *
1196 dri3_get_buffer(__DRIdrawable *driDrawable,
1197 unsigned int format,
1198 enum loader_dri3_buffer_type buffer_type,
1199 struct loader_dri3_drawable *draw)
1200 {
1201 struct loader_dri3_buffer *buffer;
1202 int buf_id;
1203 __DRIcontext *dri_context;
1204
1205 dri_context = draw->vtable->get_dri_context(draw);
1206
1207 if (buffer_type == loader_dri3_buffer_back) {
1208 buf_id = dri3_find_back(draw);
1209
1210 if (buf_id < 0)
1211 return NULL;
1212 } else {
1213 buf_id = LOADER_DRI3_FRONT_ID;
1214 }
1215
1216 buffer = draw->buffers[buf_id];
1217
1218 /* Allocate a new buffer if there isn't an old one, or if that
1219 * old one is the wrong size
1220 */
1221 if (!buffer || buffer->width != draw->width ||
1222 buffer->height != draw->height) {
1223 struct loader_dri3_buffer *new_buffer;
1224
1225 /* Allocate the new buffers
1226 */
1227 new_buffer = dri3_alloc_render_buffer(draw,
1228 format,
1229 draw->width,
1230 draw->height,
1231 draw->depth);
1232 if (!new_buffer)
1233 return NULL;
1234
1235 /* When resizing, copy the contents of the old buffer, waiting for that
1236 * copy to complete using our fences before proceeding
1237 */
1238 switch (buffer_type) {
1239 case loader_dri3_buffer_back:
1240 if (buffer) {
1241 if (!buffer->linear_buffer) {
1242 dri3_fence_reset(draw->conn, new_buffer);
1243 dri3_fence_await(draw->conn, buffer);
1244 dri3_copy_area(draw->conn,
1245 buffer->pixmap,
1246 new_buffer->pixmap,
1247 dri3_drawable_gc(draw),
1248 0, 0, 0, 0,
1249 draw->width, draw->height);
1250 dri3_fence_trigger(draw->conn, new_buffer);
1251 } else if (draw->vtable->in_current_context(draw)) {
1252 draw->ext->image->blitImage(dri_context,
1253 new_buffer->image,
1254 buffer->image,
1255 0, 0, draw->width, draw->height,
1256 0, 0, draw->width, draw->height, 0);
1257 }
1258 dri3_free_render_buffer(draw, buffer);
1259 }
1260 break;
1261 case loader_dri3_buffer_front:
1262 loader_dri3_swapbuffer_barrier(draw);
1263 dri3_fence_reset(draw->conn, new_buffer);
1264 dri3_copy_area(draw->conn,
1265 draw->drawable,
1266 new_buffer->pixmap,
1267 dri3_drawable_gc(draw),
1268 0, 0, 0, 0,
1269 draw->width, draw->height);
1270 dri3_fence_trigger(draw->conn, new_buffer);
1271
1272 if (new_buffer->linear_buffer &&
1273 draw->vtable->in_current_context(draw)) {
1274 dri3_fence_await(draw->conn, new_buffer);
1275 draw->ext->image->blitImage(dri_context,
1276 new_buffer->image,
1277 new_buffer->linear_buffer,
1278 0, 0, draw->width, draw->height,
1279 0, 0, draw->width, draw->height, 0);
1280 }
1281 break;
1282 }
1283 buffer = new_buffer;
1284 buffer->buffer_type = buffer_type;
1285 draw->buffers[buf_id] = buffer;
1286 }
1287 dri3_fence_await(draw->conn, buffer);
1288
1289 /* Return the requested buffer */
1290 return buffer;
1291 }
1292
1293 /** dri3_free_buffers
1294 *
1295 * Free the front buffer or all of the back buffers. Used
1296 * when the application changes which buffers it needs
1297 */
1298 static void
1299 dri3_free_buffers(__DRIdrawable *driDrawable,
1300 enum loader_dri3_buffer_type buffer_type,
1301 struct loader_dri3_drawable *draw)
1302 {
1303 struct loader_dri3_buffer *buffer;
1304 int first_id;
1305 int n_id;
1306 int buf_id;
1307
1308 switch (buffer_type) {
1309 case loader_dri3_buffer_back:
1310 first_id = LOADER_DRI3_BACK_ID(0);
1311 n_id = LOADER_DRI3_MAX_BACK;
1312 break;
1313 case loader_dri3_buffer_front:
1314 first_id = LOADER_DRI3_FRONT_ID;
1315 n_id = 1;
1316 }
1317
1318 for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) {
1319 buffer = draw->buffers[buf_id];
1320 if (buffer) {
1321 dri3_free_render_buffer(draw, buffer);
1322 draw->buffers[buf_id] = NULL;
1323 }
1324 }
1325 }
1326
1327 /** loader_dri3_get_buffers
1328 *
1329 * The published buffer allocation API.
1330 * Returns all of the necessary buffers, allocating
1331 * as needed.
1332 */
1333 int
1334 loader_dri3_get_buffers(__DRIdrawable *driDrawable,
1335 unsigned int format,
1336 uint32_t *stamp,
1337 void *loaderPrivate,
1338 uint32_t buffer_mask,
1339 struct __DRIimageList *buffers)
1340 {
1341 struct loader_dri3_drawable *draw = loaderPrivate;
1342 struct loader_dri3_buffer *front, *back;
1343
1344 buffers->image_mask = 0;
1345 buffers->front = NULL;
1346 buffers->back = NULL;
1347
1348 front = NULL;
1349 back = NULL;
1350
1351 if (!dri3_update_drawable(driDrawable, draw))
1352 return false;
1353
1354 /* pixmaps always have front buffers */
1355 if (draw->is_pixmap)
1356 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
1357
1358 if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
1359 /* All pixmaps are owned by the server gpu.
1360 * When we use a different gpu, we can't use the pixmap
1361 * as a buffer since it is potentially tiled in a way
1362 * our device can't understand. In this case, use
1363 * a fake front buffer. Hopefully the pixmap
1364 * content will get synced with the fake front
1365 * buffer.
1366 */
1367 if (draw->is_pixmap && !draw->is_different_gpu)
1368 front = dri3_get_pixmap_buffer(driDrawable,
1369 format,
1370 loader_dri3_buffer_front,
1371 draw);
1372 else
1373 front = dri3_get_buffer(driDrawable,
1374 format,
1375 loader_dri3_buffer_front,
1376 draw);
1377
1378 if (!front)
1379 return false;
1380 } else {
1381 dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
1382 draw->have_fake_front = 0;
1383 }
1384
1385 if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
1386 back = dri3_get_buffer(driDrawable,
1387 format,
1388 loader_dri3_buffer_back,
1389 draw);
1390 if (!back)
1391 return false;
1392 draw->have_back = 1;
1393 } else {
1394 dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
1395 draw->have_back = 0;
1396 }
1397
1398 if (front) {
1399 buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
1400 buffers->front = front->image;
1401 draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap;
1402 }
1403
1404 if (back) {
1405 buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
1406 buffers->back = back->image;
1407 }
1408
1409 draw->stamp = stamp;
1410
1411 return true;
1412 }
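
/* Sketch of how this entry point is typically published to the driver: the
 * GLX/EGL backend exposes it through the __DRI_IMAGE_LOADER extension so the
 * driver can call back for its buffers. Field names follow the
 * __DRIimageLoaderExtension definition in dri_interface.h;
 * my_flush_front_buffer is a placeholder for the backend's own fake-front
 * flush handler.
 *
 *    static const __DRIimageLoaderExtension image_loader_extension = {
 *       .base             = { __DRI_IMAGE_LOADER, 1 },
 *       .getBuffers       = loader_dri3_get_buffers,
 *       .flushFrontBuffer = my_flush_front_buffer,
 *    };
 */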
1413
1414 /** loader_dri3_update_drawable_geometry
1415 *
1416 * Get the current drawable geometry.
1417 */
1418 void
1419 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
1420 {
1421 xcb_get_geometry_cookie_t geom_cookie;
1422 xcb_get_geometry_reply_t *geom_reply;
1423
1424 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
1425
1426 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
1427
1428 if (geom_reply) {
1429 draw->width = geom_reply->width;
1430 draw->height = geom_reply->height;
1431 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1432
1433 free(geom_reply);
1434 }
1435 }
1436
1437
1438 /**
1439 * Make sure the server has flushed all pending swap buffers to hardware
1440 * for this drawable. Ideally we'd want to send an X protocol request to
1441 * have the server block our connection until the swaps are complete. That
1442 * would avoid the potential round-trip here.
1443 */
1444 void
1445 loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
1446 {
1447 int64_t ust, msc, sbc;
1448
1449 (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
1450 }