loader_dri3/glx/egl: Remove the loader_dri3_vtable get_dri_screen callback
[mesa.git] / src / loader / loader_dri3_helper.c
1 /*
2 * Copyright © 2013 Keith Packard
3 * Copyright © 2015 Boyan Ding
4 *
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that copyright
8 * notice and this permission notice appear in supporting documentation, and
9 * that the name of the copyright holders not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. The copyright holders make no representations
12 * about the suitability of this software for any purpose. It is provided "as
13 * is" without express or implied warranty.
14 *
15 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21 * OF THIS SOFTWARE.
22 */
23
24 #include <fcntl.h>
25 #include <stdlib.h>
26 #include <unistd.h>
27
28 #include <X11/xshmfence.h>
29 #include <xcb/xcb.h>
30 #include <xcb/dri3.h>
31 #include <xcb/present.h>
32
33 #include <X11/Xlib-xcb.h>
34
35 #include "loader_dri3_helper.h"
36
37 /* From xmlpool/options.h, user-exposed so should be stable */
38 #define DRI_CONF_VBLANK_NEVER 0
39 #define DRI_CONF_VBLANK_DEF_INTERVAL_0 1
40 #define DRI_CONF_VBLANK_DEF_INTERVAL_1 2
41 #define DRI_CONF_VBLANK_ALWAYS_SYNC 3
42
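/* Fence helpers: each buffer holds two handles to the same fence, an
 * xshmfence mapped into this process (buffer->shm_fence) which we reset,
 * trigger and await directly, and an XCB sync fence (buffer->sync_fence)
 * naming the fence to the X server, e.g. so Present can trigger it when
 * the buffer goes idle.
 */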
43 static inline void
44 dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
45 {
46 xshmfence_reset(buffer->shm_fence);
47 }
48
49 static inline void
50 dri3_fence_set(struct loader_dri3_buffer *buffer)
51 {
52 xshmfence_trigger(buffer->shm_fence);
53 }
54
55 static inline void
56 dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
57 {
58 xcb_sync_trigger_fence(c, buffer->sync_fence);
59 }
60
61 static inline void
62 dri3_fence_await(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
63 {
64 xcb_flush(c);
65 xshmfence_await(buffer->shm_fence);
66 }
67
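/* When the server is flipping, one of our buffers is being scanned out,
 * so keep a third back buffer to avoid stalling; the copy path only
 * needs two.
 */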
68 static void
69 dri3_update_num_back(struct loader_dri3_drawable *draw)
70 {
71 if (draw->flipping)
72 draw->num_back = 3;
73 else
74 draw->num_back = 2;
75 }
76
77 void
78 loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
79 {
80 draw->swap_interval = interval;
81 dri3_update_num_back(draw);
82 }
83
84 /** dri3_free_render_buffer
85 *
 86  * Free everything associated with one render buffer: the pixmap (if we
 87  * own it), the sync and shm fences, and the driver image(s)
88 */
89 static void
90 dri3_free_render_buffer(struct loader_dri3_drawable *draw,
91 struct loader_dri3_buffer *buffer)
92 {
93 if (buffer->own_pixmap)
94 xcb_free_pixmap(draw->conn, buffer->pixmap);
95 xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
96 xshmfence_unmap_shm(buffer->shm_fence);
97 draw->ext->image->destroyImage(buffer->image);
98 if (buffer->linear_buffer)
99 draw->ext->image->destroyImage(buffer->linear_buffer);
100 free(buffer);
101 }
102
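/** loader_dri3_drawable_fini
 *
 * Destroy the DRI drawable, free any remaining render buffers and stop
 * listening for Present events on the drawable.
 */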
103 void
104 loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
105 {
106 int i;
107
108 draw->ext->core->destroyDrawable(draw->dri_drawable);
109
110 for (i = 0; i < LOADER_DRI3_NUM_BUFFERS; i++) {
111 if (draw->buffers[i])
112 dri3_free_render_buffer(draw, draw->buffers[i]);
113 }
114
115 if (draw->special_event) {
116 xcb_void_cookie_t cookie =
117 xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
118 XCB_PRESENT_EVENT_MASK_NO_EVENT);
119
120 xcb_discard_reply(draw->conn, cookie.sequence);
121 xcb_unregister_for_special_event(draw->conn, draw->special_event);
122 }
123 }
124
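/** loader_dri3_drawable_init
 *
 * Fill in the loader_dri3_drawable: choose the swap interval from the
 * vblank_mode driconf option, create the DRI drawable and fetch the
 * initial geometry from the X server.
 *
 * Returns 0 on success, 1 on failure.
 */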
125 int
126 loader_dri3_drawable_init(xcb_connection_t *conn,
127 xcb_drawable_t drawable,
128 __DRIscreen *dri_screen,
129 bool is_different_gpu,
130 const __DRIconfig *dri_config,
131 struct loader_dri3_extensions *ext,
132 const struct loader_dri3_vtable *vtable,
133 struct loader_dri3_drawable *draw)
134 {
135 xcb_get_geometry_cookie_t cookie;
136 xcb_get_geometry_reply_t *reply;
137 xcb_generic_error_t *error;
138 GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1;
139 int swap_interval;
140
141 draw->conn = conn;
142 draw->ext = ext;
143 draw->vtable = vtable;
144 draw->drawable = drawable;
145 draw->dri_screen = dri_screen;
146 draw->is_different_gpu = is_different_gpu;
147
148 draw->have_back = 0;
149 draw->have_fake_front = 0;
150 draw->first_init = true;
151
152 if (draw->ext->config)
153 draw->ext->config->configQueryi(draw->dri_screen,
154 "vblank_mode", &vblank_mode);
155
156 switch (vblank_mode) {
157 case DRI_CONF_VBLANK_NEVER:
158 case DRI_CONF_VBLANK_DEF_INTERVAL_0:
159 swap_interval = 0;
160 break;
161 case DRI_CONF_VBLANK_DEF_INTERVAL_1:
162 case DRI_CONF_VBLANK_ALWAYS_SYNC:
163 default:
164 swap_interval = 1;
165 break;
166 }
167 draw->swap_interval = swap_interval;
168
169 dri3_update_num_back(draw);
170
171 /* Create a new drawable */
172 draw->dri_drawable =
173 draw->ext->image_driver->createNewDrawable(dri_screen,
174 dri_config,
175 draw);
176
177 if (!draw->dri_drawable)
178 return 1;
179
180 cookie = xcb_get_geometry(draw->conn, draw->drawable);
181 reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
182 if (reply == NULL || error != NULL) {
183 draw->ext->core->destroyDrawable(draw->dri_drawable);
184 return 1;
185 }
186
187 draw->width = reply->width;
188 draw->height = reply->height;
189 draw->depth = reply->depth;
190 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
191 free(reply);
192
193 /*
194  * Make sure the server has the same swap interval we do for the new
195 * drawable.
196 */
197 loader_dri3_set_swap_interval(draw, swap_interval);
198
199 return 0;
200 }
201
202 /*
203 * Process one Present event
204 */
205 static void
206 dri3_handle_present_event(struct loader_dri3_drawable *draw,
207 xcb_present_generic_event_t *ge)
208 {
209 switch (ge->evtype) {
210 case XCB_PRESENT_CONFIGURE_NOTIFY: {
211 xcb_present_configure_notify_event_t *ce = (void *) ge;
212
213 draw->width = ce->width;
214 draw->height = ce->height;
215 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
216 break;
217 }
218 case XCB_PRESENT_COMPLETE_NOTIFY: {
219 xcb_present_complete_notify_event_t *ce = (void *) ge;
220
221 /* Compute the processed SBC number from the received 32-bit serial number
222  * merged with the upper 32 bits of the sent 64-bit serial number while
223 * checking for wrap.
224 */
225 if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
226 draw->recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;
227 if (draw->recv_sbc > draw->send_sbc)
228 draw->recv_sbc -= 0x100000000;
229 switch (ce->mode) {
230 case XCB_PRESENT_COMPLETE_MODE_FLIP:
231 draw->flipping = true;
232 break;
233 case XCB_PRESENT_COMPLETE_MODE_COPY:
234 draw->flipping = false;
235 break;
236 }
237 dri3_update_num_back(draw);
238
239 if (draw->vtable->show_fps)
240 draw->vtable->show_fps(draw, ce->ust);
241
242 draw->ust = ce->ust;
243 draw->msc = ce->msc;
244 } else {
245 draw->recv_msc_serial = ce->serial;
246 draw->notify_ust = ce->ust;
247 draw->notify_msc = ce->msc;
248 }
249 break;
250 }
251 case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
252 xcb_present_idle_notify_event_t *ie = (void *) ge;
253 int b;
254
255 for (b = 0; b < sizeof(draw->buffers) / sizeof(draw->buffers[0]); b++) {
256 struct loader_dri3_buffer *buf = draw->buffers[b];
257
258 if (buf && buf->pixmap == ie->pixmap) {
259 buf->busy = 0;
260 if (draw->num_back <= b && b < LOADER_DRI3_MAX_BACK) {
261 dri3_free_render_buffer(draw, buf);
262 draw->buffers[b] = NULL;
263 }
264 break;
265 }
266 }
267 break;
268 }
269 }
270 free(ge);
271 }
272
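/** dri3_wait_for_event
 *
 * Block until the next Present special event arrives and process it.
 * Returns false if no event could be read (e.g. the connection broke or
 * the special event queue went away).
 */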
273 static bool
274 dri3_wait_for_event(struct loader_dri3_drawable *draw)
275 {
276 xcb_generic_event_t *ev;
277 xcb_present_generic_event_t *ge;
278
279 xcb_flush(draw->conn);
280 ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
281 if (!ev)
282 return false;
283 ge = (void *) ev;
284 dri3_handle_present_event(draw, ge);
285 return true;
286 }
287
288 /** loader_dri3_wait_for_msc
289 *
290 * Get the X server to send an event when the target msc/divisor/remainder is
291 * reached.
292 */
293 bool
294 loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
295 int64_t target_msc,
296 int64_t divisor, int64_t remainder,
297 int64_t *ust, int64_t *msc, int64_t *sbc)
298 {
299 uint32_t msc_serial;
300
301 msc_serial = ++draw->send_msc_serial;
302 xcb_present_notify_msc(draw->conn,
303 draw->drawable,
304 msc_serial,
305 target_msc,
306 divisor,
307 remainder);
308
309 xcb_flush(draw->conn);
310
311 /* Wait for the event */
312 if (draw->special_event) {
313 while ((int32_t) (msc_serial - draw->recv_msc_serial) > 0) {
314 if (!dri3_wait_for_event(draw))
315 return false;
316 }
317 }
318
319 *ust = draw->notify_ust;
320 *msc = draw->notify_msc;
321 *sbc = draw->recv_sbc;
322
323 return true;
324 }
325
326 /** loader_dri3_wait_for_sbc
327 *
328 * Wait for the completed swap buffer count to reach the specified
329 * target. Presumably the application knows that this will be reached with
330 * outstanding complete events, or we're going to be here awhile.
331 */
332 int
333 loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
334 int64_t target_sbc, int64_t *ust,
335 int64_t *msc, int64_t *sbc)
336 {
337 /* From the GLX_OML_sync_control spec:
338 *
339 * "If <target_sbc> = 0, the function will block until all previous
340 * swaps requested with glXSwapBuffersMscOML for that window have
341 * completed."
342 */
343 if (!target_sbc)
344 target_sbc = draw->send_sbc;
345
346 while (draw->recv_sbc < target_sbc) {
347 if (!dri3_wait_for_event(draw))
348 return 0;
349 }
350
351 *ust = draw->ust;
352 *msc = draw->msc;
353 *sbc = draw->recv_sbc;
354 return 1;
355 }
356
357 /** dri3_find_back
358  *
359  * Find an idle back buffer. If there isn't one, then
360  * wait for a Present idle notify event from the X server.
361  */
362 static int
363 dri3_find_back(struct loader_dri3_drawable *draw)
364 {
365 int b;
366 xcb_generic_event_t *ev;
367 xcb_present_generic_event_t *ge;
368
369 for (;;) {
370 for (b = 0; b < draw->num_back; b++) {
371 int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->num_back);
372 struct loader_dri3_buffer *buffer = draw->buffers[id];
373
374 if (!buffer || !buffer->busy) {
375 draw->cur_back = id;
376 return id;
377 }
378 }
379 xcb_flush(draw->conn);
380 ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
381 if (!ev)
382 return -1;
383 ge = (void *) ev;
384 dri3_handle_present_event(draw, ge);
385 }
386 }
387
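/* Lazily create and cache a GC for the drawable, with graphics exposures
 * disabled, for use by dri3_copy_area().
 */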
388 static xcb_gcontext_t
389 dri3_drawable_gc(struct loader_dri3_drawable *draw)
390 {
391 if (!draw->gc) {
392 uint32_t v = 0;
393 xcb_create_gc(draw->conn,
394 (draw->gc = xcb_generate_id(draw->conn)),
395 draw->drawable,
396 XCB_GC_GRAPHICS_EXPOSURES,
397 &v);
398 }
399 return draw->gc;
400 }
401
402
403 static struct loader_dri3_buffer *
404 dri3_back_buffer(struct loader_dri3_drawable *draw)
405 {
406 return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
407 }
408
409 static struct loader_dri3_buffer *
410 dri3_fake_front_buffer(struct loader_dri3_drawable *draw)
411 {
412 return draw->buffers[LOADER_DRI3_FRONT_ID];
413 }
414
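/* Wrapper around xcb_copy_area that discards the checked reply, so any
 * error from the copy is dropped instead of reaching the application's
 * event queue.
 */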
415 static void
416 dri3_copy_area(xcb_connection_t *c,
417 xcb_drawable_t src_drawable,
418 xcb_drawable_t dst_drawable,
419 xcb_gcontext_t gc,
420 int16_t src_x,
421 int16_t src_y,
422 int16_t dst_x,
423 int16_t dst_y,
424 uint16_t width,
425 uint16_t height)
426 {
427 xcb_void_cookie_t cookie;
428
429 cookie = xcb_copy_area_checked(c,
430 src_drawable,
431 dst_drawable,
432 gc,
433 src_x,
434 src_y,
435 dst_x,
436 dst_y,
437 width,
438 height);
439 xcb_discard_reply(c, cookie.sequence);
440 }
441
442 /**
443 * Asks the driver to flush any queued work necessary for serializing with the
444 * X command stream, and optionally the slightly more strict requirement of
445 * glFlush() equivalence (which would require flushing even if nothing had
446 * been drawn to a window system framebuffer, for example).
447 */
448 void
449 loader_dri3_flush(struct loader_dri3_drawable *draw,
450 unsigned flags,
451 enum __DRI2throttleReason throttle_reason)
452 {
453    /* The context may be NULL when no context is current, so check before flushing. */
454 __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);
455
456 if (dri_context) {
457 draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
458 flags, throttle_reason);
459 }
460 }
461
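/** loader_dri3_copy_sub_buffer
 *
 * Copy a sub-rectangle of the current back buffer to the drawable (and
 * refresh the fake front, if any), using the buffer fences to order the
 * copies against the X server.
 */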
462 void
463 loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
464 int x, int y,
465 int width, int height,
466 bool flush)
467 {
468 struct loader_dri3_buffer *back;
469 unsigned flags = __DRI2_FLUSH_DRAWABLE;
470 __DRIcontext *dri_context;
471
472 dri_context = draw->vtable->get_dri_context(draw);
473
474 /* Check we have the right attachments */
475 if (!draw->have_back || draw->is_pixmap)
476 return;
477
478 if (flush)
479 flags |= __DRI2_FLUSH_CONTEXT;
480 loader_dri3_flush(draw, flags, __DRI2_THROTTLE_SWAPBUFFER);
481
482 back = dri3_back_buffer(draw);
483 y = draw->height - y - height;
484
485 if (draw->is_different_gpu && draw->vtable->in_current_context(draw)) {
486 /* Update the linear buffer part of the back buffer
487 * for the dri3_copy_area operation
488 */
489 draw->ext->image->blitImage(dri_context,
490 back->linear_buffer,
491 back->image,
492 0, 0, back->width,
493 back->height,
494 0, 0, back->width,
495 back->height, __BLIT_FLAG_FLUSH);
496       /* We also use blitImage to keep the fake front up to date.
497        */
498 if (draw->have_fake_front)
499 draw->ext->image->blitImage(dri_context,
500 dri3_fake_front_buffer(draw)->image,
501 back->image,
502 x, y, width, height,
503 x, y, width, height, __BLIT_FLAG_FLUSH);
504 }
505
506 loader_dri3_swapbuffer_barrier(draw);
507 dri3_fence_reset(draw->conn, back);
508 dri3_copy_area(draw->conn,
509 dri3_back_buffer(draw)->pixmap,
510 draw->drawable,
511 dri3_drawable_gc(draw),
512 x, y, x, y, width, height);
513 dri3_fence_trigger(draw->conn, back);
514 /* Refresh the fake front (if present) after we just damaged the real
515 * front.
516 */
517 if (draw->have_fake_front && !draw->is_different_gpu) {
518 dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
519 dri3_copy_area(draw->conn,
520 dri3_back_buffer(draw)->pixmap,
521 dri3_fake_front_buffer(draw)->pixmap,
522 dri3_drawable_gc(draw),
523 x, y, x, y, width, height);
524 dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
525 dri3_fence_await(draw->conn, dri3_fake_front_buffer(draw));
526 }
527 dri3_fence_await(draw->conn, back);
528 }
529
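/** loader_dri3_copy_drawable
 *
 * Copy the full drawable area from one X drawable to another, waiting on
 * the fake front's fence for the server-side copy to complete.
 */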
530 void
531 loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
532 xcb_drawable_t dest,
533 xcb_drawable_t src)
534 {
535 loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, 0);
536
537 dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
538 dri3_copy_area(draw->conn,
539 src, dest,
540 dri3_drawable_gc(draw),
541 0, 0, 0, 0, draw->width, draw->height);
542 dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
543 dri3_fence_await(draw->conn, dri3_fake_front_buffer(draw));
544 }
545
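/** loader_dri3_wait_x
 *
 * Update the fake front buffer from the real front, so subsequent GL
 * rendering sees any drawing the X server has done to the window.
 */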
546 void
547 loader_dri3_wait_x(struct loader_dri3_drawable *draw)
548 {
549 struct loader_dri3_buffer *front;
550 __DRIcontext *dri_context;
551
552 if (draw == NULL || !draw->have_fake_front)
553 return;
554
555 front = dri3_fake_front_buffer(draw);
556 dri_context = draw->vtable->get_dri_context(draw);
557
558 loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);
559
560    /* In the draw->is_different_gpu case, the linear buffer has been updated,
561 * but not yet the tiled buffer.
562 * Copy back to the tiled buffer we use for rendering.
563 * Note that we don't need flushing.
564 */
565 if (draw->is_different_gpu && draw->vtable->in_current_context(draw))
566 draw->ext->image->blitImage(dri_context,
567 front->image,
568 front->linear_buffer,
569 0, 0, front->width,
570 front->height,
571 0, 0, front->width,
572 front->height, 0);
573 }
574
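/** loader_dri3_wait_gl
 *
 * Push the fake front buffer contents out to the real front, so X
 * rendering sees what GL has drawn.
 */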
575 void
576 loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
577 {
578 struct loader_dri3_buffer *front;
579 __DRIcontext *dri_context;
580
581 if (draw == NULL || !draw->have_fake_front)
582 return;
583
584 front = dri3_fake_front_buffer(draw);
585 dri_context = draw->vtable->get_dri_context(draw);
586
587    /* In the draw->is_different_gpu case, we update the linear_buffer
588 * before updating the real front.
589 */
590 if (draw->is_different_gpu && draw->vtable->in_current_context(draw))
591 draw->ext->image->blitImage(dri_context,
592 front->linear_buffer,
593 front->image,
594 0, 0, front->width,
595 front->height,
596 0, 0, front->width,
597 front->height, __BLIT_FLAG_FLUSH);
598 loader_dri3_swapbuffer_barrier(draw);
599 loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
600 }
601
602 /** dri3_flush_present_events
603 *
604 * Process any present events that have been received from the X server
605 */
606 static void
607 dri3_flush_present_events(struct loader_dri3_drawable *draw)
608 {
609 /* Check to see if any configuration changes have occurred
610 * since we were last invoked
611 */
612 if (draw->special_event) {
613 xcb_generic_event_t *ev;
614
615 while ((ev = xcb_poll_for_special_event(draw->conn,
616 draw->special_event)) != NULL) {
617 xcb_present_generic_event_t *ge = (void *) ev;
618 dri3_handle_present_event(draw, ge);
619 }
620 }
621 }
622
623 /** loader_dri3_swap_buffers_msc
624 *
625 * Make the current back buffer visible using the present extension
626 */
627 int64_t
628 loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
629 int64_t target_msc, int64_t divisor,
630 int64_t remainder, unsigned flush_flags,
631 bool force_copy)
632 {
633 struct loader_dri3_buffer *back;
634 __DRIcontext *dri_context;
635 int64_t ret = 0;
636 uint32_t options = XCB_PRESENT_OPTION_NONE;
637
638 dri_context = draw->vtable->get_dri_context(draw);
639
640 draw->vtable->flush_drawable(draw, flush_flags);
641
642 back = draw->buffers[dri3_find_back(draw)];
643 if (draw->is_different_gpu && back) {
644 /* Update the linear buffer before presenting the pixmap */
645 draw->ext->image->blitImage(dri_context,
646 back->linear_buffer,
647 back->image,
648 0, 0, back->width,
649 back->height,
650 0, 0, back->width,
651 back->height, __BLIT_FLAG_FLUSH);
652 /* Update the fake front */
653 if (draw->have_fake_front)
654 draw->ext->image->blitImage(dri_context,
655 draw->buffers[LOADER_DRI3_FRONT_ID]->image,
656 back->image,
657 0, 0, draw->width, draw->height,
658 0, 0, draw->width, draw->height,
659 __BLIT_FLAG_FLUSH);
660 }
661
662 dri3_flush_present_events(draw);
663
664 if (back && !draw->is_pixmap) {
665 dri3_fence_reset(draw->conn, back);
666
667 /* Compute when we want the frame shown by taking the last known
668 * successful MSC and adding in a swap interval for each outstanding swap
669 * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
670 * semantic"
671 */
672 ++draw->send_sbc;
673 if (target_msc == 0 && divisor == 0 && remainder == 0)
674 target_msc = draw->msc + draw->swap_interval *
675 (draw->send_sbc - draw->recv_sbc);
676 else if (divisor == 0 && remainder > 0) {
677 /* From the GLX_OML_sync_control spec:
678 * "If <divisor> = 0, the swap will occur when MSC becomes
679 * greater than or equal to <target_msc>."
680 *
681 * Note that there's no mention of the remainder. The Present
682 * extension throws BadValue for remainder != 0 with divisor == 0, so
683 * just drop the passed in value.
684 */
685 remainder = 0;
686 }
687
688 /* From the GLX_EXT_swap_control spec
689 * and the EGL 1.4 spec (page 53):
690 *
691 * "If <interval> is set to a value of 0, buffer swaps are not
692 * synchronized to a video frame."
693 *
694 * Implementation note: It is possible to enable triple buffering
695 * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
696 * the default.
697 */
698 if (draw->swap_interval == 0)
699 options |= XCB_PRESENT_OPTION_ASYNC;
700 if (force_copy)
701 options |= XCB_PRESENT_OPTION_COPY;
702
703 back->busy = 1;
704 back->last_swap = draw->send_sbc;
705 xcb_present_pixmap(draw->conn,
706 draw->drawable,
707 back->pixmap,
708 (uint32_t) draw->send_sbc,
709 0, /* valid */
710 0, /* update */
711 0, /* x_off */
712 0, /* y_off */
713 None, /* target_crtc */
714 None,
715 back->sync_fence,
716 options,
717 target_msc,
718 divisor,
719 remainder, 0, NULL);
720 ret = (int64_t) draw->send_sbc;
721
722 /* If there's a fake front, then copy the source back buffer
723 * to the fake front to keep it up to date. This needs
724 * to reset the fence and make future users block until
725 * the X server is done copying the bits
726 */
727 if (draw->have_fake_front && !draw->is_different_gpu) {
728 dri3_fence_reset(draw->conn, draw->buffers[LOADER_DRI3_FRONT_ID]);
729 dri3_copy_area(draw->conn,
730 back->pixmap,
731 draw->buffers[LOADER_DRI3_FRONT_ID]->pixmap,
732 dri3_drawable_gc(draw),
733 0, 0, 0, 0,
734 draw->width, draw->height);
735 dri3_fence_trigger(draw->conn, draw->buffers[LOADER_DRI3_FRONT_ID]);
736 }
737 xcb_flush(draw->conn);
738 if (draw->stamp)
739 ++(*draw->stamp);
740 }
741
742 draw->ext->flush->invalidate(draw->dri_drawable);
743
744 return ret;
745 }
746
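/** loader_dri3_query_buffer_age
 *
 * Return the age of the next back buffer in swaps (1 means it was the
 * most recently presented frame); 0 means its contents are undefined.
 */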
747 int
748 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
749 {
750 int back_id = LOADER_DRI3_BACK_ID(dri3_find_back(draw));
751
752 if (back_id < 0 || !draw->buffers[back_id])
753 return 0;
754
755 if (draw->buffers[back_id]->last_swap != 0)
756 return draw->send_sbc - draw->buffers[back_id]->last_swap + 1;
757 else
758 return 0;
759 }
760
761 /** loader_dri3_open
762 *
763 * Wrapper around xcb_dri3_open
764 */
765 int
766 loader_dri3_open(xcb_connection_t *conn,
767 xcb_window_t root,
768 uint32_t provider)
769 {
770 xcb_dri3_open_cookie_t cookie;
771 xcb_dri3_open_reply_t *reply;
772 int fd;
773
774 cookie = xcb_dri3_open(conn,
775 root,
776 provider);
777
778 reply = xcb_dri3_open_reply(conn, cookie, NULL);
779 if (!reply)
780 return -1;
781
782 if (reply->nfd != 1) {
783 free(reply);
784 return -1;
785 }
786
787 fd = xcb_dri3_open_reply_fds(conn, reply)[0];
788 free(reply);
789 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
790
791 return fd;
792 }
793
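/* Bytes per pixel for the __DRI_IMAGE_FORMAT codes we know how to
 * allocate; returns 0 for unsupported formats.
 */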
794 static uint32_t
795 dri3_cpp_for_format(uint32_t format) {
796 switch (format) {
797 case __DRI_IMAGE_FORMAT_R8:
798 return 1;
799 case __DRI_IMAGE_FORMAT_RGB565:
800 case __DRI_IMAGE_FORMAT_GR88:
801 return 2;
802 case __DRI_IMAGE_FORMAT_XRGB8888:
803 case __DRI_IMAGE_FORMAT_ARGB8888:
804 case __DRI_IMAGE_FORMAT_ABGR8888:
805 case __DRI_IMAGE_FORMAT_XBGR8888:
806 case __DRI_IMAGE_FORMAT_XRGB2101010:
807 case __DRI_IMAGE_FORMAT_ARGB2101010:
808 case __DRI_IMAGE_FORMAT_SARGB8:
809 return 4;
810 case __DRI_IMAGE_FORMAT_NONE:
811 default:
812 return 0;
813 }
814 }
815
816 /** dri3_alloc_render_buffer
817  *
818  * Use the driver createImage function to construct a __DRIimage, then
819  * get a file descriptor for it and create an X pixmap from that.
820 *
821 * Allocate an xshmfence for synchronization
822 */
823 static struct loader_dri3_buffer *
824 dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
825 int width, int height, int depth)
826 {
827 struct loader_dri3_buffer *buffer;
828 __DRIimage *pixmap_buffer;
829 xcb_pixmap_t pixmap;
830 xcb_sync_fence_t sync_fence;
831 struct xshmfence *shm_fence;
832 int buffer_fd, fence_fd;
833 int stride;
834
835 /* Create an xshmfence object and
836 * prepare to send that to the X server
837 */
838
839 fence_fd = xshmfence_alloc_shm();
840 if (fence_fd < 0)
841 return NULL;
842
843 shm_fence = xshmfence_map_shm(fence_fd);
844 if (shm_fence == NULL)
845 goto no_shm_fence;
846
847 /* Allocate the image from the driver
848 */
849 buffer = calloc(1, sizeof *buffer);
850 if (!buffer)
851 goto no_buffer;
852
853 buffer->cpp = dri3_cpp_for_format(format);
854 if (!buffer->cpp)
855 goto no_image;
856
857 if (!draw->is_different_gpu) {
858 buffer->image = draw->ext->image->createImage(draw->dri_screen,
859 width, height,
860 format,
861 __DRI_IMAGE_USE_SHARE |
862 __DRI_IMAGE_USE_SCANOUT |
863 __DRI_IMAGE_USE_BACKBUFFER,
864 buffer);
865 pixmap_buffer = buffer->image;
866
867 if (!buffer->image)
868 goto no_image;
869 } else {
870 buffer->image = draw->ext->image->createImage(draw->dri_screen,
871 width, height,
872 format,
873 0,
874 buffer);
875
876 if (!buffer->image)
877 goto no_image;
878
879 buffer->linear_buffer =
880 draw->ext->image->createImage(draw->dri_screen,
881 width, height, format,
882 __DRI_IMAGE_USE_SHARE |
883 __DRI_IMAGE_USE_LINEAR |
884 __DRI_IMAGE_USE_BACKBUFFER,
885 buffer);
886 pixmap_buffer = buffer->linear_buffer;
887
888 if (!buffer->linear_buffer)
889 goto no_linear_buffer;
890 }
891
892 /* X wants the stride, so ask the image for it
893 */
894 if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_STRIDE,
895 &stride))
896 goto no_buffer_attrib;
897
898 buffer->pitch = stride;
899
900 if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_FD,
901 &buffer_fd))
902 goto no_buffer_attrib;
903
904 xcb_dri3_pixmap_from_buffer(draw->conn,
905 (pixmap = xcb_generate_id(draw->conn)),
906 draw->drawable,
907 buffer->size,
908 width, height, buffer->pitch,
909 depth, buffer->cpp * 8,
910 buffer_fd);
911
912 xcb_dri3_fence_from_fd(draw->conn,
913 pixmap,
914 (sync_fence = xcb_generate_id(draw->conn)),
915 false,
916 fence_fd);
917
918 buffer->pixmap = pixmap;
919 buffer->own_pixmap = true;
920 buffer->sync_fence = sync_fence;
921 buffer->shm_fence = shm_fence;
922 buffer->width = width;
923 buffer->height = height;
924
925 /* Mark the buffer as idle
926 */
927 dri3_fence_set(buffer);
928
929 return buffer;
930
931 no_buffer_attrib:
932 draw->ext->image->destroyImage(pixmap_buffer);
933 no_linear_buffer:
934 if (draw->is_different_gpu)
935 draw->ext->image->destroyImage(buffer->image);
936 no_image:
937 free(buffer);
938 no_buffer:
939 xshmfence_unmap_shm(shm_fence);
940 no_shm_fence:
941 close(fence_fd);
942 return NULL;
943 }
944
945 /** dri3_update_drawable
946  *
947  * Called the first time we use the drawable and then
948  * after we receive Present ConfigureNotify events to
949  * track the geometry of the drawable.
950 */
951 static int
952 dri3_update_drawable(__DRIdrawable *driDrawable,
953 struct loader_dri3_drawable *draw)
954 {
955 if (draw->first_init) {
956 xcb_get_geometry_cookie_t geom_cookie;
957 xcb_get_geometry_reply_t *geom_reply;
958 xcb_void_cookie_t cookie;
959 xcb_generic_error_t *error;
960 xcb_present_query_capabilities_cookie_t present_capabilities_cookie;
961 xcb_present_query_capabilities_reply_t *present_capabilities_reply;
962
963 draw->first_init = false;
964
965 /* Try to select for input on the window.
966 *
967 * If the drawable is a window, this will get our events
968 * delivered.
969 *
970 * Otherwise, we'll get a BadWindow error back from this request which
971 * will let us know that the drawable is a pixmap instead.
972 */
973
974 draw->eid = xcb_generate_id(draw->conn);
975 cookie =
976 xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
977 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
978 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
979 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
980
981 present_capabilities_cookie =
982 xcb_present_query_capabilities(draw->conn, draw->drawable);
983
984 /* Create an XCB event queue to hold present events outside of the usual
985 * application event queue
986 */
987 draw->special_event = xcb_register_for_special_xge(draw->conn,
988 &xcb_present_id,
989 draw->eid,
990 draw->stamp);
991 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
992
993 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
994
995 if (!geom_reply)
996 return false;
997
998 draw->width = geom_reply->width;
999 draw->height = geom_reply->height;
1000 draw->depth = geom_reply->depth;
1001 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1002
1003 free(geom_reply);
1004
1005 draw->is_pixmap = false;
1006
1007 /* Check to see if our select input call failed. If it failed with a
1008 * BadWindow error, then assume the drawable is a pixmap. Destroy the
1009 * special event queue created above and mark the drawable as a pixmap
1010 */
1011
1012 error = xcb_request_check(draw->conn, cookie);
1013
1014 present_capabilities_reply =
1015 xcb_present_query_capabilities_reply(draw->conn,
1016 present_capabilities_cookie,
1017 NULL);
1018
1019 if (present_capabilities_reply) {
1020 draw->present_capabilities = present_capabilities_reply->capabilities;
1021 free(present_capabilities_reply);
1022 } else
1023 draw->present_capabilities = 0;
1024
1025 if (error) {
1026 if (error->error_code != BadWindow) {
1027 free(error);
1028 return false;
1029 }
1030 draw->is_pixmap = true;
1031 xcb_unregister_for_special_event(draw->conn, draw->special_event);
1032 draw->special_event = NULL;
1033 }
1034 }
1035 dri3_flush_present_events(draw);
1036 return true;
1037 }
1038
1039 /* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
1040 * the createImageFromFds call takes __DRI_IMAGE_FOURCC codes. To avoid
1041 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
1042 * translate to __DRI_IMAGE_FOURCC codes in the call to createImageFromFds
1043 */
1044 static int
1045 image_format_to_fourcc(int format)
1046 {
1047
1048 /* Convert from __DRI_IMAGE_FORMAT to __DRI_IMAGE_FOURCC (sigh) */
1049 switch (format) {
1050 case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
1051 case __DRI_IMAGE_FORMAT_RGB565: return __DRI_IMAGE_FOURCC_RGB565;
1052 case __DRI_IMAGE_FORMAT_XRGB8888: return __DRI_IMAGE_FOURCC_XRGB8888;
1053 case __DRI_IMAGE_FORMAT_ARGB8888: return __DRI_IMAGE_FOURCC_ARGB8888;
1054 case __DRI_IMAGE_FORMAT_ABGR8888: return __DRI_IMAGE_FOURCC_ABGR8888;
1055 case __DRI_IMAGE_FORMAT_XBGR8888: return __DRI_IMAGE_FOURCC_XBGR8888;
1056 }
1057 return 0;
1058 }
1059
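/** loader_dri3_create_image
 *
 * Import the fd from a DRI3 BufferFromPixmap reply with createImageFromFds
 * and return plane 0 of the resulting image as a standalone __DRIimage.
 */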
1060 __DRIimage *
1061 loader_dri3_create_image(xcb_connection_t *c,
1062 xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1063 unsigned int format,
1064 __DRIscreen *dri_screen,
1065 const __DRIimageExtension *image,
1066 void *loaderPrivate)
1067 {
1068 int *fds;
1069 __DRIimage *image_planar, *ret;
1070 int stride, offset;
1071
1072 /* Get an FD for the pixmap object
1073 */
1074 fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1075
1076 stride = bp_reply->stride;
1077 offset = 0;
1078
1079 /* createImageFromFds creates a wrapper __DRIimage structure which
1080  * can deal with multiple planes for things like YUV images. So, once
1081 * we've gotten the planar wrapper, pull the single plane out of it and
1082 * discard the wrapper.
1083 */
1084 image_planar = image->createImageFromFds(dri_screen,
1085 bp_reply->width,
1086 bp_reply->height,
1087 image_format_to_fourcc(format),
1088 fds, 1,
1089 &stride, &offset, loaderPrivate);
1090 close(fds[0]);
1091 if (!image_planar)
1092 return NULL;
1093
1094 ret = image->fromPlanar(image_planar, 0, loaderPrivate);
1095
1096 image->destroyImage(image_planar);
1097
1098 return ret;
1099 }
1100
1101 /** dri3_get_pixmap_buffer
1102 *
1103 * Get the DRM object for a pixmap from the X server and
1104 * wrap that with a __DRIimage structure using createImageFromFds
1105 */
1106 static struct loader_dri3_buffer *
1107 dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
1108 enum loader_dri3_buffer_type buffer_type,
1109 struct loader_dri3_drawable *draw)
1110 {
1111 int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
1112 struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
1113 xcb_drawable_t pixmap;
1114 xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
1115 xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;
1116 xcb_sync_fence_t sync_fence;
1117 struct xshmfence *shm_fence;
1118 int fence_fd;
1119
1120 if (buffer)
1121 return buffer;
1122
1123 pixmap = draw->drawable;
1124
1125 buffer = calloc(1, sizeof *buffer);
1126 if (!buffer)
1127 goto no_buffer;
1128
1129 fence_fd = xshmfence_alloc_shm();
1130 if (fence_fd < 0)
1131 goto no_fence;
1132 shm_fence = xshmfence_map_shm(fence_fd);
1133 if (shm_fence == NULL) {
1134 close (fence_fd);
1135 goto no_fence;
1136 }
1137
1138 xcb_dri3_fence_from_fd(draw->conn,
1139 pixmap,
1140 (sync_fence = xcb_generate_id(draw->conn)),
1141 false,
1142 fence_fd);
1143
1144 bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
1145 bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
1146 if (!bp_reply)
1147 goto no_image;
1148
1149 buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
1150 draw->dri_screen, draw->ext->image,
1151 buffer);
1152 if (!buffer->image)
1153 goto no_image;
1154
1155 buffer->pixmap = pixmap;
1156 buffer->own_pixmap = false;
1157 buffer->width = bp_reply->width;
1158 buffer->height = bp_reply->height;
1159 buffer->buffer_type = buffer_type;
1160 buffer->shm_fence = shm_fence;
1161 buffer->sync_fence = sync_fence;
1162
1163 draw->buffers[buf_id] = buffer;
1164
1165 free(bp_reply);
1166
1167 return buffer;
1168
1169 no_image:
1170 free(bp_reply);
1171 xcb_sync_destroy_fence(draw->conn, sync_fence);
1172 xshmfence_unmap_shm(shm_fence);
1173 no_fence:
1174 free(buffer);
1175 no_buffer:
1176 return NULL;
1177 }
1178
1179 /** dri3_get_buffer
1180 *
1181 * Find a front or back buffer, allocating new ones as necessary
1182 */
1183 static struct loader_dri3_buffer *
1184 dri3_get_buffer(__DRIdrawable *driDrawable,
1185 unsigned int format,
1186 enum loader_dri3_buffer_type buffer_type,
1187 struct loader_dri3_drawable *draw)
1188 {
1189 struct loader_dri3_buffer *buffer;
1190 int buf_id;
1191 __DRIcontext *dri_context;
1192
1193 dri_context = draw->vtable->get_dri_context(draw);
1194
1195 if (buffer_type == loader_dri3_buffer_back) {
1196 buf_id = dri3_find_back(draw);
1197
1198 if (buf_id < 0)
1199 return NULL;
1200 } else {
1201 buf_id = LOADER_DRI3_FRONT_ID;
1202 }
1203
1204 buffer = draw->buffers[buf_id];
1205
1206 /* Allocate a new buffer if there isn't an old one, or if that
1207 * old one is the wrong size
1208 */
1209 if (!buffer || buffer->width != draw->width ||
1210 buffer->height != draw->height) {
1211 struct loader_dri3_buffer *new_buffer;
1212
1213 /* Allocate the new buffers
1214 */
1215 new_buffer = dri3_alloc_render_buffer(draw,
1216 format,
1217 draw->width,
1218 draw->height,
1219 draw->depth);
1220 if (!new_buffer)
1221 return NULL;
1222
1223 /* When resizing, copy the contents of the old buffer, waiting for that
1224 * copy to complete using our fences before proceeding
1225 */
1226 switch (buffer_type) {
1227 case loader_dri3_buffer_back:
1228 if (buffer) {
1229 if (!buffer->linear_buffer) {
1230 dri3_fence_reset(draw->conn, new_buffer);
1231 dri3_fence_await(draw->conn, buffer);
1232 dri3_copy_area(draw->conn,
1233 buffer->pixmap,
1234 new_buffer->pixmap,
1235 dri3_drawable_gc(draw),
1236 0, 0, 0, 0,
1237 draw->width, draw->height);
1238 dri3_fence_trigger(draw->conn, new_buffer);
1239 } else if (draw->vtable->in_current_context(draw)) {
1240 draw->ext->image->blitImage(dri_context,
1241 new_buffer->image,
1242 buffer->image,
1243 0, 0, draw->width, draw->height,
1244 0, 0, draw->width, draw->height, 0);
1245 }
1246 dri3_free_render_buffer(draw, buffer);
1247 }
1248 break;
1249 case loader_dri3_buffer_front:
1250 loader_dri3_swapbuffer_barrier(draw);
1251 dri3_fence_reset(draw->conn, new_buffer);
1252 dri3_copy_area(draw->conn,
1253 draw->drawable,
1254 new_buffer->pixmap,
1255 dri3_drawable_gc(draw),
1256 0, 0, 0, 0,
1257 draw->width, draw->height);
1258 dri3_fence_trigger(draw->conn, new_buffer);
1259
1260 if (new_buffer->linear_buffer &&
1261 draw->vtable->in_current_context(draw)) {
1262 dri3_fence_await(draw->conn, new_buffer);
1263 draw->ext->image->blitImage(dri_context,
1264 new_buffer->image,
1265 new_buffer->linear_buffer,
1266 0, 0, draw->width, draw->height,
1267 0, 0, draw->width, draw->height, 0);
1268 }
1269 break;
1270 }
1271 buffer = new_buffer;
1272 buffer->buffer_type = buffer_type;
1273 draw->buffers[buf_id] = buffer;
1274 }
1275 dri3_fence_await(draw->conn, buffer);
1276
1277 /* Return the requested buffer */
1278 return buffer;
1279 }
1280
1281 /** dri3_free_buffers
1282 *
1283  * Free the front buffer or all of the back buffers. Used
1284 * when the application changes which buffers it needs
1285 */
1286 static void
1287 dri3_free_buffers(__DRIdrawable *driDrawable,
1288 enum loader_dri3_buffer_type buffer_type,
1289 struct loader_dri3_drawable *draw)
1290 {
1291 struct loader_dri3_buffer *buffer;
1292 int first_id;
1293 int n_id;
1294 int buf_id;
1295
1296 switch (buffer_type) {
1297 case loader_dri3_buffer_back:
1298 first_id = LOADER_DRI3_BACK_ID(0);
1299 n_id = LOADER_DRI3_MAX_BACK;
1300 break;
1301 case loader_dri3_buffer_front:
1302 first_id = LOADER_DRI3_FRONT_ID;
1303 n_id = 1;
1304 }
1305
1306 for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) {
1307 buffer = draw->buffers[buf_id];
1308 if (buffer) {
1309 dri3_free_render_buffer(draw, buffer);
1310 draw->buffers[buf_id] = NULL;
1311 }
1312 }
1313 }
1314
1315 /** loader_dri3_get_buffers
1316 *
1317 * The published buffer allocation API.
1318 * Returns all of the necessary buffers, allocating
1319 * as needed.
1320 */
1321 int
1322 loader_dri3_get_buffers(__DRIdrawable *driDrawable,
1323 unsigned int format,
1324 uint32_t *stamp,
1325 void *loaderPrivate,
1326 uint32_t buffer_mask,
1327 struct __DRIimageList *buffers)
1328 {
1329 struct loader_dri3_drawable *draw = loaderPrivate;
1330 struct loader_dri3_buffer *front, *back;
1331
1332 buffers->image_mask = 0;
1333 buffers->front = NULL;
1334 buffers->back = NULL;
1335
1336 front = NULL;
1337 back = NULL;
1338
1339 if (!dri3_update_drawable(driDrawable, draw))
1340 return false;
1341
1342 /* pixmaps always have front buffers */
1343 if (draw->is_pixmap)
1344 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
1345
1346 if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
1347       /* All pixmaps are owned by the server GPU.
1348        * When we use a different GPU, we can't use the pixmap
1349        * as a buffer since it is potentially tiled in a way
1350 * our device can't understand. In this case, use
1351 * a fake front buffer. Hopefully the pixmap
1352 * content will get synced with the fake front
1353 * buffer.
1354 */
1355 if (draw->is_pixmap && !draw->is_different_gpu)
1356 front = dri3_get_pixmap_buffer(driDrawable,
1357 format,
1358 loader_dri3_buffer_front,
1359 draw);
1360 else
1361 front = dri3_get_buffer(driDrawable,
1362 format,
1363 loader_dri3_buffer_front,
1364 draw);
1365
1366 if (!front)
1367 return false;
1368 } else {
1369 dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
1370 draw->have_fake_front = 0;
1371 }
1372
1373 if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
1374 back = dri3_get_buffer(driDrawable,
1375 format,
1376 loader_dri3_buffer_back,
1377 draw);
1378 if (!back)
1379 return false;
1380 draw->have_back = 1;
1381 } else {
1382 dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
1383 draw->have_back = 0;
1384 }
1385
1386 if (front) {
1387 buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
1388 buffers->front = front->image;
1389 draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap;
1390 }
1391
1392 if (back) {
1393 buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
1394 buffers->back = back->image;
1395 }
1396
1397 draw->stamp = stamp;
1398
1399 return true;
1400 }
1401
1402 /** loader_dri3_update_drawable_geometry
1403 *
1404 * Get the current drawable geometry.
1405 */
1406 void
1407 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
1408 {
1409 xcb_get_geometry_cookie_t geom_cookie;
1410 xcb_get_geometry_reply_t *geom_reply;
1411
1412 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
1413
1414 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
1415
1416 if (geom_reply) {
1417 draw->width = geom_reply->width;
1418 draw->height = geom_reply->height;
1419 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1420
1421 free(geom_reply);
1422 }
1423 }
1424
1425
1426 /**
1427 * Make sure the server has flushed all pending swap buffers to hardware
1428 * for this drawable. Ideally we'd want to send an X protocol request to
1429 * have the server block our connection until the swaps are complete. That
1430 * would avoid the potential round-trip here.
1431 */
1432 void
1433 loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
1434 {
1435 int64_t ust, msc, sbc;
1436
1437 (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
1438 }