loader/dri3: Add dri3_wait_for_event_locked full_sequence out parameter
[mesa.git] / src / loader / loader_dri3_helper.c
1 /*
2 * Copyright © 2013 Keith Packard
3 * Copyright © 2015 Boyan Ding
4 *
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that copyright
8 * notice and this permission notice appear in supporting documentation, and
9 * that the name of the copyright holders not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. The copyright holders make no representations
12 * about the suitability of this software for any purpose. It is provided "as
13 * is" without express or implied warranty.
14 *
15 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21 * OF THIS SOFTWARE.
22 */
23
24 #include <fcntl.h>
25 #include <stdlib.h>
26 #include <unistd.h>
27 #include <string.h>
28
29 #include <X11/xshmfence.h>
30 #include <xcb/xcb.h>
31 #include <xcb/dri3.h>
32 #include <xcb/present.h>
33
34 #include <X11/Xlib-xcb.h>
35
36 #include "loader_dri3_helper.h"
37 #include "util/macros.h"
38 #include "drm-uapi/drm_fourcc.h"
39
40 /* From driconf.h, user exposed so should be stable */
41 #define DRI_CONF_VBLANK_NEVER 0
42 #define DRI_CONF_VBLANK_DEF_INTERVAL_0 1
43 #define DRI_CONF_VBLANK_DEF_INTERVAL_1 2
44 #define DRI_CONF_VBLANK_ALWAYS_SYNC 3
45
/**
 * A cached blit context.
 *
 * Shared, lazily-created __DRIcontext used for blitImage operations when
 * the caller has no suitable current context.  Access is serialized by
 * \c mtx (held for the whole duration of use, see
 * loader_dri3_blit_context_get / _put).
 */
struct loader_dri3_blit_context {
   mtx_t mtx;                      /* guards all other fields */
   __DRIcontext *ctx;              /* cached context, NULL if not created */
   __DRIscreen *cur_screen;        /* screen the cached context belongs to */
   const __DRIcoreExtension *core; /* core extension used to destroy ctx */
};

/* For simplicity we maintain the cache only for a single screen at a time */
static struct loader_dri3_blit_context blit_context = {
   _MTX_INITIALIZER_NP, NULL
};
60
61 static void
62 dri3_flush_present_events(struct loader_dri3_drawable *draw);
63
64 static struct loader_dri3_buffer *
65 dri3_find_back_alloc(struct loader_dri3_drawable *draw);
66
67 static xcb_screen_t *
68 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
69 {
70 xcb_screen_iterator_t screen_iter =
71 xcb_setup_roots_iterator(xcb_get_setup(conn));
72
73 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
74 if (screen_iter.data->root == root)
75 return screen_iter.data;
76 }
77
78 return NULL;
79 }
80
81 static xcb_visualtype_t *
82 get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
83 {
84 xcb_visualtype_iterator_t visual_iter;
85 xcb_screen_t *screen = draw->screen;
86 xcb_depth_iterator_t depth_iter;
87
88 if (!screen)
89 return NULL;
90
91 depth_iter = xcb_screen_allowed_depths_iterator(screen);
92 for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
93 if (depth_iter.data->depth != depth)
94 continue;
95
96 visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
97 if (visual_iter.rem)
98 return visual_iter.data;
99 }
100
101 return NULL;
102 }
103
/* Sets the adaptive sync window property state.
 *
 * Sets (state != 0) or deletes (state == 0) the "_VARIABLE_REFRESH"
 * CARDINAL property on the drawable.  Errors from the property request
 * are deliberately discarded; failing to intern the atom silently
 * leaves the property untouched.
 */
static void
set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
                           uint32_t state)
{
   static char const name[] = "_VARIABLE_REFRESH";
   xcb_intern_atom_cookie_t cookie;
   xcb_intern_atom_reply_t* reply;
   xcb_void_cookie_t check;

   cookie = xcb_intern_atom(conn, 0, strlen(name), name);
   reply = xcb_intern_atom_reply(conn, cookie, NULL);
   if (reply == NULL)
      return;

   if (state)
      check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
                                          drawable, reply->atom,
                                          XCB_ATOM_CARDINAL, 32, 1, &state);
   else
      check = xcb_delete_property_checked(conn, drawable, reply->atom);

   /* Drop any error reply rather than letting it hit the event queue. */
   xcb_discard_reply(conn, check.sequence);
   free(reply);
}
129
130 /* Get red channel mask for given drawable at given depth. */
131 static unsigned int
132 dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
133 {
134 xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);
135
136 if (visual)
137 return visual->red_mask;
138
139 return 0;
140 }
141
142 /**
143 * Do we have blit functionality in the image blit extension?
144 *
145 * \param draw[in] The drawable intended to blit from / to.
146 * \return true if we have blit functionality. false otherwise.
147 */
148 static bool loader_dri3_have_image_blit(const struct loader_dri3_drawable *draw)
149 {
150 return draw->ext->image->base.version >= 9 &&
151 draw->ext->image->blitImage != NULL;
152 }
153
/**
 * Get and lock (for use with the current thread) a dri context associated
 * with the drawable's dri screen. The context is intended to be used with
 * the dri image extension's blitImage method.
 *
 * \param draw[in]  Pointer to the drawable whose dri screen we want a
 * dri context for.
 * \return A dri context or NULL if context creation failed.
 *
 * When the caller is done with the context (even if the context returned was
 * NULL), the caller must call loader_dri3_blit_context_put.
 */
static __DRIcontext *
loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
{
   /* The mutex stays held until loader_dri3_blit_context_put(). */
   mtx_lock(&blit_context.mtx);

   /* The cache holds at most one context; throw away a cached context
    * that belongs to a different screen.
    */
   if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen) {
      blit_context.core->destroyContext(blit_context.ctx);
      blit_context.ctx = NULL;
   }

   if (!blit_context.ctx) {
      blit_context.ctx = draw->ext->core->createNewContext(draw->dri_screen,
                                                           NULL, NULL, NULL);
      /* Note: cur_screen/core are updated even when createNewContext
       * returned NULL; a later call simply retries creation.
       */
      blit_context.cur_screen = draw->dri_screen;
      blit_context.core = draw->ext->core;
   }

   return blit_context.ctx;
}
185
/**
 * Release (for use with other threads) a dri context previously obtained using
 * loader_dri3_blit_context_get.
 */
static void
loader_dri3_blit_context_put(void)
{
   mtx_unlock(&blit_context.mtx);
}
195
/**
 * Blit (parts of) the contents of a DRI image to another dri image
 *
 * \param draw[in]  The drawable which owns the images.
 * \param dst[in]  The destination image.
 * \param src[in]  The source image.
 * \param dstx0[in]  Start destination coordinate.
 * \param dsty0[in]  Start destination coordinate.
 * \param width[in]  Blit width.
 * \param height[in] Blit height.
 * \param srcx0[in]  Start source coordinate.
 * \param srcy0[in]  Start source coordinate.
 * \param flush_flag[in]  Image blit flush flag.
 * \return true iff successful.
 */
static bool
loader_dri3_blit_image(struct loader_dri3_drawable *draw,
                       __DRIimage *dst, __DRIimage *src,
                       int dstx0, int dsty0, int width, int height,
                       int srcx0, int srcy0, int flush_flag)
{
   __DRIcontext *dri_context;
   bool use_blit_context = false;

   if (!loader_dri3_have_image_blit(draw))
      return false;

   dri_context = draw->vtable->get_dri_context(draw);

   /* Fall back to the shared cached blit context when there is no context,
    * or when the drawable's context isn't current on this thread; force a
    * flush in that case since the blit context won't be flushed elsewhere.
    */
   if (!dri_context || !draw->vtable->in_current_context(draw)) {
      dri_context = loader_dri3_blit_context_get(draw);
      use_blit_context = true;
      flush_flag |= __BLIT_FLAG_FLUSH;
   }

   /* Source and destination rectangles share the same size (no scaling). */
   if (dri_context)
      draw->ext->image->blitImage(dri_context, dst, src, dstx0, dsty0,
                                  width, height, srcx0, srcy0,
                                  width, height, flush_flag);

   if (use_blit_context)
      loader_dri3_blit_context_put();

   return dri_context != NULL;
}
241
/* Reset the buffer's shared-memory fence to the unsignaled state.
 * The connection argument is unused here; kept for symmetry with the
 * other fence helpers.
 */
static inline void
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xshmfence_reset(buffer->shm_fence);
}
247
/* Signal the buffer's shared-memory fence from the client side. */
static inline void
dri3_fence_set(struct loader_dri3_buffer *buffer)
{
   xshmfence_trigger(buffer->shm_fence);
}
253
/* Ask the X server to trigger the buffer's fence via the Sync extension. */
static inline void
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_sync_trigger_fence(c, buffer->sync_fence);
}
259
/* Block until the buffer's shared-memory fence is signaled.
 *
 * The connection is flushed first so a pending server-side trigger can
 * actually reach the server.  If \p draw is non-NULL, pending Present
 * events are drained afterwards (under draw->mtx, which must NOT already
 * be held by the caller).
 */
static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
                 struct loader_dri3_buffer *buffer)
{
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
   if (draw) {
      mtx_lock(&draw->mtx);
      dri3_flush_present_events(draw);
      mtx_unlock(&draw->mtx);
   }
}
272
273 static void
274 dri3_update_num_back(struct loader_dri3_drawable *draw)
275 {
276 if (draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP)
277 draw->num_back = 3;
278 else
279 draw->num_back = 2;
280 }
281
/* Set the drawable's swap interval (vblank periods per swap). */
void
loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
{
   draw->swap_interval = interval;
}
287
/** dri3_free_render_buffer
 *
 * Free everything associated with one render buffer including pixmap, fence
 * stuff and the driver image
 *
 * The pixmap is only freed when this buffer created it (own_pixmap);
 * the optional linear_buffer (Prime setups) is destroyed as well.
 */
static void
dri3_free_render_buffer(struct loader_dri3_drawable *draw,
                        struct loader_dri3_buffer *buffer)
{
   if (buffer->own_pixmap)
      xcb_free_pixmap(draw->conn, buffer->pixmap);
   xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
   xshmfence_unmap_shm(buffer->shm_fence);
   draw->ext->image->destroyImage(buffer->image);
   if (buffer->linear_buffer)
      draw->ext->image->destroyImage(buffer->linear_buffer);
   free(buffer);
}
306
/* Tear down a loader_dri3_drawable: destroy the DRI drawable, free all
 * render buffers, deregister the Present special event (if any) and
 * destroy the synchronization primitives.  Inverse of
 * loader_dri3_drawable_init().
 */
void
loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
{
   int i;

   draw->ext->core->destroyDrawable(draw->dri_drawable);

   for (i = 0; i < ARRAY_SIZE(draw->buffers); i++) {
      if (draw->buffers[i])
         dri3_free_render_buffer(draw, draw->buffers[i]);
   }

   if (draw->special_event) {
      /* Stop event delivery before unregistering; discard any error. */
      xcb_void_cookie_t cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_NO_EVENT);

      xcb_discard_reply(draw->conn, cookie.sequence);
      xcb_unregister_for_special_event(draw->conn, draw->special_event);
   }

   cnd_destroy(&draw->event_cnd);
   mtx_destroy(&draw->mtx);
}
331
332 int
333 loader_dri3_drawable_init(xcb_connection_t *conn,
334 xcb_drawable_t drawable,
335 __DRIscreen *dri_screen,
336 bool is_different_gpu,
337 bool multiplanes_available,
338 const __DRIconfig *dri_config,
339 struct loader_dri3_extensions *ext,
340 const struct loader_dri3_vtable *vtable,
341 struct loader_dri3_drawable *draw)
342 {
343 xcb_get_geometry_cookie_t cookie;
344 xcb_get_geometry_reply_t *reply;
345 xcb_generic_error_t *error;
346 GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1;
347 int swap_interval;
348
349 draw->conn = conn;
350 draw->ext = ext;
351 draw->vtable = vtable;
352 draw->drawable = drawable;
353 draw->dri_screen = dri_screen;
354 draw->is_different_gpu = is_different_gpu;
355 draw->multiplanes_available = multiplanes_available;
356
357 draw->have_back = 0;
358 draw->have_fake_front = 0;
359 draw->first_init = true;
360 draw->adaptive_sync = false;
361 draw->adaptive_sync_active = false;
362
363 draw->cur_blit_source = -1;
364 draw->back_format = __DRI_IMAGE_FORMAT_NONE;
365 mtx_init(&draw->mtx, mtx_plain);
366 cnd_init(&draw->event_cnd);
367
368 if (draw->ext->config) {
369 unsigned char adaptive_sync = 0;
370
371 draw->ext->config->configQueryi(draw->dri_screen,
372 "vblank_mode", &vblank_mode);
373
374 draw->ext->config->configQueryb(draw->dri_screen,
375 "adaptive_sync",
376 &adaptive_sync);
377
378 draw->adaptive_sync = adaptive_sync;
379 }
380
381 if (!draw->adaptive_sync)
382 set_adaptive_sync_property(conn, draw->drawable, false);
383
384 switch (vblank_mode) {
385 case DRI_CONF_VBLANK_NEVER:
386 case DRI_CONF_VBLANK_DEF_INTERVAL_0:
387 swap_interval = 0;
388 break;
389 case DRI_CONF_VBLANK_DEF_INTERVAL_1:
390 case DRI_CONF_VBLANK_ALWAYS_SYNC:
391 default:
392 swap_interval = 1;
393 break;
394 }
395 draw->swap_interval = swap_interval;
396
397 dri3_update_num_back(draw);
398
399 /* Create a new drawable */
400 draw->dri_drawable =
401 draw->ext->image_driver->createNewDrawable(dri_screen,
402 dri_config,
403 draw);
404
405 if (!draw->dri_drawable)
406 return 1;
407
408 cookie = xcb_get_geometry(draw->conn, draw->drawable);
409 reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
410 if (reply == NULL || error != NULL) {
411 draw->ext->core->destroyDrawable(draw->dri_drawable);
412 return 1;
413 }
414
415 draw->screen = get_screen_for_root(draw->conn, reply->root);
416 draw->width = reply->width;
417 draw->height = reply->height;
418 draw->depth = reply->depth;
419 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
420 free(reply);
421
422 draw->swap_method = __DRI_ATTRIB_SWAP_UNDEFINED;
423 if (draw->ext->core->base.version >= 2) {
424 (void )draw->ext->core->getConfigAttrib(dri_config,
425 __DRI_ATTRIB_SWAP_METHOD,
426 &draw->swap_method);
427 }
428
429 /*
430 * Make sure server has the same swap interval we do for the new
431 * drawable.
432 */
433 loader_dri3_set_swap_interval(draw, swap_interval);
434
435 return 0;
436 }
437
/*
 * Process one Present event
 *
 * Takes ownership of \p ge and frees it before returning.  Called with
 * draw->mtx held by all callers.
 */
static void
dri3_handle_present_event(struct loader_dri3_drawable *draw,
                          xcb_present_generic_event_t *ge)
{
   switch (ge->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      /* Drawable was resized: update cached geometry and invalidate the
       * DRI drawable so buffers are reallocated.
       */
      xcb_present_configure_notify_event_t *ce = (void *) ge;

      draw->width = ce->width;
      draw->height = ce->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      draw->ext->flush->invalidate(draw->dri_drawable);
      break;
   }
   case XCB_PRESENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *ce = (void *) ge;

      /* Compute the processed SBC number from the received 32-bit serial number
       * merged with the upper 32-bits of the sent 64-bit serial number while
       * checking for wrap.
       */
      if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;

         /* Only assume wraparound if that results in exactly the previous
          * SBC + 1, otherwise ignore received SBC > sent SBC (those are
          * probably from a previous loader_dri3_drawable instance) to avoid
          * calculating bogus target MSC values in loader_dri3_swap_buffers_msc
          */
         if (recv_sbc <= draw->send_sbc)
            draw->recv_sbc = recv_sbc;
         else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
            draw->recv_sbc = recv_sbc - 0x100000000ULL;

         /* When moving from flip to copy, we assume that we can allocate in
          * a more optimal way if we don't need to cater for the display
          * controller.
          */
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
             draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }

         /* If the server tells us that our allocation is suboptimal, we
          * reallocate once.
          */
#ifdef HAVE_DRI3_MODIFIERS
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
             draw->last_present_mode != ce->mode) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }
#endif
         draw->last_present_mode = ce->mode;

         if (draw->vtable->show_fps)
            draw->vtable->show_fps(draw, ce->ust);

         draw->ust = ce->ust;
         draw->msc = ce->msc;
      } else if (ce->serial == draw->eid) {
         /* Completion of a NotifyMSC request we issued with our event id
          * as the serial (see loader_dri3_wait_for_msc).
          */
         draw->notify_ust = ce->ust;
         draw->notify_msc = ce->msc;
      }
      break;
   }
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      /* Server is done with a pixmap; clear the matching buffer's busy
       * flag so it can be reused as a back buffer.
       */
      xcb_present_idle_notify_event_t *ie = (void *) ge;
      int b;

      for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
         struct loader_dri3_buffer *buf = draw->buffers[b];

         if (buf && buf->pixmap == ie->pixmap)
            buf->busy = 0;
      }
      break;
   }
   }
   free(ge);
}
527
/* Wait for (and process) one Present special event.
 *
 * Must be called with draw->mtx held; the lock is dropped while actually
 * blocking in xcb so other threads can use the drawable.  Only one thread
 * waits in xcb at a time: latecomers block on event_cnd until the waiter
 * is done, then return so the caller can re-test its condition.
 *
 * \param full_sequence[out]  Optional; receives the X full sequence number
 *        of the event that was processed (or, for a thread that merely
 *        waited on the condition variable, the sequence of the last event
 *        processed by the waiting thread).
 * \return false on connection / special-event failure, true otherwise.
 */
static bool
dri3_wait_for_event_locked(struct loader_dri3_drawable *draw,
                           unsigned *full_sequence)
{
   xcb_generic_event_t *ev;
   xcb_present_generic_event_t *ge;

   xcb_flush(draw->conn);

   /* Only have one thread waiting for events at a time */
   if (draw->has_event_waiter) {
      cnd_wait(&draw->event_cnd, &draw->mtx);
      if (full_sequence)
         *full_sequence = draw->last_special_event_sequence;
      /* Another thread has updated the protected info, so retest. */
      return true;
   } else {
      draw->has_event_waiter = true;
      /* Allow other threads access to the drawable while we're waiting. */
      mtx_unlock(&draw->mtx);
      ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
      mtx_lock(&draw->mtx);
      draw->has_event_waiter = false;
      cnd_broadcast(&draw->event_cnd);
   }
   if (!ev)
      return false;
   draw->last_special_event_sequence = ev->full_sequence;
   if (full_sequence)
      *full_sequence = ev->full_sequence;
   ge = (void *) ev;
   dri3_handle_present_event(draw, ge);
   return true;
}
562
/** loader_dri3_wait_for_msc
 *
 * Get the X server to send an event when the target msc/divisor/remainder is
 * reached.
 *
 * Issues a PresentNotifyMSC request (with draw->eid as its serial) and
 * blocks until the matching CompleteNotify arrives and draw->notify_msc
 * reaches the target.  Note this waits directly on the special event
 * queue rather than through dri3_wait_for_event_locked().
 *
 * On success returns true and fills *ust, *msc and *sbc; returns false
 * if the event stream breaks.
 */
bool
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
                         int64_t target_msc,
                         int64_t divisor, int64_t remainder,
                         int64_t *ust, int64_t *msc, int64_t *sbc)
{
   xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
                                                     draw->drawable,
                                                     draw->eid,
                                                     target_msc,
                                                     divisor,
                                                     remainder);
   xcb_generic_event_t *ev;
   unsigned full_sequence;

   mtx_lock(&draw->mtx);
   xcb_flush(draw->conn);

   /* Wait for the event */
   do {
      ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
      if (!ev) {
         mtx_unlock(&draw->mtx);
         return false;
      }

      full_sequence = ev->full_sequence;
      dri3_handle_present_event(draw, (void *) ev);
      /* Keep waiting until we've seen the reply to our own request and
       * the notified MSC has caught up with the target.
       */
   } while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);

   *ust = draw->notify_ust;
   *msc = draw->notify_msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);

   return true;
}
605
/** loader_dri3_wait_for_sbc
 *
 * Wait for the completed swap buffer count to reach the specified
 * target. Presumably the application knows that this will be reached with
 * outstanding complete events, or we're going to be here awhile.
 *
 * Returns 1 on success (filling *ust, *msc, *sbc), 0 if waiting for an
 * event failed.
 */
int
loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
                         int64_t target_sbc, int64_t *ust,
                         int64_t *msc, int64_t *sbc)
{
   /* From the GLX_OML_sync_control spec:
    *
    *     "If <target_sbc> = 0, the function will block until all previous
    *      swaps requested with glXSwapBuffersMscOML for that window have
    *      completed."
    */
   mtx_lock(&draw->mtx);
   if (!target_sbc)
      target_sbc = draw->send_sbc;

   while (draw->recv_sbc < target_sbc) {
      if (!dri3_wait_for_event_locked(draw, NULL)) {
         mtx_unlock(&draw->mtx);
         return 0;
      }
   }

   *ust = draw->ust;
   *msc = draw->msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);
   return 1;
}
640
/** loader_dri3_find_back
 *
 * Find an idle back buffer. If there isn't one, then
 * wait for a present idle notify event from the X server
 *
 * Returns the buffer id of an idle (or not-yet-allocated) back buffer,
 * or -1 if waiting for an event failed.  Takes and releases draw->mtx.
 */
static int
dri3_find_back(struct loader_dri3_drawable *draw)
{
   int b;
   int num_to_consider;

   mtx_lock(&draw->mtx);
   /* Increase the likelyhood of reusing current buffer */
   dri3_flush_present_events(draw);

   /* Check whether we need to reuse the current back buffer as new back.
    * In that case, wait until it's not busy anymore.
    */
   num_to_consider = draw->num_back;
   if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) {
      num_to_consider = 1;
      draw->cur_blit_source = -1;
   }

   for (;;) {
      /* Scan starting at the current back so it is preferred when idle. */
      for (b = 0; b < num_to_consider; b++) {
         int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->num_back);
         struct loader_dri3_buffer *buffer = draw->buffers[id];

         if (!buffer || !buffer->busy) {
            draw->cur_back = id;
            mtx_unlock(&draw->mtx);
            return id;
         }
      }
      /* All candidates busy: block until the server sends more events. */
      if (!dri3_wait_for_event_locked(draw, NULL)) {
         mtx_unlock(&draw->mtx);
         return -1;
      }
   }
}
682
683 static xcb_gcontext_t
684 dri3_drawable_gc(struct loader_dri3_drawable *draw)
685 {
686 if (!draw->gc) {
687 uint32_t v = 0;
688 xcb_create_gc(draw->conn,
689 (draw->gc = xcb_generate_id(draw->conn)),
690 draw->drawable,
691 XCB_GC_GRAPHICS_EXPOSURES,
692 &v);
693 }
694 return draw->gc;
695 }
696
697
698 static struct loader_dri3_buffer *
699 dri3_back_buffer(struct loader_dri3_drawable *draw)
700 {
701 return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
702 }
703
704 static struct loader_dri3_buffer *
705 dri3_fake_front_buffer(struct loader_dri3_drawable *draw)
706 {
707 return draw->buffers[LOADER_DRI3_FRONT_ID];
708 }
709
710 static void
711 dri3_copy_area(xcb_connection_t *c,
712 xcb_drawable_t src_drawable,
713 xcb_drawable_t dst_drawable,
714 xcb_gcontext_t gc,
715 int16_t src_x,
716 int16_t src_y,
717 int16_t dst_x,
718 int16_t dst_y,
719 uint16_t width,
720 uint16_t height)
721 {
722 xcb_void_cookie_t cookie;
723
724 cookie = xcb_copy_area_checked(c,
725 src_drawable,
726 dst_drawable,
727 gc,
728 src_x,
729 src_y,
730 dst_x,
731 dst_y,
732 width,
733 height);
734 xcb_discard_reply(c, cookie.sequence);
735 }
736
/**
 * Asks the driver to flush any queued work necessary for serializing with the
 * X command stream, and optionally the slightly more strict requirement of
 * glFlush() equivalence (which would require flushing even if nothing had
 * been drawn to a window system framebuffer, for example).
 */
void
loader_dri3_flush(struct loader_dri3_drawable *draw,
                  unsigned flags,
                  enum __DRI2throttleReason throttle_reason)
{
   /* get_dri_context() may return NULL (presumably when no context is
    * current on this drawable — confirm with callers); nothing to flush
    * in that case.
    */
   __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);

   if (dri_context) {
      draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
                                         flags, throttle_reason);
   }
}
756
/* Copy a sub-rectangle of the current back buffer to the real front
 * (and keep a fake front, if present, in sync).
 *
 * \param x,y            Sub-rectangle origin in GL coordinates (y is
 *                       flipped to X coordinates internally).
 * \param width,height   Sub-rectangle size.
 * \param flush          Whether to additionally request a context flush.
 */
void
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
                            int x, int y,
                            int width, int height,
                            bool flush)
{
   struct loader_dri3_buffer *back;
   unsigned flags = __DRI2_FLUSH_DRAWABLE;

   /* Check we have the right attachments */
   if (!draw->have_back || draw->is_pixmap)
      return;

   if (flush)
      flags |= __DRI2_FLUSH_CONTEXT;
   loader_dri3_flush(draw, flags, __DRI2_THROTTLE_COPYSUBBUFFER);

   back = dri3_find_back_alloc(draw);
   if (!back)
      return;

   /* Convert from GL (origin bottom-left) to X (origin top-left). */
   y = draw->height - y - height;

   if (draw->is_different_gpu) {
      /* Update the linear buffer part of the back buffer
       * for the dri3_copy_area operation
       */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   loader_dri3_swapbuffer_barrier(draw);
   dri3_fence_reset(draw->conn, back);
   dri3_copy_area(draw->conn,
                  back->pixmap,
                  draw->drawable,
                  dri3_drawable_gc(draw),
                  x, y, x, y, width, height);
   dri3_fence_trigger(draw->conn, back);
   /* Refresh the fake front (if present) after we just damaged the real
    * front.
    */
   if (draw->have_fake_front &&
       !loader_dri3_blit_image(draw,
                               dri3_fake_front_buffer(draw)->image,
                               back->image,
                               x, y, width, height,
                               x, y, __BLIT_FLAG_FLUSH) &&
       !draw->is_different_gpu) {
      /* No local blit available: fall back to a server-side copy. */
      dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
      dri3_copy_area(draw->conn,
                     back->pixmap,
                     dri3_fake_front_buffer(draw)->pixmap,
                     dri3_drawable_gc(draw),
                     x, y, x, y, width, height);
      dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
      dri3_fence_await(draw->conn, NULL, dri3_fake_front_buffer(draw));
   }
   dri3_fence_await(draw->conn, draw, back);
}
820
/* Server-side copy of the whole drawable area from \p src to \p dest,
 * fenced via the fake front buffer's fence so the caller returns only
 * after the copy has completed.
 */
void
loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
                          xcb_drawable_t dest,
                          xcb_drawable_t src)
{
   loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, __DRI2_THROTTLE_COPYSUBBUFFER);

   dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
   dri3_copy_area(draw->conn,
                  src, dest,
                  dri3_drawable_gc(draw),
                  0, 0, 0, 0, draw->width, draw->height);
   dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
   dri3_fence_await(draw->conn, draw, dri3_fake_front_buffer(draw));
}
836
/* Update the fake front from the real front (server → client direction).
 * No-op when there is no fake front.
 */
void
loader_dri3_wait_x(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);

   /* In the psc->is_different_gpu case, the linear buffer has been updated,
    * but not yet the tiled buffer.
    * Copy back to the tiled buffer we use for rendering.
    * Note that we don't need flushing.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->image,
                                    front->linear_buffer,
                                    0, 0, front->width, front->height,
                                    0, 0, 0);
}
861
/* Update the real front from the fake front (client → server direction).
 * No-op when there is no fake front.
 */
void
loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   /* In the psc->is_different_gpu case, we update the linear_buffer
    * before updating the real front.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->linear_buffer,
                                    front->image,
                                    0, 0, front->width, front->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   loader_dri3_swapbuffer_barrier(draw);
   loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
}
884
/** dri3_flush_present_events
 *
 * Process any present events that have been received from the X server
 *
 * Called with draw->mtx held.  If another thread is already blocked in
 * xcb_wait_for_special_event (has_event_waiter), do nothing — that
 * thread will process the events.
 */
static void
dri3_flush_present_events(struct loader_dri3_drawable *draw)
{
   /* Check to see if any configuration changes have occurred
    * since we were last invoked
    */
   if (draw->has_event_waiter)
      return;

   if (draw->special_event) {
      xcb_generic_event_t *ev;

      /* Non-blocking drain of everything already queued. */
      while ((ev = xcb_poll_for_special_event(draw->conn,
                                              draw->special_event)) != NULL) {
         xcb_present_generic_event_t *ge = (void *) ev;
         dri3_handle_present_event(draw, ge);
      }
   }
}
908
/** loader_dri3_swap_buffers_msc
 *
 * Make the current back buffer visible using the present extension
 *
 * \param target_msc/divisor/remainder  OML_sync_control scheduling triple;
 *        all zero means plain glXSwapBuffers() semantics.
 * \param flush_flags  Passed to the vtable's flush_drawable.
 * \param force_copy   Used by EGL to preserve the back buffer contents.
 * \return The swap buffer count (send_sbc) of the queued present, or 0
 *         when nothing was presented.
 */
int64_t
loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
                             int64_t target_msc, int64_t divisor,
                             int64_t remainder, unsigned flush_flags,
                             bool force_copy)
{
   struct loader_dri3_buffer *back;
   int64_t ret = 0;
   uint32_t options = XCB_PRESENT_OPTION_NONE;

   draw->vtable->flush_drawable(draw, flush_flags);

   back = dri3_find_back_alloc(draw);

   mtx_lock(&draw->mtx);

   /* First present on an adaptive-sync drawable: set the window property. */
   if (draw->adaptive_sync && !draw->adaptive_sync_active) {
      set_adaptive_sync_property(draw->conn, draw->drawable, true);
      draw->adaptive_sync_active = true;
   }

   if (draw->is_different_gpu && back) {
      /* Update the linear buffer before presenting the pixmap */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   /* If we need to preload the new back buffer, remember the source.
    * The force_copy parameter is used by EGL to attempt to preserve
    * the back buffer across a call to this function.
    */
   if (draw->swap_method != __DRI_ATTRIB_SWAP_UNDEFINED || force_copy)
      draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);

   /* Exchange the back and fake front. Even though the server knows about these
    * buffers, it has no notion of back and fake front.
    */
   if (back && draw->have_fake_front) {
      struct loader_dri3_buffer *tmp;

      tmp = dri3_fake_front_buffer(draw);
      draw->buffers[LOADER_DRI3_FRONT_ID] = back;
      draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp;

      if (draw->swap_method == __DRI_ATTRIB_SWAP_COPY || force_copy)
         draw->cur_blit_source = LOADER_DRI3_FRONT_ID;
   }

   dri3_flush_present_events(draw);

   if (back && !draw->is_pixmap) {
      dri3_fence_reset(draw->conn, back);

      /* Compute when we want the frame shown by taking the last known
       * successful MSC and adding in a swap interval for each outstanding swap
       * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
       * semantic"
       */
      ++draw->send_sbc;
      if (target_msc == 0 && divisor == 0 && remainder == 0)
         target_msc = draw->msc + draw->swap_interval *
                      (draw->send_sbc - draw->recv_sbc);
      else if (divisor == 0 && remainder > 0) {
         /* From the GLX_OML_sync_control spec:
          *     "If <divisor> = 0, the swap will occur when MSC becomes
          *      greater than or equal to <target_msc>."
          *
          * Note that there's no mention of the remainder.  The Present
          * extension throws BadValue for remainder != 0 with divisor == 0, so
          * just drop the passed in value.
          */
         remainder = 0;
      }

      /* From the GLX_EXT_swap_control spec
       * and the EGL 1.4 spec (page 53):
       *
       *     "If <interval> is set to a value of 0, buffer swaps are not
       *      synchronized to a video frame."
       *
       * Implementation note: It is possible to enable triple buffering
       * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
       * the default.
       */
      if (draw->swap_interval == 0)
         options |= XCB_PRESENT_OPTION_ASYNC;

      /* If we need to populate the new back, but need to reuse the back
       * buffer slot due to lack of local blit capabilities, make sure
       * the server doesn't flip and we deadlock.
       */
      if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1)
         options |= XCB_PRESENT_OPTION_COPY;
#ifdef HAVE_DRI3_MODIFIERS
      if (draw->multiplanes_available)
         options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif
      back->busy = 1;
      back->last_swap = draw->send_sbc;
      xcb_present_pixmap(draw->conn,
                         draw->drawable,
                         back->pixmap,
                         (uint32_t) draw->send_sbc,
                         0,                                    /* valid */
                         0,                                    /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         None,                                 /* target_crtc */
                         None,
                         back->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
      ret = (int64_t) draw->send_sbc;

      /* Schedule a server-side back-preserving blit if necessary.
       * This happens iff all conditions below are satisfied:
       * a) We have a fake front,
       * b) We need to preserve the back buffer,
       * c) We don't have local blit capabilities.
       */
      if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1 &&
          draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) {
         struct loader_dri3_buffer *new_back = dri3_back_buffer(draw);
         struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source];

         dri3_fence_reset(draw->conn, new_back);
         dri3_copy_area(draw->conn, src->pixmap,
                        new_back->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0, draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_back);
         new_back->last_swap = src->last_swap;
      }

      xcb_flush(draw->conn);
      if (draw->stamp)
         ++(*draw->stamp);
   }
   mtx_unlock(&draw->mtx);

   draw->ext->flush->invalidate(draw->dri_drawable);

   return ret;
}
1062
1063 int
1064 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
1065 {
1066 struct loader_dri3_buffer *back = dri3_find_back_alloc(draw);
1067 int ret;
1068
1069 mtx_lock(&draw->mtx);
1070 ret = (!back || back->last_swap == 0) ? 0 :
1071 draw->send_sbc - back->last_swap + 1;
1072 mtx_unlock(&draw->mtx);
1073
1074 return ret;
1075 }
1076
1077 /** loader_dri3_open
1078 *
1079 * Wrapper around xcb_dri3_open
1080 */
1081 int
1082 loader_dri3_open(xcb_connection_t *conn,
1083 xcb_window_t root,
1084 uint32_t provider)
1085 {
1086 xcb_dri3_open_cookie_t cookie;
1087 xcb_dri3_open_reply_t *reply;
1088 int fd;
1089
1090 cookie = xcb_dri3_open(conn,
1091 root,
1092 provider);
1093
1094 reply = xcb_dri3_open_reply(conn, cookie, NULL);
1095 if (!reply)
1096 return -1;
1097
1098 if (reply->nfd != 1) {
1099 free(reply);
1100 return -1;
1101 }
1102
1103 fd = xcb_dri3_open_reply_fds(conn, reply)[0];
1104 free(reply);
1105 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
1106
1107 return fd;
1108 }
1109
1110 static uint32_t
1111 dri3_cpp_for_format(uint32_t format) {
1112 switch (format) {
1113 case __DRI_IMAGE_FORMAT_R8:
1114 return 1;
1115 case __DRI_IMAGE_FORMAT_RGB565:
1116 case __DRI_IMAGE_FORMAT_GR88:
1117 return 2;
1118 case __DRI_IMAGE_FORMAT_XRGB8888:
1119 case __DRI_IMAGE_FORMAT_ARGB8888:
1120 case __DRI_IMAGE_FORMAT_ABGR8888:
1121 case __DRI_IMAGE_FORMAT_XBGR8888:
1122 case __DRI_IMAGE_FORMAT_XRGB2101010:
1123 case __DRI_IMAGE_FORMAT_ARGB2101010:
1124 case __DRI_IMAGE_FORMAT_XBGR2101010:
1125 case __DRI_IMAGE_FORMAT_ABGR2101010:
1126 case __DRI_IMAGE_FORMAT_SARGB8:
1127 case __DRI_IMAGE_FORMAT_SABGR8:
1128 case __DRI_IMAGE_FORMAT_SXRGB8:
1129 return 4;
1130 case __DRI_IMAGE_FORMAT_XBGR16161616F:
1131 case __DRI_IMAGE_FORMAT_ABGR16161616F:
1132 return 8;
1133 case __DRI_IMAGE_FORMAT_NONE:
1134 default:
1135 return 0;
1136 }
1137 }
1138
1139 /* Map format of render buffer to corresponding format for the linear_buffer
1140 * used for sharing with the display gpu of a Prime setup (== is_different_gpu).
1141 * Usually linear_format == format, except for depth >= 30 formats, where
1142 * different gpu vendors have different preferences wrt. color channel ordering.
1143 */
1144 static uint32_t
1145 dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
1146 {
1147 switch (format) {
1148 case __DRI_IMAGE_FORMAT_XRGB2101010:
1149 case __DRI_IMAGE_FORMAT_XBGR2101010:
1150 /* Different preferred formats for different hw */
1151 if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1152 return __DRI_IMAGE_FORMAT_XBGR2101010;
1153 else
1154 return __DRI_IMAGE_FORMAT_XRGB2101010;
1155
1156 case __DRI_IMAGE_FORMAT_ARGB2101010:
1157 case __DRI_IMAGE_FORMAT_ABGR2101010:
1158 /* Different preferred formats for different hw */
1159 if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1160 return __DRI_IMAGE_FORMAT_ABGR2101010;
1161 else
1162 return __DRI_IMAGE_FORMAT_ARGB2101010;
1163
1164 default:
1165 return format;
1166 }
1167 }
1168
1169 /* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
1170 * the createImageFromFds call takes DRM_FORMAT codes. To avoid
1171 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
1172 * translate to DRM_FORMAT codes in the call to createImageFromFds
1173 */
1174 static int
1175 image_format_to_fourcc(int format)
1176 {
1177
1178 /* Convert from __DRI_IMAGE_FORMAT to DRM_FORMAT (sigh) */
1179 switch (format) {
1180 case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
1181 case __DRI_IMAGE_FORMAT_SABGR8: return __DRI_IMAGE_FOURCC_SABGR8888;
1182 case __DRI_IMAGE_FORMAT_SXRGB8: return __DRI_IMAGE_FOURCC_SXRGB8888;
1183 case __DRI_IMAGE_FORMAT_RGB565: return DRM_FORMAT_RGB565;
1184 case __DRI_IMAGE_FORMAT_XRGB8888: return DRM_FORMAT_XRGB8888;
1185 case __DRI_IMAGE_FORMAT_ARGB8888: return DRM_FORMAT_ARGB8888;
1186 case __DRI_IMAGE_FORMAT_ABGR8888: return DRM_FORMAT_ABGR8888;
1187 case __DRI_IMAGE_FORMAT_XBGR8888: return DRM_FORMAT_XBGR8888;
1188 case __DRI_IMAGE_FORMAT_XRGB2101010: return DRM_FORMAT_XRGB2101010;
1189 case __DRI_IMAGE_FORMAT_ARGB2101010: return DRM_FORMAT_ARGB2101010;
1190 case __DRI_IMAGE_FORMAT_XBGR2101010: return DRM_FORMAT_XBGR2101010;
1191 case __DRI_IMAGE_FORMAT_ABGR2101010: return DRM_FORMAT_ABGR2101010;
1192 case __DRI_IMAGE_FORMAT_XBGR16161616F: return DRM_FORMAT_XBGR16161616F;
1193 case __DRI_IMAGE_FORMAT_ABGR16161616F: return DRM_FORMAT_ABGR16161616F;
1194 }
1195 return 0;
1196 }
1197
#ifdef HAVE_DRI3_MODIFIERS
/* Return true iff any modifier in `modifiers` is also advertised by the
 * driver for `format` (queried via queryDmaBufModifiers).
 */
static bool
has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
                       uint64_t *modifiers, uint32_t count)
{
   uint64_t *driver_mods;
   int32_t driver_count;
   bool match = false;

   /* First query just the count. */
   if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen,
                                               format, 0, NULL, NULL,
                                               &driver_count) ||
       driver_count == 0)
      return false;

   driver_mods = malloc(driver_count * sizeof(uint64_t));
   if (!driver_mods)
      return false;

   /* Second query fills the list. */
   draw->ext->image->queryDmaBufModifiers(draw->dri_screen, format,
                                          driver_count,
                                          driver_mods, NULL,
                                          &driver_count);

   for (int32_t i = 0; i < driver_count && !match; i++) {
      for (uint32_t j = 0; j < count; j++) {
         if (driver_mods[i] == modifiers[j]) {
            match = true;
            break;
         }
      }
   }

   free(driver_mods);
   return match;
}
#endif
1234
1235 /** loader_dri3_alloc_render_buffer
1236 *
1237 * Use the driver createImage function to construct a __DRIimage, then
1238 * get a file descriptor for that and create an X pixmap from that
1239 *
1240 * Allocate an xshmfence for synchronization
1241 */
1242 static struct loader_dri3_buffer *
1243 dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
1244 int width, int height, int depth)
1245 {
1246 struct loader_dri3_buffer *buffer;
1247 __DRIimage *pixmap_buffer;
1248 xcb_pixmap_t pixmap;
1249 xcb_sync_fence_t sync_fence;
1250 struct xshmfence *shm_fence;
1251 int buffer_fds[4], fence_fd;
1252 int num_planes = 0;
1253 int i, mod;
1254 int ret;
1255
1256 /* Create an xshmfence object and
1257 * prepare to send that to the X server
1258 */
1259
1260 fence_fd = xshmfence_alloc_shm();
1261 if (fence_fd < 0)
1262 return NULL;
1263
1264 shm_fence = xshmfence_map_shm(fence_fd);
1265 if (shm_fence == NULL)
1266 goto no_shm_fence;
1267
1268 /* Allocate the image from the driver
1269 */
1270 buffer = calloc(1, sizeof *buffer);
1271 if (!buffer)
1272 goto no_buffer;
1273
1274 buffer->cpp = dri3_cpp_for_format(format);
1275 if (!buffer->cpp)
1276 goto no_image;
1277
1278 if (!draw->is_different_gpu) {
1279 #ifdef HAVE_DRI3_MODIFIERS
1280 if (draw->multiplanes_available &&
1281 draw->ext->image->base.version >= 15 &&
1282 draw->ext->image->queryDmaBufModifiers &&
1283 draw->ext->image->createImageWithModifiers) {
1284 xcb_dri3_get_supported_modifiers_cookie_t mod_cookie;
1285 xcb_dri3_get_supported_modifiers_reply_t *mod_reply;
1286 xcb_generic_error_t *error = NULL;
1287 uint64_t *modifiers = NULL;
1288 uint32_t count = 0;
1289
1290 mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn,
1291 draw->window,
1292 depth, buffer->cpp * 8);
1293 mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn,
1294 mod_cookie,
1295 &error);
1296 if (!mod_reply)
1297 goto no_image;
1298
1299 if (mod_reply->num_window_modifiers) {
1300 count = mod_reply->num_window_modifiers;
1301 modifiers = malloc(count * sizeof(uint64_t));
1302 if (!modifiers) {
1303 free(mod_reply);
1304 goto no_image;
1305 }
1306
1307 memcpy(modifiers,
1308 xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
1309 count * sizeof(uint64_t));
1310
1311 if (!has_supported_modifier(draw, image_format_to_fourcc(format),
1312 modifiers, count)) {
1313 free(modifiers);
1314 count = 0;
1315 modifiers = NULL;
1316 }
1317 }
1318
1319 if (mod_reply->num_screen_modifiers && modifiers == NULL) {
1320 count = mod_reply->num_screen_modifiers;
1321 modifiers = malloc(count * sizeof(uint64_t));
1322 if (!modifiers) {
1323 free(modifiers);
1324 free(mod_reply);
1325 goto no_image;
1326 }
1327
1328 memcpy(modifiers,
1329 xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
1330 count * sizeof(uint64_t));
1331 }
1332
1333 free(mod_reply);
1334
1335 /* don't use createImageWithModifiers() if we have no
1336 * modifiers, other things depend on the use flags when
1337 * there are no modifiers to know that a buffer can be
1338 * shared.
1339 */
1340 if (modifiers) {
1341 buffer->image = draw->ext->image->createImageWithModifiers(draw->dri_screen,
1342 width, height,
1343 format,
1344 modifiers,
1345 count,
1346 buffer);
1347 }
1348
1349 free(modifiers);
1350 }
1351 #endif
1352 if (!buffer->image)
1353 buffer->image = draw->ext->image->createImage(draw->dri_screen,
1354 width, height,
1355 format,
1356 __DRI_IMAGE_USE_SHARE |
1357 __DRI_IMAGE_USE_SCANOUT |
1358 __DRI_IMAGE_USE_BACKBUFFER,
1359 buffer);
1360
1361 pixmap_buffer = buffer->image;
1362
1363 if (!buffer->image)
1364 goto no_image;
1365 } else {
1366 buffer->image = draw->ext->image->createImage(draw->dri_screen,
1367 width, height,
1368 format,
1369 0,
1370 buffer);
1371
1372 if (!buffer->image)
1373 goto no_image;
1374
1375 buffer->linear_buffer =
1376 draw->ext->image->createImage(draw->dri_screen,
1377 width, height,
1378 dri3_linear_format_for_format(draw, format),
1379 __DRI_IMAGE_USE_SHARE |
1380 __DRI_IMAGE_USE_LINEAR |
1381 __DRI_IMAGE_USE_BACKBUFFER,
1382 buffer);
1383 pixmap_buffer = buffer->linear_buffer;
1384
1385 if (!buffer->linear_buffer)
1386 goto no_linear_buffer;
1387 }
1388
1389 /* X want some information about the planes, so ask the image for it
1390 */
1391 if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
1392 &num_planes))
1393 num_planes = 1;
1394
1395 for (i = 0; i < num_planes; i++) {
1396 __DRIimage *image = draw->ext->image->fromPlanar(pixmap_buffer, i, NULL);
1397
1398 if (!image) {
1399 assert(i == 0);
1400 image = pixmap_buffer;
1401 }
1402
1403 buffer_fds[i] = -1;
1404
1405 ret = draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD,
1406 &buffer_fds[i]);
1407 ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE,
1408 &buffer->strides[i]);
1409 ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET,
1410 &buffer->offsets[i]);
1411 if (image != pixmap_buffer)
1412 draw->ext->image->destroyImage(image);
1413
1414 if (!ret)
1415 goto no_buffer_attrib;
1416 }
1417
1418 ret = draw->ext->image->queryImage(pixmap_buffer,
1419 __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
1420 buffer->modifier = (uint64_t) mod << 32;
1421 ret &= draw->ext->image->queryImage(pixmap_buffer,
1422 __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
1423 buffer->modifier |= (uint64_t)(mod & 0xffffffff);
1424
1425 if (!ret)
1426 buffer->modifier = DRM_FORMAT_MOD_INVALID;
1427
1428 pixmap = xcb_generate_id(draw->conn);
1429 #ifdef HAVE_DRI3_MODIFIERS
1430 if (draw->multiplanes_available &&
1431 buffer->modifier != DRM_FORMAT_MOD_INVALID) {
1432 xcb_dri3_pixmap_from_buffers(draw->conn,
1433 pixmap,
1434 draw->window,
1435 num_planes,
1436 width, height,
1437 buffer->strides[0], buffer->offsets[0],
1438 buffer->strides[1], buffer->offsets[1],
1439 buffer->strides[2], buffer->offsets[2],
1440 buffer->strides[3], buffer->offsets[3],
1441 depth, buffer->cpp * 8,
1442 buffer->modifier,
1443 buffer_fds);
1444 } else
1445 #endif
1446 {
1447 xcb_dri3_pixmap_from_buffer(draw->conn,
1448 pixmap,
1449 draw->drawable,
1450 buffer->size,
1451 width, height, buffer->strides[0],
1452 depth, buffer->cpp * 8,
1453 buffer_fds[0]);
1454 }
1455
1456 xcb_dri3_fence_from_fd(draw->conn,
1457 pixmap,
1458 (sync_fence = xcb_generate_id(draw->conn)),
1459 false,
1460 fence_fd);
1461
1462 buffer->pixmap = pixmap;
1463 buffer->own_pixmap = true;
1464 buffer->sync_fence = sync_fence;
1465 buffer->shm_fence = shm_fence;
1466 buffer->width = width;
1467 buffer->height = height;
1468
1469 /* Mark the buffer as idle
1470 */
1471 dri3_fence_set(buffer);
1472
1473 return buffer;
1474
1475 no_buffer_attrib:
1476 do {
1477 if (buffer_fds[i] != -1)
1478 close(buffer_fds[i]);
1479 } while (--i >= 0);
1480 draw->ext->image->destroyImage(pixmap_buffer);
1481 no_linear_buffer:
1482 if (draw->is_different_gpu)
1483 draw->ext->image->destroyImage(buffer->image);
1484 no_image:
1485 free(buffer);
1486 no_buffer:
1487 xshmfence_unmap_shm(shm_fence);
1488 no_shm_fence:
1489 close(fence_fd);
1490 return NULL;
1491 }
1492
/** dri3_update_drawable (exposed historically as loader_dri3_update_drawable)
 *
 * Called the first time we use the drawable and then
 * after we receive present configure notify events to
 * track the geometry of the drawable.
 *
 * On first use it selects for Present events, registers a special XCB
 * event queue for them, queries Present capabilities, and detects
 * whether the drawable is a pixmap (via a BadWindow error from the
 * select-input request).
 *
 * \return true on success, false if the geometry query or (non-BadWindow)
 *         select-input request failed. Returns an int used as a boolean.
 */
static int
dri3_update_drawable(struct loader_dri3_drawable *draw)
{
   mtx_lock(&draw->mtx);
   if (draw->first_init) {
      xcb_get_geometry_cookie_t geom_cookie;
      xcb_get_geometry_reply_t *geom_reply;
      xcb_void_cookie_t cookie;
      xcb_generic_error_t *error;
      xcb_present_query_capabilities_cookie_t present_capabilities_cookie;
      xcb_present_query_capabilities_reply_t *present_capabilities_reply;
      xcb_window_t root_win;

      draw->first_init = false;

      /* Try to select for input on the window.
       *
       * If the drawable is a window, this will get our events
       * delivered.
       *
       * Otherwise, we'll get a BadWindow error back from this request which
       * will let us know that the drawable is a pixmap instead.
       */

      draw->eid = xcb_generate_id(draw->conn);
      cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

      present_capabilities_cookie =
         xcb_present_query_capabilities(draw->conn, draw->drawable);

      /* Create an XCB event queue to hold present events outside of the usual
       * application event queue
       */
      draw->special_event = xcb_register_for_special_xge(draw->conn,
                                                         &xcb_present_id,
                                                         draw->eid,
                                                         draw->stamp);
      geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);

      geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);

      if (!geom_reply) {
         /* NOTE(review): this early exit leaves draw->special_event
          * registered and never collects the select-input /
          * capabilities replies — looks like a leak on this (rare)
          * failure path; confirm against callers before changing.
          */
         mtx_unlock(&draw->mtx);
         return false;
      }
      draw->width = geom_reply->width;
      draw->height = geom_reply->height;
      draw->depth = geom_reply->depth;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      /* Remember the root window: used as the pixmap's "window" below. */
      root_win = geom_reply->root;

      free(geom_reply);

      draw->is_pixmap = false;

      /* Check to see if our select input call failed. If it failed with a
       * BadWindow error, then assume the drawable is a pixmap. Destroy the
       * special event queue created above and mark the drawable as a pixmap
       */

      error = xcb_request_check(draw->conn, cookie);

      present_capabilities_reply =
         xcb_present_query_capabilities_reply(draw->conn,
                                              present_capabilities_cookie,
                                              NULL);

      if (present_capabilities_reply) {
         draw->present_capabilities = present_capabilities_reply->capabilities;
         free(present_capabilities_reply);
      } else
         draw->present_capabilities = 0;

      if (error) {
         /* Any error other than BadWindow is a genuine failure. */
         if (error->error_code != BadWindow) {
            free(error);
            mtx_unlock(&draw->mtx);
            return false;
         }
         free(error);
         draw->is_pixmap = true;
         xcb_unregister_for_special_event(draw->conn, draw->special_event);
         draw->special_event = NULL;
      }

      /* Pixmaps have no window of their own; fall back to the root. */
      if (draw->is_pixmap)
         draw->window = root_win;
      else
         draw->window = draw->drawable;
   }
   /* Drain any Present events that arrived while we weren't looking. */
   dri3_flush_present_events(draw);
   mtx_unlock(&draw->mtx);
   return true;
}
1597
1598 __DRIimage *
1599 loader_dri3_create_image(xcb_connection_t *c,
1600 xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1601 unsigned int format,
1602 __DRIscreen *dri_screen,
1603 const __DRIimageExtension *image,
1604 void *loaderPrivate)
1605 {
1606 int *fds;
1607 __DRIimage *image_planar, *ret;
1608 int stride, offset;
1609
1610 /* Get an FD for the pixmap object
1611 */
1612 fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1613
1614 stride = bp_reply->stride;
1615 offset = 0;
1616
1617 /* createImageFromFds creates a wrapper __DRIimage structure which
1618 * can deal with multiple planes for things like Yuv images. So, once
1619 * we've gotten the planar wrapper, pull the single plane out of it and
1620 * discard the wrapper.
1621 */
1622 image_planar = image->createImageFromFds(dri_screen,
1623 bp_reply->width,
1624 bp_reply->height,
1625 image_format_to_fourcc(format),
1626 fds, 1,
1627 &stride, &offset, loaderPrivate);
1628 close(fds[0]);
1629 if (!image_planar)
1630 return NULL;
1631
1632 ret = image->fromPlanar(image_planar, 0, loaderPrivate);
1633
1634 if (!ret)
1635 ret = image_planar;
1636 else
1637 image->destroyImage(image_planar);
1638
1639 return ret;
1640 }
1641
#ifdef HAVE_DRI3_MODIFIERS
/** loader_dri3_create_image_from_buffers
 *
 * Wrap the (possibly multi-plane) DRM buffers returned by DRI3's
 * BuffersFromPixmap in a single __DRIimage, honoring the reply's
 * format modifier. All fds from the reply are consumed (closed) here.
 * Returns NULL on failure or if the reply carries more than 4 planes.
 */
__DRIimage *
loader_dri3_create_image_from_buffers(xcb_connection_t *c,
                                      xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
                                      unsigned int format,
                                      __DRIscreen *dri_screen,
                                      const __DRIimageExtension *image,
                                      void *loaderPrivate)
{
   int strides[4], offsets[4];
   unsigned error;
   __DRIimage *img;
   int *fds;
   uint32_t *reply_strides, *reply_offsets;

   if (bp_reply->nfd > 4)
      return NULL;

   fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
   reply_strides = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
   reply_offsets = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);

   /* Widen the reply's uint32_t stride/offset arrays to the int arrays
    * createImageFromDmaBufs2 expects.
    */
   for (int i = 0; i < bp_reply->nfd; i++) {
      strides[i] = reply_strides[i];
      offsets[i] = reply_offsets[i];
   }

   img = image->createImageFromDmaBufs2(dri_screen,
                                        bp_reply->width,
                                        bp_reply->height,
                                        image_format_to_fourcc(format),
                                        bp_reply->modifier,
                                        fds, bp_reply->nfd,
                                        strides, offsets,
                                        0, 0, 0, 0, /* UNDEFINED */
                                        &error, loaderPrivate);

   for (int i = 0; i < bp_reply->nfd; i++)
      close(fds[i]);

   return img;
}
#endif
1685
/** dri3_get_pixmap_buffer
 *
 * Get the DRM object for a pixmap from the X server and
 * wrap that with a __DRIimage structure using createImageFromFds.
 *
 * The result is cached in draw->buffers[buf_id]; subsequent calls
 * return the cached buffer. Returns NULL on failure.
 */
static struct loader_dri3_buffer *
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
                       enum loader_dri3_buffer_type buffer_type,
                       struct loader_dri3_drawable *draw)
{
   int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
   struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
   xcb_drawable_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int width;
   int height;
   int fence_fd;
   __DRIscreen *cur_screen;

   /* Already wrapped this pixmap once; reuse the cached buffer. */
   if (buffer)
      return buffer;

   pixmap = draw->drawable;

   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   /* Allocate and map an xshmfence for client/server idle signaling. */
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto no_fence;
   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL) {
      close (fence_fd);
      goto no_fence;
   }

   /* Get the currently-bound screen or revert to using the drawable's screen if
    * no contexts are currently bound. The latter case is at least necessary for
    * obs-studio, when using Window Capture (Xcomposite) as a Source.
    */
   cur_screen = draw->vtable->get_dri_screen();
   if (!cur_screen) {
      cur_screen = draw->dri_screen;
   }

   /* Hand the fence fd to the X server; it owns fence_fd from here on. */
   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);
#ifdef HAVE_DRI3_MODIFIERS
   /* Prefer the multi-plane, modifier-aware path when both the server
    * (multiplanes_available) and the driver (createImageFromDmaBufs2)
    * support it.
    */
   if (draw->multiplanes_available &&
       draw->ext->image->base.version >= 15 &&
       draw->ext->image->createImageFromDmaBufs2) {
      xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
      xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;

      bps_cookie = xcb_dri3_buffers_from_pixmap(draw->conn, pixmap);
      bps_reply = xcb_dri3_buffers_from_pixmap_reply(draw->conn, bps_cookie,
                                                     NULL);
      if (!bps_reply)
         goto no_image;
      buffer->image =
         loader_dri3_create_image_from_buffers(draw->conn, bps_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bps_reply->width;
      height = bps_reply->height;
      free(bps_reply);
   } else
#endif
   {
      /* Legacy single-plane path. */
      xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
      xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;

      bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
      bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
      if (!bp_reply)
         goto no_image;

      buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bp_reply->width;
      height = bp_reply->height;
      free(bp_reply);
   }

   if (!buffer->image)
      goto no_image;

   buffer->pixmap = pixmap;
   /* The pixmap belongs to the application; don't free it with the buffer. */
   buffer->own_pixmap = false;
   buffer->width = width;
   buffer->height = height;
   buffer->shm_fence = shm_fence;
   buffer->sync_fence = sync_fence;

   draw->buffers[buf_id] = buffer;

   return buffer;

no_image:
   xcb_sync_destroy_fence(draw->conn, sync_fence);
   xshmfence_unmap_shm(shm_fence);
no_fence:
   free(buffer);
no_buffer:
   return NULL;
}
1798
/** dri3_get_buffer
 *
 * Find a front or back buffer, allocating new ones as necessary.
 *
 * (Re)allocates the buffer when missing, wrongly sized, or flagged for
 * reallocation; preserves old contents across resizes; and, for SWAP_COPY /
 * SWAP_EXCHANGE style swaps, pre-fills a fresh back with the designated
 * blit source. Returns NULL on allocation failure.
 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int format,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   /* Back buffers must be idle before we render into them. */
   bool fence_await = buffer_type == loader_dri3_buffer_back;
   int buf_id;

   if (buffer_type == loader_dri3_buffer_back) {
      draw->back_format = format;

      buf_id = dri3_find_back(draw);

      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, if that
    * old one is the wrong size, or if it's suboptimal
    */
   if (!buffer || buffer->width != draw->width ||
       buffer->height != draw->height ||
       buffer->reallocate) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new buffers
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                            format,
                                            draw->width,
                                            draw->height,
                                            draw->depth);
      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      if ((buffer_type == loader_dri3_buffer_back ||
           (buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
          && buffer) {

         /* Fill the new buffer with data from an old buffer */
         if (!loader_dri3_blit_image(draw,
                                     new_buffer->image,
                                     buffer->image,
                                     0, 0,
                                     MIN2(buffer->width, new_buffer->width),
                                     MIN2(buffer->height, new_buffer->height),
                                     0, 0, 0) &&
             !buffer->linear_buffer) {
            /* Local blit unavailable: fall back to a server-side copy,
             * fenced so we can wait for its completion below.
             */
            dri3_fence_reset(draw->conn, new_buffer);
            dri3_copy_area(draw->conn,
                           buffer->pixmap,
                           new_buffer->pixmap,
                           dri3_drawable_gc(draw),
                           0, 0, 0, 0,
                           draw->width, draw->height);
            dri3_fence_trigger(draw->conn, new_buffer);
            fence_await = true;
         }
         dri3_free_render_buffer(draw, buffer);
      } else if (buffer_type == loader_dri3_buffer_front) {
         /* Fill the new fake front with data from a real front */
         loader_dri3_swapbuffer_barrier(draw);
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         if (new_buffer->linear_buffer) {
            /* Prime: sync the shared linear copy into the render image. */
            dri3_fence_await(draw->conn, draw, new_buffer);
            (void) loader_dri3_blit_image(draw,
                                          new_buffer->image,
                                          new_buffer->linear_buffer,
                                          0, 0, draw->width, draw->height,
                                          0, 0, 0);
         } else
            fence_await = true;
      }
      buffer = new_buffer;
      draw->buffers[buf_id] = buffer;
   }

   if (fence_await)
      dri3_fence_await(draw->conn, draw, buffer);

   /*
    * Do we need to preserve the content of a previous buffer?
    *
    * Note that this blit is needed only to avoid a wait for a buffer that
    * is currently in the flip chain or being scanned out from. That's really
    * a tradeoff. If we're ok with the wait we can reduce the number of back
    * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
    * but in the latter case we must disallow page-flipping.
    */
   if (buffer_type == loader_dri3_buffer_back &&
       draw->cur_blit_source != -1 &&
       draw->buffers[draw->cur_blit_source] &&
       buffer != draw->buffers[draw->cur_blit_source]) {

      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];

      /* Avoid flushing here. Will probably do good for tiling hardware. */
      (void) loader_dri3_blit_image(draw,
                                    buffer->image,
                                    source->image,
                                    0, 0, draw->width, draw->height,
                                    0, 0, 0);
      buffer->last_swap = source->last_swap;
      draw->cur_blit_source = -1;
   }
   /* Return the requested buffer */
   return buffer;
}
1928
1929 /** dri3_free_buffers
1930 *
1931 * Free the front bufffer or all of the back buffers. Used
1932 * when the application changes which buffers it needs
1933 */
1934 static void
1935 dri3_free_buffers(__DRIdrawable *driDrawable,
1936 enum loader_dri3_buffer_type buffer_type,
1937 struct loader_dri3_drawable *draw)
1938 {
1939 struct loader_dri3_buffer *buffer;
1940 int first_id;
1941 int n_id;
1942 int buf_id;
1943
1944 switch (buffer_type) {
1945 case loader_dri3_buffer_back:
1946 first_id = LOADER_DRI3_BACK_ID(0);
1947 n_id = LOADER_DRI3_MAX_BACK;
1948 draw->cur_blit_source = -1;
1949 break;
1950 case loader_dri3_buffer_front:
1951 first_id = LOADER_DRI3_FRONT_ID;
1952 /* Don't free a fake front holding new backbuffer content. */
1953 n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1;
1954 }
1955
1956 for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) {
1957 buffer = draw->buffers[buf_id];
1958 if (buffer) {
1959 dri3_free_render_buffer(draw, buffer);
1960 draw->buffers[buf_id] = NULL;
1961 }
1962 }
1963 }
1964
/** loader_dri3_get_buffers
 *
 * The published buffer allocation API.
 * Returns all of the necessary buffers, allocating
 * as needed.
 *
 * Fills in `buffers` according to `buffer_mask`, allocating or freeing
 * front/back buffers as the mask (and drawable type / swap method)
 * requires. Also stores `stamp` on the drawable for special-event
 * registration. Returns true on success, false on any allocation or
 * drawable-update failure (an int used as a boolean).
 */
int
loader_dri3_get_buffers(__DRIdrawable *driDrawable,
                        unsigned int format,
                        uint32_t *stamp,
                        void *loaderPrivate,
                        uint32_t buffer_mask,
                        struct __DRIimageList *buffers)
{
   struct loader_dri3_drawable *draw = loaderPrivate;
   struct loader_dri3_buffer *front, *back;
   int buf_id;

   buffers->image_mask = 0;
   buffers->front = NULL;
   buffers->back = NULL;

   front = NULL;
   back = NULL;

   if (!dri3_update_drawable(draw))
      return false;

   dri3_update_num_back(draw);

   /* Free no longer needed back buffers */
   for (buf_id = draw->num_back; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) {
      /* Keep the slot holding pending blit-source content alive. */
      if (draw->cur_blit_source != buf_id && draw->buffers[buf_id]) {
         dri3_free_render_buffer(draw, draw->buffers[buf_id]);
         draw->buffers[buf_id] = NULL;
      }
   }

   /* pixmaps always have front buffers.
    * Exchange swaps also mandate fake front buffers.
    */
   if (draw->is_pixmap || draw->swap_method == __DRI_ATTRIB_SWAP_EXCHANGE)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
      /* All pixmaps are owned by the server gpu.
       * When we use a different gpu, we can't use the pixmap
       * as buffer since it is potentially tiled a way
       * our device can't understand. In this case, use
       * a fake front buffer. Hopefully the pixmap
       * content will get synced with the fake front
       * buffer.
       */
      if (draw->is_pixmap && !draw->is_different_gpu)
         front = dri3_get_pixmap_buffer(driDrawable,
                                        format,
                                        loader_dri3_buffer_front,
                                        draw);
      else
         front = dri3_get_buffer(driDrawable,
                                 format,
                                 loader_dri3_buffer_front,
                                 draw);

      if (!front)
         return false;
   } else {
      /* Front no longer wanted: release it and clear the fake-front flag. */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
      draw->have_fake_front = 0;
   }

   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
      back = dri3_get_buffer(driDrawable,
                             format,
                             loader_dri3_buffer_back,
                             draw);
      if (!back)
         return false;
      draw->have_back = 1;
   } else {
      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
      draw->have_back = 0;
   }

   if (front) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
      buffers->front = front->image;
      /* A real (server) pixmap front on the same gpu is not "fake". */
      draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap;
   }

   if (back) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
      buffers->back = back->image;
   }

   draw->stamp = stamp;

   return true;
}
2064
2065 /** loader_dri3_update_drawable_geometry
2066 *
2067 * Get the current drawable geometry.
2068 */
2069 void
2070 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
2071 {
2072 xcb_get_geometry_cookie_t geom_cookie;
2073 xcb_get_geometry_reply_t *geom_reply;
2074
2075 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
2076
2077 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
2078
2079 if (geom_reply) {
2080 draw->width = geom_reply->width;
2081 draw->height = geom_reply->height;
2082 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
2083 draw->ext->flush->invalidate(draw->dri_drawable);
2084
2085 free(geom_reply);
2086 }
2087 }
2088
2089
2090 /**
2091 * Make sure the server has flushed all pending swap buffers to hardware
2092 * for this drawable. Ideally we'd want to send an X protocol request to
2093 * have the server block our connection until the swaps are complete. That
2094 * would avoid the potential round-trip here.
2095 */
2096 void
2097 loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
2098 {
2099 int64_t ust, msc, sbc;
2100
2101 (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
2102 }
2103
2104 /**
2105 * Perform any cleanup associated with a close screen operation.
2106 * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
2107 *
2108 * This function destroys the screen's cached swap context if any.
2109 */
2110 void
2111 loader_dri3_close_screen(__DRIscreen *dri_screen)
2112 {
2113 mtx_lock(&blit_context.mtx);
2114 if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
2115 blit_context.core->destroyContext(blit_context.ctx);
2116 blit_context.ctx = NULL;
2117 }
2118 mtx_unlock(&blit_context.mtx);
2119 }
2120
2121 /**
2122 * Find a backbuffer slot - potentially allocating a back buffer
2123 *
2124 * \param draw[in,out] Pointer to the drawable for which to find back.
2125 * \return Pointer to a new back buffer or NULL if allocation failed or was
2126 * not mandated.
2127 *
2128 * Find a potentially new back buffer, and if it's not been allocated yet and
2129 * in addition needs initializing, then try to allocate and initialize it.
2130 */
2131 #include <stdio.h>
2132 static struct loader_dri3_buffer *
2133 dri3_find_back_alloc(struct loader_dri3_drawable *draw)
2134 {
2135 struct loader_dri3_buffer *back;
2136 int id;
2137
2138 id = dri3_find_back(draw);
2139 if (id < 0)
2140 return NULL;
2141
2142 back = draw->buffers[id];
2143 /* Allocate a new back if we haven't got one */
2144 if (!back && draw->back_format != __DRI_IMAGE_FORMAT_NONE &&
2145 dri3_update_drawable(draw))
2146 back = dri3_alloc_render_buffer(draw, draw->back_format,
2147 draw->width, draw->height, draw->depth);
2148
2149 if (!back)
2150 return NULL;
2151
2152 draw->buffers[id] = back;
2153
2154 /* If necessary, prefill the back with data according to swap_method mode. */
2155 if (draw->cur_blit_source != -1 &&
2156 draw->buffers[draw->cur_blit_source] &&
2157 back != draw->buffers[draw->cur_blit_source]) {
2158 struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2159
2160 dri3_fence_await(draw->conn, draw, source);
2161 dri3_fence_await(draw->conn, draw, back);
2162 (void) loader_dri3_blit_image(draw,
2163 back->image,
2164 source->image,
2165 0, 0, draw->width, draw->height,
2166 0, 0, 0);
2167 back->last_swap = source->last_swap;
2168 draw->cur_blit_source = -1;
2169 }
2170
2171 return back;
2172 }