Added a few more stubs so that control reaches DestroyDevice().
[mesa.git] / src / loader / loader_dri3_helper.c
1 /*
2 * Copyright © 2013 Keith Packard
3 * Copyright © 2015 Boyan Ding
4 *
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that copyright
8 * notice and this permission notice appear in supporting documentation, and
9 * that the name of the copyright holders not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. The copyright holders make no representations
12 * about the suitability of this software for any purpose. It is provided "as
13 * is" without express or implied warranty.
14 *
15 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21 * OF THIS SOFTWARE.
22 */
23
24 #include <fcntl.h>
25 #include <stdlib.h>
26 #include <unistd.h>
27 #include <string.h>
28
29 #include <X11/xshmfence.h>
30 #include <xcb/xcb.h>
31 #include <xcb/dri3.h>
32 #include <xcb/present.h>
33 #include <xcb/xfixes.h>
34
35 #include <X11/Xlib-xcb.h>
36
37 #include "loader_dri3_helper.h"
38 #include "util/macros.h"
39 #include "drm-uapi/drm_fourcc.h"
40
/* From driconf.h, user exposed so should be stable.
 * Values of the "vblank_mode" driconf option queried in
 * loader_dri3_drawable_init() below.
 */
#define DRI_CONF_VBLANK_NEVER 0
#define DRI_CONF_VBLANK_DEF_INTERVAL_0 1
#define DRI_CONF_VBLANK_DEF_INTERVAL_1 2
#define DRI_CONF_VBLANK_ALWAYS_SYNC 3
46
/**
 * A cached blit context.
 *
 * Used by loader_dri3_blit_image() when the caller has no suitable bound
 * context of its own.
 */
struct loader_dri3_blit_context {
   mtx_t mtx;                      /* Held while the context is in use. */
   __DRIcontext *ctx;              /* Lazily created context, or NULL. */
   __DRIscreen *cur_screen;        /* Screen ctx was created for. */
   const __DRIcoreExtension *core; /* Core extension used to destroy ctx. */
};
56
/* For simplicity we maintain the cache only for a single screen at a time */
static struct loader_dri3_blit_context blit_context = {
   _MTX_INITIALIZER_NP, NULL
};

/* Forward declarations for helpers referenced before their definitions. */
static void
dri3_flush_present_events(struct loader_dri3_drawable *draw);

static struct loader_dri3_buffer *
dri3_find_back_alloc(struct loader_dri3_drawable *draw);
67
68 static xcb_screen_t *
69 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
70 {
71 xcb_screen_iterator_t screen_iter =
72 xcb_setup_roots_iterator(xcb_get_setup(conn));
73
74 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
75 if (screen_iter.data->root == root)
76 return screen_iter.data;
77 }
78
79 return NULL;
80 }
81
82 static xcb_visualtype_t *
83 get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
84 {
85 xcb_visualtype_iterator_t visual_iter;
86 xcb_screen_t *screen = draw->screen;
87 xcb_depth_iterator_t depth_iter;
88
89 if (!screen)
90 return NULL;
91
92 depth_iter = xcb_screen_allowed_depths_iterator(screen);
93 for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
94 if (depth_iter.data->depth != depth)
95 continue;
96
97 visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
98 if (visual_iter.rem)
99 return visual_iter.data;
100 }
101
102 return NULL;
103 }
104
105 /* Sets the adaptive sync window property state. */
106 static void
107 set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
108 uint32_t state)
109 {
110 static char const name[] = "_VARIABLE_REFRESH";
111 xcb_intern_atom_cookie_t cookie;
112 xcb_intern_atom_reply_t* reply;
113 xcb_void_cookie_t check;
114
115 cookie = xcb_intern_atom(conn, 0, strlen(name), name);
116 reply = xcb_intern_atom_reply(conn, cookie, NULL);
117 if (reply == NULL)
118 return;
119
120 if (state)
121 check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
122 drawable, reply->atom,
123 XCB_ATOM_CARDINAL, 32, 1, &state);
124 else
125 check = xcb_delete_property_checked(conn, drawable, reply->atom);
126
127 xcb_discard_reply(conn, check.sequence);
128 free(reply);
129 }
130
131 /* Get red channel mask for given drawable at given depth. */
132 static unsigned int
133 dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
134 {
135 xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);
136
137 if (visual)
138 return visual->red_mask;
139
140 return 0;
141 }
142
143 /**
144 * Do we have blit functionality in the image blit extension?
145 *
146 * \param draw[in] The drawable intended to blit from / to.
147 * \return true if we have blit functionality. false otherwise.
148 */
149 static bool loader_dri3_have_image_blit(const struct loader_dri3_drawable *draw)
150 {
151 return draw->ext->image->base.version >= 9 &&
152 draw->ext->image->blitImage != NULL;
153 }
154
/**
 * Get and lock (for use with the current thread) a dri context associated
 * with the drawable's dri screen. The context is intended to be used with
 * the dri image extension's blitImage method.
 *
 * \param draw[in]  Pointer to the drawable whose dri screen we want a
 * dri context for.
 * \return A dri context or NULL if context creation failed.
 *
 * When the caller is done with the context (even if the context returned was
 * NULL), the caller must call loader_dri3_blit_context_put.
 */
static __DRIcontext *
loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
{
   /* NOTE: the mutex is intentionally left locked on return; it is released
    * by loader_dri3_blit_context_put().
    */
   mtx_lock(&blit_context.mtx);

   /* The cache holds one context for one screen; discard a cached context
    * that was created for a different screen.
    */
   if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen) {
      blit_context.core->destroyContext(blit_context.ctx);
      blit_context.ctx = NULL;
   }

   if (!blit_context.ctx) {
      blit_context.ctx = draw->ext->core->createNewContext(draw->dri_screen,
                                                           NULL, NULL, NULL);
      blit_context.cur_screen = draw->dri_screen;
      blit_context.core = draw->ext->core;
   }

   return blit_context.ctx;
}
186
/**
 * Release (for use with other threads) a dri context previously obtained using
 * loader_dri3_blit_context_get.
 *
 * Must be called exactly once per loader_dri3_blit_context_get() call, even
 * when that call returned NULL, since the mutex is taken unconditionally.
 */
static void
loader_dri3_blit_context_put(void)
{
   mtx_unlock(&blit_context.mtx);
}
196
197 /**
198 * Blit (parts of) the contents of a DRI image to another dri image
199 *
200 * \param draw[in] The drawable which owns the images.
201 * \param dst[in] The destination image.
202 * \param src[in] The source image.
203 * \param dstx0[in] Start destination coordinate.
204 * \param dsty0[in] Start destination coordinate.
205 * \param width[in] Blit width.
206 * \param height[in] Blit height.
207 * \param srcx0[in] Start source coordinate.
208 * \param srcy0[in] Start source coordinate.
209 * \param flush_flag[in] Image blit flush flag.
210 * \return true iff successful.
211 */
212 static bool
213 loader_dri3_blit_image(struct loader_dri3_drawable *draw,
214 __DRIimage *dst, __DRIimage *src,
215 int dstx0, int dsty0, int width, int height,
216 int srcx0, int srcy0, int flush_flag)
217 {
218 __DRIcontext *dri_context;
219 bool use_blit_context = false;
220
221 if (!loader_dri3_have_image_blit(draw))
222 return false;
223
224 dri_context = draw->vtable->get_dri_context(draw);
225
226 if (!dri_context || !draw->vtable->in_current_context(draw)) {
227 dri_context = loader_dri3_blit_context_get(draw);
228 use_blit_context = true;
229 flush_flag |= __BLIT_FLAG_FLUSH;
230 }
231
232 if (dri_context)
233 draw->ext->image->blitImage(dri_context, dst, src, dstx0, dsty0,
234 width, height, srcx0, srcy0,
235 width, height, flush_flag);
236
237 if (use_blit_context)
238 loader_dri3_blit_context_put();
239
240 return dri_context != NULL;
241 }
242
/* Reset the buffer's shared-memory fence to the untriggered state.
 * The connection parameter is unused but kept for symmetry with the
 * other fence helpers.
 */
static inline void
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xshmfence_reset(buffer->shm_fence);
}
248
/* Trigger the buffer's shared-memory fence from the client side. */
static inline void
dri3_fence_set(struct loader_dri3_buffer *buffer)
{
   xshmfence_trigger(buffer->shm_fence);
}
254
/* Ask the X server to trigger the buffer's sync fence (and thereby the
 * shared-memory fence) once prior requests have been processed.
 */
static inline void
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_sync_trigger_fence(c, buffer->sync_fence);
}
260
/* Block until the buffer's shared-memory fence is triggered.
 * Flushes first so that any pending trigger request actually reaches the
 * server. When a drawable is given, also drains its pending Present events
 * afterwards (under the drawable's mutex).
 */
static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
                 struct loader_dri3_buffer *buffer)
{
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
   if (draw) {
      mtx_lock(&draw->mtx);
      dri3_flush_present_events(draw);
      mtx_unlock(&draw->mtx);
   }
}
273
274 static void
275 dri3_update_num_back(struct loader_dri3_drawable *draw)
276 {
277 if (draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP)
278 draw->num_back = 3;
279 else
280 draw->num_back = 2;
281 }
282
/* Record the swap interval used when scheduling presents for this drawable. */
void
loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
{
   draw->swap_interval = interval;
}
288
/** dri3_free_render_buffer
 *
 * Free everything associated with one render buffer including pixmap, fence
 * stuff and the driver image
 */
static void
dri3_free_render_buffer(struct loader_dri3_drawable *draw,
                        struct loader_dri3_buffer *buffer)
{
   /* Only free the pixmap when this buffer created it. */
   if (buffer->own_pixmap)
      xcb_free_pixmap(draw->conn, buffer->pixmap);
   xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
   xshmfence_unmap_shm(buffer->shm_fence);
   draw->ext->image->destroyImage(buffer->image);
   /* Free the linear copy of the image, if one was allocated. */
   if (buffer->linear_buffer)
      draw->ext->image->destroyImage(buffer->linear_buffer);
   free(buffer);
}
307
/* Tear down a drawable initialized with loader_dri3_drawable_init():
 * destroys the dri drawable, all render buffers, the Present event
 * registration and the synchronization primitives.
 */
void
loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
{
   int i;

   draw->ext->core->destroyDrawable(draw->dri_drawable);

   for (i = 0; i < ARRAY_SIZE(draw->buffers); i++) {
      if (draw->buffers[i])
         dri3_free_render_buffer(draw, draw->buffers[i]);
   }

   if (draw->special_event) {
      /* Tell the server to stop sending Present events for this drawable
       * (errors discarded), then drop the special-event queue.
       */
      xcb_void_cookie_t cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_NO_EVENT);

      xcb_discard_reply(draw->conn, cookie.sequence);
      xcb_unregister_for_special_event(draw->conn, draw->special_event);
   }

   cnd_destroy(&draw->event_cnd);
   mtx_destroy(&draw->mtx);
}
332
333 int
334 loader_dri3_drawable_init(xcb_connection_t *conn,
335 xcb_drawable_t drawable,
336 __DRIscreen *dri_screen,
337 bool is_different_gpu,
338 bool multiplanes_available,
339 const __DRIconfig *dri_config,
340 struct loader_dri3_extensions *ext,
341 const struct loader_dri3_vtable *vtable,
342 struct loader_dri3_drawable *draw)
343 {
344 xcb_get_geometry_cookie_t cookie;
345 xcb_get_geometry_reply_t *reply;
346 xcb_generic_error_t *error;
347 GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1;
348 int swap_interval;
349
350 draw->conn = conn;
351 draw->ext = ext;
352 draw->vtable = vtable;
353 draw->drawable = drawable;
354 draw->dri_screen = dri_screen;
355 draw->is_different_gpu = is_different_gpu;
356 draw->multiplanes_available = multiplanes_available;
357
358 draw->have_back = 0;
359 draw->have_fake_front = 0;
360 draw->first_init = true;
361 draw->adaptive_sync = false;
362 draw->adaptive_sync_active = false;
363
364 draw->cur_blit_source = -1;
365 draw->back_format = __DRI_IMAGE_FORMAT_NONE;
366 mtx_init(&draw->mtx, mtx_plain);
367 cnd_init(&draw->event_cnd);
368
369 if (draw->ext->config) {
370 unsigned char adaptive_sync = 0;
371
372 draw->ext->config->configQueryi(draw->dri_screen,
373 "vblank_mode", &vblank_mode);
374
375 draw->ext->config->configQueryb(draw->dri_screen,
376 "adaptive_sync",
377 &adaptive_sync);
378
379 draw->adaptive_sync = adaptive_sync;
380 }
381
382 if (!draw->adaptive_sync)
383 set_adaptive_sync_property(conn, draw->drawable, false);
384
385 switch (vblank_mode) {
386 case DRI_CONF_VBLANK_NEVER:
387 case DRI_CONF_VBLANK_DEF_INTERVAL_0:
388 swap_interval = 0;
389 break;
390 case DRI_CONF_VBLANK_DEF_INTERVAL_1:
391 case DRI_CONF_VBLANK_ALWAYS_SYNC:
392 default:
393 swap_interval = 1;
394 break;
395 }
396 draw->swap_interval = swap_interval;
397
398 dri3_update_num_back(draw);
399
400 /* Create a new drawable */
401 draw->dri_drawable =
402 draw->ext->image_driver->createNewDrawable(dri_screen,
403 dri_config,
404 draw);
405
406 if (!draw->dri_drawable)
407 return 1;
408
409 cookie = xcb_get_geometry(draw->conn, draw->drawable);
410 reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
411 if (reply == NULL || error != NULL) {
412 draw->ext->core->destroyDrawable(draw->dri_drawable);
413 return 1;
414 }
415
416 draw->screen = get_screen_for_root(draw->conn, reply->root);
417 draw->width = reply->width;
418 draw->height = reply->height;
419 draw->depth = reply->depth;
420 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
421 free(reply);
422
423 draw->swap_method = __DRI_ATTRIB_SWAP_UNDEFINED;
424 if (draw->ext->core->base.version >= 2) {
425 (void )draw->ext->core->getConfigAttrib(dri_config,
426 __DRI_ATTRIB_SWAP_METHOD,
427 &draw->swap_method);
428 }
429
430 /*
431 * Make sure server has the same swap interval we do for the new
432 * drawable.
433 */
434 loader_dri3_set_swap_interval(draw, swap_interval);
435
436 return 0;
437 }
438
/*
 * Process one Present event
 *
 * Updates the drawable's size / SBC / MSC bookkeeping depending on the
 * event type. Always frees the event.
 */
static void
dri3_handle_present_event(struct loader_dri3_drawable *draw,
                          xcb_present_generic_event_t *ge)
{
   switch (ge->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *ce = (void *) ge;

      /* Window was resized: record the new size and invalidate the dri
       * drawable so buffers get reallocated.
       */
      draw->width = ce->width;
      draw->height = ce->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      draw->ext->flush->invalidate(draw->dri_drawable);
      break;
   }
   case XCB_PRESENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *ce = (void *) ge;

      /* Compute the processed SBC number from the received 32-bit serial number
       * merged with the upper 32-bits of the sent 64-bit serial number while
       * checking for wrap.
       */
      if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;

         /* Only assume wraparound if that results in exactly the previous
          * SBC + 1, otherwise ignore received SBC > sent SBC (those are
          * probably from a previous loader_dri3_drawable instance) to avoid
          * calculating bogus target MSC values in loader_dri3_swap_buffers_msc
          */
         if (recv_sbc <= draw->send_sbc)
            draw->recv_sbc = recv_sbc;
         else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
            draw->recv_sbc = recv_sbc - 0x100000000ULL;

         /* When moving from flip to copy, we assume that we can allocate in
          * a more optimal way if we don't need to cater for the display
          * controller.
          */
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
             draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }

         /* If the server tells us that our allocation is suboptimal, we
          * reallocate once.
          */
#ifdef HAVE_DRI3_MODIFIERS
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
             draw->last_present_mode != ce->mode) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }
#endif
         draw->last_present_mode = ce->mode;

         if (draw->vtable->show_fps)
            draw->vtable->show_fps(draw, ce->ust);

         draw->ust = ce->ust;
         draw->msc = ce->msc;
      } else if (ce->serial == draw->eid) {
         /* Completion of a NotifyMSC request issued by
          * loader_dri3_wait_for_msc() (tagged with draw->eid as serial).
          */
         draw->notify_ust = ce->ust;
         draw->notify_msc = ce->msc;
      }
      break;
   }
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *ie = (void *) ge;
      int b;

      /* The server is done with this pixmap; mark the matching buffer
       * as no longer busy so it can be reused.
       */
      for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
         struct loader_dri3_buffer *buf = draw->buffers[b];

         if (buf && buf->pixmap == ie->pixmap)
            buf->busy = 0;
      }
      break;
   }
   }
   free(ge);
}
528
/* Wait for and process one Present special event.
 *
 * Must be called with draw->mtx held; the mutex is dropped while blocking
 * in xcb so other threads may use the drawable, and re-acquired before
 * returning.
 *
 * \param draw[in]           The drawable.
 * \param full_sequence[out] If non-NULL, receives the full sequence number
 *                           of the event that was processed.
 * \return false if xcb returned no event (e.g. connection error),
 *         true otherwise.
 */
static bool
dri3_wait_for_event_locked(struct loader_dri3_drawable *draw,
                           unsigned *full_sequence)
{
   xcb_generic_event_t *ev;
   xcb_present_generic_event_t *ge;

   xcb_flush(draw->conn);

   /* Only have one thread waiting for events at a time */
   if (draw->has_event_waiter) {
      cnd_wait(&draw->event_cnd, &draw->mtx);
      if (full_sequence)
         *full_sequence = draw->last_special_event_sequence;
      /* Another thread has updated the protected info, so retest. */
      return true;
   } else {
      draw->has_event_waiter = true;
      /* Allow other threads access to the drawable while we're waiting. */
      mtx_unlock(&draw->mtx);
      ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
      mtx_lock(&draw->mtx);
      draw->has_event_waiter = false;
      cnd_broadcast(&draw->event_cnd);
   }
   if (!ev)
      return false;
   draw->last_special_event_sequence = ev->full_sequence;
   if (full_sequence)
      *full_sequence = ev->full_sequence;
   ge = (void *) ev;
   dri3_handle_present_event(draw, ge);
   return true;
}
563
/** loader_dri3_wait_for_msc
 *
 * Get the X server to send an event when the target msc/divisor/remainder is
 * reached.
 *
 * Blocks until the NotifyMSC completion for our request arrives and the
 * reported MSC has reached target_msc. Returns the resulting ust/msc/sbc
 * through the out parameters; returns false if event waiting fails.
 */
bool
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
                         int64_t target_msc,
                         int64_t divisor, int64_t remainder,
                         int64_t *ust, int64_t *msc, int64_t *sbc)
{
   /* The request is tagged with draw->eid so its completion can be told
    * apart from pixmap presents in dri3_handle_present_event().
    */
   xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
                                                     draw->drawable,
                                                     draw->eid,
                                                     target_msc,
                                                     divisor,
                                                     remainder);
   unsigned full_sequence;

   mtx_lock(&draw->mtx);

   /* Wait for the event */
   do {
      if (!dri3_wait_for_event_locked(draw, &full_sequence)) {
         mtx_unlock(&draw->mtx);
         return false;
      }
      /* Keep waiting until we've seen an event at or after our request's
       * sequence number and the notify MSC has reached the target.
       */
   } while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);

   *ust = draw->notify_ust;
   *msc = draw->notify_msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);

   return true;
}
600
/** loader_dri3_wait_for_sbc
 *
 * Wait for the completed swap buffer count to reach the specified
 * target. Presumably the application knows that this will be reached with
 * outstanding complete events, or we're going to be here awhile.
 *
 * \return 1 on success (out parameters filled in), 0 if event waiting failed.
 */
int
loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
                         int64_t target_sbc, int64_t *ust,
                         int64_t *msc, int64_t *sbc)
{
   /* From the GLX_OML_sync_control spec:
    *
    *     "If <target_sbc> = 0, the function will block until all previous
    *      swaps requested with glXSwapBuffersMscOML for that window have
    *      completed."
    */
   mtx_lock(&draw->mtx);
   if (!target_sbc)
      target_sbc = draw->send_sbc;

   /* Process Present events until completions catch up with the target. */
   while (draw->recv_sbc < target_sbc) {
      if (!dri3_wait_for_event_locked(draw, NULL)) {
         mtx_unlock(&draw->mtx);
         return 0;
      }
   }

   *ust = draw->ust;
   *msc = draw->msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);
   return 1;
}
635
/** dri3_find_back
 *
 * Find an idle back buffer. If there isn't one, then
 * wait for a present idle notify event from the X server
 *
 * \return the buffer id of an idle (or unallocated) back slot, or -1 if
 *         event waiting failed.
 */
static int
dri3_find_back(struct loader_dri3_drawable *draw)
{
   int b;
   int num_to_consider;

   mtx_lock(&draw->mtx);
   /* Increase the likelyhood of reusing current buffer */
   dri3_flush_present_events(draw);

   /* Check whether we need to reuse the current back buffer as new back.
    * In that case, wait until it's not busy anymore.
    */
   num_to_consider = draw->num_back;
   if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) {
      num_to_consider = 1;
      draw->cur_blit_source = -1;
   }

   for (;;) {
      /* Scan the back slots starting at the current one. */
      for (b = 0; b < num_to_consider; b++) {
         int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->num_back);
         struct loader_dri3_buffer *buffer = draw->buffers[id];

         if (!buffer || !buffer->busy) {
            draw->cur_back = id;
            mtx_unlock(&draw->mtx);
            return id;
         }
      }
      /* All candidates busy: block for an idle-notify and rescan. */
      if (!dri3_wait_for_event_locked(draw, NULL)) {
         mtx_unlock(&draw->mtx);
         return -1;
      }
   }
}
677
678 static xcb_gcontext_t
679 dri3_drawable_gc(struct loader_dri3_drawable *draw)
680 {
681 if (!draw->gc) {
682 uint32_t v = 0;
683 xcb_create_gc(draw->conn,
684 (draw->gc = xcb_generate_id(draw->conn)),
685 draw->drawable,
686 XCB_GC_GRAPHICS_EXPOSURES,
687 &v);
688 }
689 return draw->gc;
690 }
691
692
693 static struct loader_dri3_buffer *
694 dri3_back_buffer(struct loader_dri3_drawable *draw)
695 {
696 return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
697 }
698
699 static struct loader_dri3_buffer *
700 dri3_fake_front_buffer(struct loader_dri3_drawable *draw)
701 {
702 return draw->buffers[LOADER_DRI3_FRONT_ID];
703 }
704
705 static void
706 dri3_copy_area(xcb_connection_t *c,
707 xcb_drawable_t src_drawable,
708 xcb_drawable_t dst_drawable,
709 xcb_gcontext_t gc,
710 int16_t src_x,
711 int16_t src_y,
712 int16_t dst_x,
713 int16_t dst_y,
714 uint16_t width,
715 uint16_t height)
716 {
717 xcb_void_cookie_t cookie;
718
719 cookie = xcb_copy_area_checked(c,
720 src_drawable,
721 dst_drawable,
722 gc,
723 src_x,
724 src_y,
725 dst_x,
726 dst_y,
727 width,
728 height);
729 xcb_discard_reply(c, cookie.sequence);
730 }
731
732 /**
733 * Asks the driver to flush any queued work necessary for serializing with the
734 * X command stream, and optionally the slightly more strict requirement of
735 * glFlush() equivalence (which would require flushing even if nothing had
736 * been drawn to a window system framebuffer, for example).
737 */
738 void
739 loader_dri3_flush(struct loader_dri3_drawable *draw,
740 unsigned flags,
741 enum __DRI2throttleReason throttle_reason)
742 {
743 /* NEED TO CHECK WHETHER CONTEXT IS NULL */
744 __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);
745
746 if (dri_context) {
747 draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
748 flags, throttle_reason);
749 }
750 }
751
/* Copy a sub-rectangle of the back buffer to the drawable's front.
 *
 * Coordinates are given in GL convention (origin bottom-left) and flipped
 * to X convention here. Also refreshes the fake front, when present, so it
 * stays in sync with the real front.
 */
void
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
                            int x, int y,
                            int width, int height,
                            bool flush)
{
   struct loader_dri3_buffer *back;
   unsigned flags = __DRI2_FLUSH_DRAWABLE;

   /* Check we have the right attachments */
   if (!draw->have_back || draw->is_pixmap)
      return;

   if (flush)
      flags |= __DRI2_FLUSH_CONTEXT;
   loader_dri3_flush(draw, flags, __DRI2_THROTTLE_COPYSUBBUFFER);

   back = dri3_find_back_alloc(draw);
   if (!back)
      return;

   /* Convert from GL (bottom-left origin) to X (top-left origin). */
   y = draw->height - y - height;

   if (draw->is_different_gpu) {
      /* Update the linear buffer part of the back buffer
       * for the dri3_copy_area operation
       */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   loader_dri3_swapbuffer_barrier(draw);
   dri3_fence_reset(draw->conn, back);
   dri3_copy_area(draw->conn,
                  back->pixmap,
                  draw->drawable,
                  dri3_drawable_gc(draw),
                  x, y, x, y, width, height);
   dri3_fence_trigger(draw->conn, back);
   /* Refresh the fake front (if present) after we just damaged the real
    * front.
    */
   if (draw->have_fake_front &&
       !loader_dri3_blit_image(draw,
                               dri3_fake_front_buffer(draw)->image,
                               back->image,
                               x, y, width, height,
                               x, y, __BLIT_FLAG_FLUSH) &&
       !draw->is_different_gpu) {
      /* Fall back to a server-side copy when the local blit isn't
       * available (and the buffers live on the same GPU).
       */
      dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
      dri3_copy_area(draw->conn,
                     back->pixmap,
                     dri3_fake_front_buffer(draw)->pixmap,
                     dri3_drawable_gc(draw),
                     x, y, x, y, width, height);
      dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
      dri3_fence_await(draw->conn, NULL, dri3_fake_front_buffer(draw));
   }
   dri3_fence_await(draw->conn, draw, back);
}
815
/* Copy the whole drawable area from src to dest on the server, using the
 * fake front buffer's fences to serialize against the copy.
 */
void
loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
                          xcb_drawable_t dest,
                          xcb_drawable_t src)
{
   loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, __DRI2_THROTTLE_COPYSUBBUFFER);

   dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
   dri3_copy_area(draw->conn,
                  src, dest,
                  dri3_drawable_gc(draw),
                  0, 0, 0, 0, draw->width, draw->height);
   dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
   /* Block until the server has finished the copy. */
   dri3_fence_await(draw->conn, draw, dri3_fake_front_buffer(draw));
}
831
/* Synchronize the fake front buffer with the real front: copy the real
 * front's contents into the fake front. No-op without a fake front.
 */
void
loader_dri3_wait_x(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);

   /* In the psc->is_different_gpu case, the linear buffer has been updated,
    * but not yet the tiled buffer.
    * Copy back to the tiled buffer we use for rendering.
    * Note that we don't need flushing.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->image,
                                    front->linear_buffer,
                                    0, 0, front->width, front->height,
                                    0, 0, 0);
}
856
/* Push rendering done to the fake front buffer out to the real front.
 * No-op without a fake front.
 */
void
loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   /* In the psc->is_different_gpu case, we update the linear_buffer
    * before updating the real front.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->linear_buffer,
                                    front->image,
                                    0, 0, front->width, front->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   loader_dri3_swapbuffer_barrier(draw);
   loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
}
879
880 /** dri3_flush_present_events
881 *
882 * Process any present events that have been received from the X server
883 */
884 static void
885 dri3_flush_present_events(struct loader_dri3_drawable *draw)
886 {
887 /* Check to see if any configuration changes have occurred
888 * since we were last invoked
889 */
890 if (draw->has_event_waiter)
891 return;
892
893 if (draw->special_event) {
894 xcb_generic_event_t *ev;
895
896 while ((ev = xcb_poll_for_special_event(draw->conn,
897 draw->special_event)) != NULL) {
898 xcb_present_generic_event_t *ge = (void *) ev;
899 dri3_handle_present_event(draw, ge);
900 }
901 }
902 }
903
/** loader_dri3_swap_buffers_msc
 *
 * Make the current back buffer visible using the present extension
 *
 * \return the SBC assigned to this swap, or 0 if nothing was presented.
 */
int64_t
loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
                             int64_t target_msc, int64_t divisor,
                             int64_t remainder, unsigned flush_flags,
                             const int *rects, int n_rects,
                             bool force_copy)
{
   struct loader_dri3_buffer *back;
   int64_t ret = 0;
   uint32_t options = XCB_PRESENT_OPTION_NONE;

   draw->vtable->flush_drawable(draw, flush_flags);

   back = dri3_find_back_alloc(draw);

   mtx_lock(&draw->mtx);

   /* First present on an adaptive-sync drawable: turn the window
    * property on.
    */
   if (draw->adaptive_sync && !draw->adaptive_sync_active) {
      set_adaptive_sync_property(draw->conn, draw->drawable, true);
      draw->adaptive_sync_active = true;
   }

   if (draw->is_different_gpu && back) {
      /* Update the linear buffer before presenting the pixmap */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   /* If we need to preload the new back buffer, remember the source.
    * The force_copy parameter is used by EGL to attempt to preserve
    * the back buffer across a call to this function.
    */
   if (draw->swap_method != __DRI_ATTRIB_SWAP_UNDEFINED || force_copy)
      draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);

   /* Exchange the back and fake front. Even though the server knows about these
    * buffers, it has no notion of back and fake front.
    */
   if (back && draw->have_fake_front) {
      struct loader_dri3_buffer *tmp;

      tmp = dri3_fake_front_buffer(draw);
      draw->buffers[LOADER_DRI3_FRONT_ID] = back;
      draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp;

      if (draw->swap_method == __DRI_ATTRIB_SWAP_COPY || force_copy)
         draw->cur_blit_source = LOADER_DRI3_FRONT_ID;
   }

   dri3_flush_present_events(draw);

   if (back && !draw->is_pixmap) {
      dri3_fence_reset(draw->conn, back);

      /* Compute when we want the frame shown by taking the last known
       * successful MSC and adding in a swap interval for each outstanding swap
       * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
       * semantic"
       */
      ++draw->send_sbc;
      if (target_msc == 0 && divisor == 0 && remainder == 0)
         target_msc = draw->msc + draw->swap_interval *
                      (draw->send_sbc - draw->recv_sbc);
      else if (divisor == 0 && remainder > 0) {
         /* From the GLX_OML_sync_control spec:
          *     "If <divisor> = 0, the swap will occur when MSC becomes
          *      greater than or equal to <target_msc>."
          *
          * Note that there's no mention of the remainder.  The Present
          * extension throws BadValue for remainder != 0 with divisor == 0, so
          * just drop the passed in value.
          */
         remainder = 0;
      }

      /* From the GLX_EXT_swap_control spec
       * and the EGL 1.4 spec (page 53):
       *
       *     "If <interval> is set to a value of 0, buffer swaps are not
       *      synchronized to a video frame."
       *
       * Implementation note: It is possible to enable triple buffering
       * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
       * the default.
       */
      if (draw->swap_interval == 0)
         options |= XCB_PRESENT_OPTION_ASYNC;

      /* If we need to populate the new back, but need to reuse the back
       * buffer slot due to lack of local blit capabilities, make sure
       * the server doesn't flip and we deadlock.
       */
      if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1)
         options |= XCB_PRESENT_OPTION_COPY;
#ifdef HAVE_DRI3_MODIFIERS
      if (draw->multiplanes_available)
         options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif
      back->busy = 1;
      back->last_swap = draw->send_sbc;

      /* Build an optional update region from the caller's rectangles
       * (GL bottom-left origin converted to X top-left origin).
       */
      xcb_xfixes_region_t region = 0;
      xcb_rectangle_t xcb_rects[64];

      if (n_rects > 0 && n_rects <= ARRAY_SIZE(xcb_rects)) {
         for (int i = 0; i < n_rects; i++) {
            const int *rect = &rects[i * 4];
            xcb_rects[i].x = rect[0];
            xcb_rects[i].y = draw->height - rect[1] - rect[3];
            xcb_rects[i].width = rect[2];
            xcb_rects[i].height = rect[3];
         }

         region = xcb_generate_id(draw->conn);
         xcb_xfixes_create_region(draw->conn, region, n_rects, xcb_rects);
      }

      xcb_present_pixmap(draw->conn,
                         draw->drawable,
                         back->pixmap,
                         (uint32_t) draw->send_sbc,
                         0,                                    /* valid */
                         region,                               /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         None,                                 /* target_crtc */
                         None,
                         back->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
      ret = (int64_t) draw->send_sbc;

      if (region)
         xcb_xfixes_destroy_region(draw->conn, region);

      /* Schedule a server-side back-preserving blit if necessary.
       * This happens iff all conditions below are satisfied:
       * a) We have a fake front,
       * b) We need to preserve the back buffer,
       * c) We don't have local blit capabilities.
       */
      if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1 &&
          draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) {
         struct loader_dri3_buffer *new_back = dri3_back_buffer(draw);
         struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source];

         dri3_fence_reset(draw->conn, new_back);
         dri3_copy_area(draw->conn, src->pixmap,
                        new_back->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0, draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_back);
         new_back->last_swap = src->last_swap;
      }

      xcb_flush(draw->conn);
      if (draw->stamp)
         ++(*draw->stamp);
   }
   mtx_unlock(&draw->mtx);

   draw->ext->flush->invalidate(draw->dri_drawable);

   return ret;
}
1078
1079 int
1080 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
1081 {
1082 struct loader_dri3_buffer *back = dri3_find_back_alloc(draw);
1083 int ret;
1084
1085 mtx_lock(&draw->mtx);
1086 ret = (!back || back->last_swap == 0) ? 0 :
1087 draw->send_sbc - back->last_swap + 1;
1088 mtx_unlock(&draw->mtx);
1089
1090 return ret;
1091 }
1092
1093 /** loader_dri3_open
1094 *
1095 * Wrapper around xcb_dri3_open
1096 */
1097 int
1098 loader_dri3_open(xcb_connection_t *conn,
1099 xcb_window_t root,
1100 uint32_t provider)
1101 {
1102 xcb_dri3_open_cookie_t cookie;
1103 xcb_dri3_open_reply_t *reply;
1104 int fd;
1105
1106 cookie = xcb_dri3_open(conn,
1107 root,
1108 provider);
1109
1110 reply = xcb_dri3_open_reply(conn, cookie, NULL);
1111 if (!reply)
1112 return -1;
1113
1114 if (reply->nfd != 1) {
1115 free(reply);
1116 return -1;
1117 }
1118
1119 fd = xcb_dri3_open_reply_fds(conn, reply)[0];
1120 free(reply);
1121 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
1122
1123 return fd;
1124 }
1125
1126 static uint32_t
1127 dri3_cpp_for_format(uint32_t format) {
1128 switch (format) {
1129 case __DRI_IMAGE_FORMAT_R8:
1130 return 1;
1131 case __DRI_IMAGE_FORMAT_RGB565:
1132 case __DRI_IMAGE_FORMAT_GR88:
1133 return 2;
1134 case __DRI_IMAGE_FORMAT_XRGB8888:
1135 case __DRI_IMAGE_FORMAT_ARGB8888:
1136 case __DRI_IMAGE_FORMAT_ABGR8888:
1137 case __DRI_IMAGE_FORMAT_XBGR8888:
1138 case __DRI_IMAGE_FORMAT_XRGB2101010:
1139 case __DRI_IMAGE_FORMAT_ARGB2101010:
1140 case __DRI_IMAGE_FORMAT_XBGR2101010:
1141 case __DRI_IMAGE_FORMAT_ABGR2101010:
1142 case __DRI_IMAGE_FORMAT_SARGB8:
1143 case __DRI_IMAGE_FORMAT_SABGR8:
1144 case __DRI_IMAGE_FORMAT_SXRGB8:
1145 return 4;
1146 case __DRI_IMAGE_FORMAT_XBGR16161616F:
1147 case __DRI_IMAGE_FORMAT_ABGR16161616F:
1148 return 8;
1149 case __DRI_IMAGE_FORMAT_NONE:
1150 default:
1151 return 0;
1152 }
1153 }
1154
1155 /* Map format of render buffer to corresponding format for the linear_buffer
1156 * used for sharing with the display gpu of a Prime setup (== is_different_gpu).
1157 * Usually linear_format == format, except for depth >= 30 formats, where
1158 * different gpu vendors have different preferences wrt. color channel ordering.
1159 */
1160 static uint32_t
1161 dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
1162 {
1163 switch (format) {
1164 case __DRI_IMAGE_FORMAT_XRGB2101010:
1165 case __DRI_IMAGE_FORMAT_XBGR2101010:
1166 /* Different preferred formats for different hw */
1167 if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1168 return __DRI_IMAGE_FORMAT_XBGR2101010;
1169 else
1170 return __DRI_IMAGE_FORMAT_XRGB2101010;
1171
1172 case __DRI_IMAGE_FORMAT_ARGB2101010:
1173 case __DRI_IMAGE_FORMAT_ABGR2101010:
1174 /* Different preferred formats for different hw */
1175 if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1176 return __DRI_IMAGE_FORMAT_ABGR2101010;
1177 else
1178 return __DRI_IMAGE_FORMAT_ARGB2101010;
1179
1180 default:
1181 return format;
1182 }
1183 }
1184
1185 /* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
1186 * the createImageFromFds call takes DRM_FORMAT codes. To avoid
1187 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
1188 * translate to DRM_FORMAT codes in the call to createImageFromFds
1189 */
1190 static int
1191 image_format_to_fourcc(int format)
1192 {
1193
1194 /* Convert from __DRI_IMAGE_FORMAT to DRM_FORMAT (sigh) */
1195 switch (format) {
1196 case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
1197 case __DRI_IMAGE_FORMAT_SABGR8: return __DRI_IMAGE_FOURCC_SABGR8888;
1198 case __DRI_IMAGE_FORMAT_SXRGB8: return __DRI_IMAGE_FOURCC_SXRGB8888;
1199 case __DRI_IMAGE_FORMAT_RGB565: return DRM_FORMAT_RGB565;
1200 case __DRI_IMAGE_FORMAT_XRGB8888: return DRM_FORMAT_XRGB8888;
1201 case __DRI_IMAGE_FORMAT_ARGB8888: return DRM_FORMAT_ARGB8888;
1202 case __DRI_IMAGE_FORMAT_ABGR8888: return DRM_FORMAT_ABGR8888;
1203 case __DRI_IMAGE_FORMAT_XBGR8888: return DRM_FORMAT_XBGR8888;
1204 case __DRI_IMAGE_FORMAT_XRGB2101010: return DRM_FORMAT_XRGB2101010;
1205 case __DRI_IMAGE_FORMAT_ARGB2101010: return DRM_FORMAT_ARGB2101010;
1206 case __DRI_IMAGE_FORMAT_XBGR2101010: return DRM_FORMAT_XBGR2101010;
1207 case __DRI_IMAGE_FORMAT_ABGR2101010: return DRM_FORMAT_ABGR2101010;
1208 case __DRI_IMAGE_FORMAT_XBGR16161616F: return DRM_FORMAT_XBGR16161616F;
1209 case __DRI_IMAGE_FORMAT_ABGR16161616F: return DRM_FORMAT_ABGR16161616F;
1210 }
1211 return 0;
1212 }
1213
1214 #ifdef HAVE_DRI3_MODIFIERS
1215 static bool
1216 has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
1217 uint64_t *modifiers, uint32_t count)
1218 {
1219 uint64_t *supported_modifiers;
1220 int32_t supported_modifiers_count;
1221 bool found = false;
1222 int i, j;
1223
1224 if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen,
1225 format, 0, NULL, NULL,
1226 &supported_modifiers_count) ||
1227 supported_modifiers_count == 0)
1228 return false;
1229
1230 supported_modifiers = malloc(supported_modifiers_count * sizeof(uint64_t));
1231 if (!supported_modifiers)
1232 return false;
1233
1234 draw->ext->image->queryDmaBufModifiers(draw->dri_screen, format,
1235 supported_modifiers_count,
1236 supported_modifiers, NULL,
1237 &supported_modifiers_count);
1238
1239 for (i = 0; !found && i < supported_modifiers_count; i++) {
1240 for (j = 0; !found && j < count; j++) {
1241 if (supported_modifiers[i] == modifiers[j])
1242 found = true;
1243 }
1244 }
1245
1246 free(supported_modifiers);
1247 return found;
1248 }
1249 #endif
1250
1251 /** loader_dri3_alloc_render_buffer
1252 *
1253 * Use the driver createImage function to construct a __DRIimage, then
1254 * get a file descriptor for that and create an X pixmap from that
1255 *
1256 * Allocate an xshmfence for synchronization
1257 */
static struct loader_dri3_buffer *
dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
                         int width, int height, int depth)
{
   struct loader_dri3_buffer *buffer;
   __DRIimage *pixmap_buffer;
   xcb_pixmap_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int buffer_fds[4], fence_fd;
   int num_planes = 0;
   int i, mod;
   int ret;

   /* Create an xshmfence object and
    * prepare to send that to the X server
    */

   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      return NULL;

   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL)
      goto no_shm_fence;

   /* Allocate the image from the driver
    */
   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   /* An unknown format (cpp == 0) cannot be turned into a pixmap. */
   buffer->cpp = dri3_cpp_for_format(format);
   if (!buffer->cpp)
      goto no_image;

   if (!draw->is_different_gpu) {
#ifdef HAVE_DRI3_MODIFIERS
      /* Modifier path: ask the server which modifiers the window/screen
       * supports and let the driver allocate with one of them.
       */
      if (draw->multiplanes_available &&
          draw->ext->image->base.version >= 15 &&
          draw->ext->image->queryDmaBufModifiers &&
          draw->ext->image->createImageWithModifiers) {
         xcb_dri3_get_supported_modifiers_cookie_t mod_cookie;
         xcb_dri3_get_supported_modifiers_reply_t *mod_reply;
         xcb_generic_error_t *error = NULL;
         uint64_t *modifiers = NULL;
         uint32_t count = 0;

         mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn,
                                                       draw->window,
                                                       depth, buffer->cpp * 8);
         mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn,
                                                            mod_cookie,
                                                            &error);
         if (!mod_reply)
            goto no_image;

         /* Prefer the window's modifier list; fall back to the screen's
          * list below if none of the window modifiers is driver-supported.
          */
         if (mod_reply->num_window_modifiers) {
            count = mod_reply->num_window_modifiers;
            modifiers = malloc(count * sizeof(uint64_t));
            if (!modifiers) {
               free(mod_reply);
               goto no_image;
            }

            memcpy(modifiers,
                   xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
                   count * sizeof(uint64_t));

            if (!has_supported_modifier(draw, image_format_to_fourcc(format),
                                        modifiers, count)) {
               free(modifiers);
               count = 0;
               modifiers = NULL;
            }
         }

         if (mod_reply->num_screen_modifiers && modifiers == NULL) {
            count = mod_reply->num_screen_modifiers;
            modifiers = malloc(count * sizeof(uint64_t));
            if (!modifiers) {
               free(modifiers);
               free(mod_reply);
               goto no_image;
            }

            memcpy(modifiers,
                   xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
                   count * sizeof(uint64_t));
         }

         free(mod_reply);

         /* don't use createImageWithModifiers() if we have no
          * modifiers, other things depend on the use flags when
          * there are no modifiers to know that a buffer can be
          * shared.
          */
         if (modifiers) {
            buffer->image = draw->ext->image->createImageWithModifiers(draw->dri_screen,
                                                                       width, height,
                                                                       format,
                                                                       modifiers,
                                                                       count,
                                                                       buffer);
         }

         free(modifiers);
      }
#endif
      /* Legacy path (or modifier allocation failed): allocate with
       * explicit use flags so the buffer is known to be shareable.
       */
      if (!buffer->image)
         buffer->image = draw->ext->image->createImage(draw->dri_screen,
                                                       width, height,
                                                       format,
                                                       __DRI_IMAGE_USE_SHARE |
                                                       __DRI_IMAGE_USE_SCANOUT |
                                                       __DRI_IMAGE_USE_BACKBUFFER,
                                                       buffer);

      pixmap_buffer = buffer->image;

      if (!buffer->image)
         goto no_image;
   } else {
      /* Prime setup: render into a tiled buffer on our gpu, and keep a
       * separate linear buffer that the display gpu can scan out; the
       * pixmap is created from the linear one.
       */
      buffer->image = draw->ext->image->createImage(draw->dri_screen,
                                                    width, height,
                                                    format,
                                                    0,
                                                    buffer);

      if (!buffer->image)
         goto no_image;

      buffer->linear_buffer =
         draw->ext->image->createImage(draw->dri_screen,
                                       width, height,
                                       dri3_linear_format_for_format(draw, format),
                                       __DRI_IMAGE_USE_SHARE |
                                       __DRI_IMAGE_USE_LINEAR |
                                       __DRI_IMAGE_USE_BACKBUFFER,
                                       buffer);
      pixmap_buffer = buffer->linear_buffer;

      if (!buffer->linear_buffer)
         goto no_linear_buffer;
   }

   /* X want some information about the planes, so ask the image for it
    */
   if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
                                     &num_planes))
      num_planes = 1;

   /* Collect an fd, stride and offset per plane for the pixmap request. */
   for (i = 0; i < num_planes; i++) {
      __DRIimage *image = draw->ext->image->fromPlanar(pixmap_buffer, i, NULL);

      if (!image) {
         assert(i == 0);
         image = pixmap_buffer;
      }

      buffer_fds[i] = -1;

      ret = draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD,
                                         &buffer_fds[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE,
                                          &buffer->strides[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET,
                                          &buffer->offsets[i]);
      if (image != pixmap_buffer)
         draw->ext->image->destroyImage(image);

      if (!ret)
         goto no_buffer_attrib;
   }

   /* Reassemble the 64-bit modifier from its two 32-bit halves; if the
    * driver can't report one, mark it invalid so we take the
    * single-plane pixmap path below.
    */
   ret = draw->ext->image->queryImage(pixmap_buffer,
                                      __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
   buffer->modifier = (uint64_t) mod << 32;
   ret &= draw->ext->image->queryImage(pixmap_buffer,
                                       __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
   buffer->modifier |= (uint64_t)(mod & 0xffffffff);

   if (!ret)
      buffer->modifier = DRM_FORMAT_MOD_INVALID;

   pixmap = xcb_generate_id(draw->conn);
#ifdef HAVE_DRI3_MODIFIERS
   if (draw->multiplanes_available &&
       buffer->modifier != DRM_FORMAT_MOD_INVALID) {
      xcb_dri3_pixmap_from_buffers(draw->conn,
                                   pixmap,
                                   draw->window,
                                   num_planes,
                                   width, height,
                                   buffer->strides[0], buffer->offsets[0],
                                   buffer->strides[1], buffer->offsets[1],
                                   buffer->strides[2], buffer->offsets[2],
                                   buffer->strides[3], buffer->offsets[3],
                                   depth, buffer->cpp * 8,
                                   buffer->modifier,
                                   buffer_fds);
   } else
#endif
   {
      xcb_dri3_pixmap_from_buffer(draw->conn,
                                  pixmap,
                                  draw->drawable,
                                  buffer->size,
                                  width, height, buffer->strides[0],
                                  depth, buffer->cpp * 8,
                                  buffer_fds[0]);
   }

   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);

   buffer->pixmap = pixmap;
   buffer->own_pixmap = true;
   buffer->sync_fence = sync_fence;
   buffer->shm_fence = shm_fence;
   buffer->width = width;
   buffer->height = height;

   /* Mark the buffer as idle
    */
   dri3_fence_set(buffer);

   return buffer;

/* Error unwinding: close any plane fds collected so far, then tear down
 * whatever was allocated before the failure point.
 */
no_buffer_attrib:
   do {
      if (buffer_fds[i] != -1)
         close(buffer_fds[i]);
   } while (--i >= 0);
   draw->ext->image->destroyImage(pixmap_buffer);
no_linear_buffer:
   /* In the Prime case pixmap_buffer is the linear buffer, so the render
    * image still needs a separate destroy here.
    */
   if (draw->is_different_gpu)
      draw->ext->image->destroyImage(buffer->image);
no_image:
   free(buffer);
no_buffer:
   xshmfence_unmap_shm(shm_fence);
no_shm_fence:
   close(fence_fd);
   return NULL;
}
1508
1509 /** loader_dri3_update_drawable
1510 *
1511 * Called the first time we use the drawable and then
1512 * after we receive present configure notify events to
1513 * track the geometry of the drawable
1514 */
static int
dri3_update_drawable(struct loader_dri3_drawable *draw)
{
   /* Returns true on success, false if the geometry query or the
    * Present input selection failed. Holds draw->mtx across the whole
    * init + event-flush sequence.
    */
   mtx_lock(&draw->mtx);
   if (draw->first_init) {
      xcb_get_geometry_cookie_t geom_cookie;
      xcb_get_geometry_reply_t *geom_reply;
      xcb_void_cookie_t cookie;
      xcb_generic_error_t *error;
      xcb_present_query_capabilities_cookie_t present_capabilities_cookie;
      xcb_present_query_capabilities_reply_t *present_capabilities_reply;
      xcb_window_t root_win;

      draw->first_init = false;

      /* Try to select for input on the window.
       *
       * If the drawable is a window, this will get our events
       * delivered.
       *
       * Otherwise, we'll get a BadWindow error back from this request which
       * will let us know that the drawable is a pixmap instead.
       */

      draw->eid = xcb_generate_id(draw->conn);
      cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

      present_capabilities_cookie =
         xcb_present_query_capabilities(draw->conn, draw->drawable);

      /* Create an XCB event queue to hold present events outside of the usual
       * application event queue
       */
      draw->special_event = xcb_register_for_special_xge(draw->conn,
                                                         &xcb_present_id,
                                                         draw->eid,
                                                         draw->stamp);
      geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);

      geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);

      if (!geom_reply) {
         mtx_unlock(&draw->mtx);
         return false;
      }
      draw->width = geom_reply->width;
      draw->height = geom_reply->height;
      draw->depth = geom_reply->depth;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      root_win = geom_reply->root;

      free(geom_reply);

      draw->is_pixmap = false;

      /* Check to see if our select input call failed. If it failed with a
       * BadWindow error, then assume the drawable is a pixmap. Destroy the
       * special event queue created above and mark the drawable as a pixmap
       */

      error = xcb_request_check(draw->conn, cookie);

      present_capabilities_reply =
         xcb_present_query_capabilities_reply(draw->conn,
                                              present_capabilities_cookie,
                                              NULL);

      if (present_capabilities_reply) {
         draw->present_capabilities = present_capabilities_reply->capabilities;
         free(present_capabilities_reply);
      } else
         draw->present_capabilities = 0;

      if (error) {
         /* Any error other than BadWindow is fatal. */
         if (error->error_code != BadWindow) {
            free(error);
            mtx_unlock(&draw->mtx);
            return false;
         }
         free(error);
         draw->is_pixmap = true;
         xcb_unregister_for_special_event(draw->conn, draw->special_event);
         draw->special_event = NULL;
      }

      /* Pixmaps have no window of their own; remember the screen's root
       * window instead for later DRI3 requests that need a window.
       */
      if (draw->is_pixmap)
         draw->window = root_win;
      else
         draw->window = draw->drawable;
   }
   /* Drain any queued Present events before the caller looks at
    * geometry/buffer state.
    */
   dri3_flush_present_events(draw);
   mtx_unlock(&draw->mtx);
   return true;
}
1613
1614 __DRIimage *
1615 loader_dri3_create_image(xcb_connection_t *c,
1616 xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1617 unsigned int format,
1618 __DRIscreen *dri_screen,
1619 const __DRIimageExtension *image,
1620 void *loaderPrivate)
1621 {
1622 int *fds;
1623 __DRIimage *image_planar, *ret;
1624 int stride, offset;
1625
1626 /* Get an FD for the pixmap object
1627 */
1628 fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1629
1630 stride = bp_reply->stride;
1631 offset = 0;
1632
1633 /* createImageFromFds creates a wrapper __DRIimage structure which
1634 * can deal with multiple planes for things like Yuv images. So, once
1635 * we've gotten the planar wrapper, pull the single plane out of it and
1636 * discard the wrapper.
1637 */
1638 image_planar = image->createImageFromFds(dri_screen,
1639 bp_reply->width,
1640 bp_reply->height,
1641 image_format_to_fourcc(format),
1642 fds, 1,
1643 &stride, &offset, loaderPrivate);
1644 close(fds[0]);
1645 if (!image_planar)
1646 return NULL;
1647
1648 ret = image->fromPlanar(image_planar, 0, loaderPrivate);
1649
1650 if (!ret)
1651 ret = image_planar;
1652 else
1653 image->destroyImage(image_planar);
1654
1655 return ret;
1656 }
1657
1658 #ifdef HAVE_DRI3_MODIFIERS
1659 __DRIimage *
1660 loader_dri3_create_image_from_buffers(xcb_connection_t *c,
1661 xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
1662 unsigned int format,
1663 __DRIscreen *dri_screen,
1664 const __DRIimageExtension *image,
1665 void *loaderPrivate)
1666 {
1667 __DRIimage *ret;
1668 int *fds;
1669 uint32_t *strides_in, *offsets_in;
1670 int strides[4], offsets[4];
1671 unsigned error;
1672 int i;
1673
1674 if (bp_reply->nfd > 4)
1675 return NULL;
1676
1677 fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
1678 strides_in = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
1679 offsets_in = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);
1680 for (i = 0; i < bp_reply->nfd; i++) {
1681 strides[i] = strides_in[i];
1682 offsets[i] = offsets_in[i];
1683 }
1684
1685 ret = image->createImageFromDmaBufs2(dri_screen,
1686 bp_reply->width,
1687 bp_reply->height,
1688 image_format_to_fourcc(format),
1689 bp_reply->modifier,
1690 fds, bp_reply->nfd,
1691 strides, offsets,
1692 0, 0, 0, 0, /* UNDEFINED */
1693 &error, loaderPrivate);
1694
1695 for (i = 0; i < bp_reply->nfd; i++)
1696 close(fds[i]);
1697
1698 return ret;
1699 }
1700 #endif
1701
1702 /** dri3_get_pixmap_buffer
1703 *
1704 * Get the DRM object for a pixmap from the X server and
1705 * wrap that with a __DRIimage structure using createImageFromFds
1706 */
static struct loader_dri3_buffer *
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
                       enum loader_dri3_buffer_type buffer_type,
                       struct loader_dri3_drawable *draw)
{
   int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
   struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
   xcb_drawable_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int width;
   int height;
   int fence_fd;
   __DRIscreen *cur_screen;

   /* Cached from a previous call: nothing to do. */
   if (buffer)
      return buffer;

   pixmap = draw->drawable;

   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   /* Set up the shared-memory fence pair used to synchronize with the
    * server on this buffer.
    */
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto no_fence;
   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL) {
      close (fence_fd);
      goto no_fence;
   }

   /* Get the currently-bound screen or revert to using the drawable's screen if
    * no contexts are currently bound. The latter case is at least necessary for
    * obs-studio, when using Window Capture (Xcomposite) as a Source.
    */
   cur_screen = draw->vtable->get_dri_screen();
   if (!cur_screen) {
      cur_screen = draw->dri_screen;
   }

   /* fence_fd is consumed by this request. */
   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);
#ifdef HAVE_DRI3_MODIFIERS
   /* Multi-plane path: fetch all plane buffers plus the modifier. */
   if (draw->multiplanes_available &&
       draw->ext->image->base.version >= 15 &&
       draw->ext->image->createImageFromDmaBufs2) {
      xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
      xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;

      bps_cookie = xcb_dri3_buffers_from_pixmap(draw->conn, pixmap);
      bps_reply = xcb_dri3_buffers_from_pixmap_reply(draw->conn, bps_cookie,
                                                     NULL);
      if (!bps_reply)
         goto no_image;
      buffer->image =
         loader_dri3_create_image_from_buffers(draw->conn, bps_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bps_reply->width;
      height = bps_reply->height;
      free(bps_reply);
   } else
#endif
   {
      /* Legacy single-buffer path. */
      xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
      xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;

      bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
      bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
      if (!bp_reply)
         goto no_image;

      buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bp_reply->width;
      height = bp_reply->height;
      free(bp_reply);
   }

   if (!buffer->image)
      goto no_image;

   /* own_pixmap is false: the pixmap belongs to the application, we must
    * not free it when tearing the buffer down.
    */
   buffer->pixmap = pixmap;
   buffer->own_pixmap = false;
   buffer->width = width;
   buffer->height = height;
   buffer->shm_fence = shm_fence;
   buffer->sync_fence = sync_fence;

   draw->buffers[buf_id] = buffer;

   return buffer;

no_image:
   /* The sync fence was already created server-side; destroy it there
    * and unmap our shared-memory side.
    */
   xcb_sync_destroy_fence(draw->conn, sync_fence);
   xshmfence_unmap_shm(shm_fence);
no_fence:
   free(buffer);
no_buffer:
   return NULL;
}
1814
1815 /** dri3_get_buffer
1816 *
1817 * Find a front or back buffer, allocating new ones as necessary
1818 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int format,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   /* Back buffers are awaited by default; front buffers only when a
    * server-side copy is scheduled below.
    */
   bool fence_await = buffer_type == loader_dri3_buffer_back;
   int buf_id;

   if (buffer_type == loader_dri3_buffer_back) {
      draw->back_format = format;

      buf_id = dri3_find_back(draw);

      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, if that
    * old one is the wrong size, or if it's suboptimal
    */
   if (!buffer || buffer->width != draw->width ||
       buffer->height != draw->height ||
       buffer->reallocate) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new buffers
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                            format,
                                            draw->width,
                                            draw->height,
                                            draw->depth);
      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      if ((buffer_type == loader_dri3_buffer_back ||
           (buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
          && buffer) {

         /* Fill the new buffer with data from an old buffer */
         if (!loader_dri3_blit_image(draw,
                                     new_buffer->image,
                                     buffer->image,
                                     0, 0,
                                     MIN2(buffer->width, new_buffer->width),
                                     MIN2(buffer->height, new_buffer->height),
                                     0, 0, 0) &&
             !buffer->linear_buffer) {
            /* Local blit failed; fall back to a server-side copy
             * bracketed by our fence.
             */
            dri3_fence_reset(draw->conn, new_buffer);
            dri3_copy_area(draw->conn,
                           buffer->pixmap,
                           new_buffer->pixmap,
                           dri3_drawable_gc(draw),
                           0, 0, 0, 0,
                           draw->width, draw->height);
            dri3_fence_trigger(draw->conn, new_buffer);
            fence_await = true;
         }
         dri3_free_render_buffer(draw, buffer);
      } else if (buffer_type == loader_dri3_buffer_front) {
         /* Fill the new fake front with data from a real front */
         loader_dri3_swapbuffer_barrier(draw);
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         /* Prime setup: sync the linear copy into the render image. */
         if (new_buffer->linear_buffer) {
            dri3_fence_await(draw->conn, draw, new_buffer);
            (void) loader_dri3_blit_image(draw,
                                          new_buffer->image,
                                          new_buffer->linear_buffer,
                                          0, 0, draw->width, draw->height,
                                          0, 0, 0);
         } else
            fence_await = true;
      }
      buffer = new_buffer;
      draw->buffers[buf_id] = buffer;
   }

   if (fence_await)
      dri3_fence_await(draw->conn, draw, buffer);

   /*
    * Do we need to preserve the content of a previous buffer?
    *
    * Note that this blit is needed only to avoid a wait for a buffer that
    * is currently in the flip chain or being scanned out from. That's really
    * a tradeoff. If we're ok with the wait we can reduce the number of back
    * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
    * but in the latter case we must disallow page-flipping.
    */
   if (buffer_type == loader_dri3_buffer_back &&
       draw->cur_blit_source != -1 &&
       draw->buffers[draw->cur_blit_source] &&
       buffer != draw->buffers[draw->cur_blit_source]) {

      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];

      /* Avoid flushing here. Will probably do good for tiling hardware. */
      (void) loader_dri3_blit_image(draw,
                                    buffer->image,
                                    source->image,
                                    0, 0, draw->width, draw->height,
                                    0, 0, 0);
      buffer->last_swap = source->last_swap;
      draw->cur_blit_source = -1;
   }
   /* Return the requested buffer */
   return buffer;
}
1944
1945 /** dri3_free_buffers
1946 *
1947 * Free the front bufffer or all of the back buffers. Used
1948 * when the application changes which buffers it needs
1949 */
1950 static void
1951 dri3_free_buffers(__DRIdrawable *driDrawable,
1952 enum loader_dri3_buffer_type buffer_type,
1953 struct loader_dri3_drawable *draw)
1954 {
1955 struct loader_dri3_buffer *buffer;
1956 int first_id;
1957 int n_id;
1958 int buf_id;
1959
1960 switch (buffer_type) {
1961 case loader_dri3_buffer_back:
1962 first_id = LOADER_DRI3_BACK_ID(0);
1963 n_id = LOADER_DRI3_MAX_BACK;
1964 draw->cur_blit_source = -1;
1965 break;
1966 case loader_dri3_buffer_front:
1967 first_id = LOADER_DRI3_FRONT_ID;
1968 /* Don't free a fake front holding new backbuffer content. */
1969 n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1;
1970 }
1971
1972 for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) {
1973 buffer = draw->buffers[buf_id];
1974 if (buffer) {
1975 dri3_free_render_buffer(draw, buffer);
1976 draw->buffers[buf_id] = NULL;
1977 }
1978 }
1979 }
1980
1981 /** loader_dri3_get_buffers
1982 *
1983 * The published buffer allocation API.
1984 * Returns all of the necessary buffers, allocating
1985 * as needed.
1986 */
int
loader_dri3_get_buffers(__DRIdrawable *driDrawable,
                        unsigned int format,
                        uint32_t *stamp,
                        void *loaderPrivate,
                        uint32_t buffer_mask,
                        struct __DRIimageList *buffers)
{
   struct loader_dri3_drawable *draw = loaderPrivate;
   struct loader_dri3_buffer *front, *back;
   int buf_id;

   buffers->image_mask = 0;
   buffers->front = NULL;
   buffers->back = NULL;

   front = NULL;
   back = NULL;

   /* Refresh drawable geometry and drain pending present events first. */
   if (!dri3_update_drawable(draw))
      return false;

   dri3_update_num_back(draw);

   /* Free no longer needed back buffers */
   for (buf_id = draw->num_back; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) {
      if (draw->cur_blit_source != buf_id && draw->buffers[buf_id]) {
         dri3_free_render_buffer(draw, draw->buffers[buf_id]);
         draw->buffers[buf_id] = NULL;
      }
   }

   /* pixmaps always have front buffers.
    * Exchange swaps also mandate fake front buffers.
    */
   if (draw->is_pixmap || draw->swap_method == __DRI_ATTRIB_SWAP_EXCHANGE)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
      /* All pixmaps are owned by the server gpu.
       * When we use a different gpu, we can't use the pixmap
       * as buffer since it is potentially tiled a way
       * our device can't understand. In this case, use
       * a fake front buffer. Hopefully the pixmap
       * content will get synced with the fake front
       * buffer.
       */
      if (draw->is_pixmap && !draw->is_different_gpu)
         front = dri3_get_pixmap_buffer(driDrawable,
                                        format,
                                        loader_dri3_buffer_front,
                                        draw);
      else
         front = dri3_get_buffer(driDrawable,
                                 format,
                                 loader_dri3_buffer_front,
                                 draw);

      if (!front)
         return false;
   } else {
      /* Front no longer wanted: release it and clear the fake-front flag. */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
      draw->have_fake_front = 0;
   }

   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
      back = dri3_get_buffer(driDrawable,
                             format,
                             loader_dri3_buffer_back,
                             draw);
      if (!back)
         return false;
      draw->have_back = 1;
   } else {
      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
      draw->have_back = 0;
   }

   if (front) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
      buffers->front = front->image;
      draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap;
   }

   if (back) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
      buffers->back = back->image;
   }

   /* Remember the caller's stamp so present events can bump it. */
   draw->stamp = stamp;

   return true;
}
2080
2081 /** loader_dri3_update_drawable_geometry
2082 *
2083 * Get the current drawable geometry.
2084 */
2085 void
2086 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
2087 {
2088 xcb_get_geometry_cookie_t geom_cookie;
2089 xcb_get_geometry_reply_t *geom_reply;
2090
2091 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
2092
2093 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
2094
2095 if (geom_reply) {
2096 draw->width = geom_reply->width;
2097 draw->height = geom_reply->height;
2098 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
2099 draw->ext->flush->invalidate(draw->dri_drawable);
2100
2101 free(geom_reply);
2102 }
2103 }
2104
2105
2106 /**
2107 * Make sure the server has flushed all pending swap buffers to hardware
2108 * for this drawable. Ideally we'd want to send an X protocol request to
2109 * have the server block our connection until the swaps are complete. That
2110 * would avoid the potential round-trip here.
2111 */
void
loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
{
   /* We only need the blocking behavior of the wait; the returned
    * counters are discarded.
    */
   int64_t unused_ust, unused_msc, unused_sbc;

   (void) loader_dri3_wait_for_sbc(draw, 0,
                                   &unused_ust, &unused_msc, &unused_sbc);
}
2119
2120 /**
2121 * Perform any cleanup associated with a close screen operation.
2122 * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
2123 *
2124 * This function destroys the screen's cached swap context if any.
2125 */
2126 void
2127 loader_dri3_close_screen(__DRIscreen *dri_screen)
2128 {
2129 mtx_lock(&blit_context.mtx);
2130 if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
2131 blit_context.core->destroyContext(blit_context.ctx);
2132 blit_context.ctx = NULL;
2133 }
2134 mtx_unlock(&blit_context.mtx);
2135 }
2136
2137 /**
2138 * Find a backbuffer slot - potentially allocating a back buffer
2139 *
2140 * \param draw[in,out] Pointer to the drawable for which to find back.
2141 * \return Pointer to a new back buffer or NULL if allocation failed or was
2142 * not mandated.
2143 *
2144 * Find a potentially new back buffer, and if it's not been allocated yet and
2145 * in addition needs initializing, then try to allocate and initialize it.
2146 */
2147 #include <stdio.h>
2148 static struct loader_dri3_buffer *
2149 dri3_find_back_alloc(struct loader_dri3_drawable *draw)
2150 {
2151 struct loader_dri3_buffer *back;
2152 int id;
2153
2154 id = dri3_find_back(draw);
2155 if (id < 0)
2156 return NULL;
2157
2158 back = draw->buffers[id];
2159 /* Allocate a new back if we haven't got one */
2160 if (!back && draw->back_format != __DRI_IMAGE_FORMAT_NONE &&
2161 dri3_update_drawable(draw))
2162 back = dri3_alloc_render_buffer(draw, draw->back_format,
2163 draw->width, draw->height, draw->depth);
2164
2165 if (!back)
2166 return NULL;
2167
2168 draw->buffers[id] = back;
2169
2170 /* If necessary, prefill the back with data according to swap_method mode. */
2171 if (draw->cur_blit_source != -1 &&
2172 draw->buffers[draw->cur_blit_source] &&
2173 back != draw->buffers[draw->cur_blit_source]) {
2174 struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2175
2176 dri3_fence_await(draw->conn, draw, source);
2177 dri3_fence_await(draw->conn, draw, back);
2178 (void) loader_dri3_blit_image(draw,
2179 back->image,
2180 source->image,
2181 0, 0, draw->width, draw->height,
2182 0, 0, 0);
2183 back->last_swap = source->last_swap;
2184 draw->cur_blit_source = -1;
2185 }
2186
2187 return back;
2188 }