loader_dri3: Remove buffer_type from buffer metadata
[mesa.git] / src / loader / loader_dri3_helper.c
1 /*
2 * Copyright © 2013 Keith Packard
3 * Copyright © 2015 Boyan Ding
4 *
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that copyright
8 * notice and this permission notice appear in supporting documentation, and
9 * that the name of the copyright holders not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. The copyright holders make no representations
12 * about the suitability of this software for any purpose. It is provided "as
13 * is" without express or implied warranty.
14 *
15 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21 * OF THIS SOFTWARE.
22 */
23
24 #include <fcntl.h>
25 #include <stdlib.h>
26 #include <unistd.h>
27
28 #include <X11/xshmfence.h>
29 #include <xcb/xcb.h>
30 #include <xcb/dri3.h>
31 #include <xcb/present.h>
32
33 #include <X11/Xlib-xcb.h>
34
35 #include <c11/threads.h>
36 #include "loader_dri3_helper.h"
37
38 /* From xmlpool/options.h, user exposed so should be stable */
39 #define DRI_CONF_VBLANK_NEVER 0
40 #define DRI_CONF_VBLANK_DEF_INTERVAL_0 1
41 #define DRI_CONF_VBLANK_DEF_INTERVAL_1 2
42 #define DRI_CONF_VBLANK_ALWAYS_SYNC 3
43
/**
 * A cached blit context.
 *
 * Used when a blit is needed but the caller has no suitable current
 * context (see loader_dri3_blit_context_get).
 */
struct loader_dri3_blit_context {
   mtx_t mtx;                       /* Serializes use of the cached context. */
   __DRIcontext *ctx;               /* Lazily created context; NULL if none. */
   __DRIscreen *cur_screen;         /* Screen the cached context belongs to. */
   const __DRIcoreExtension *core;  /* Core extension used to destroy ctx. */
};

/* For simplicity we maintain the cache only for a single screen at a time */
static struct loader_dri3_blit_context blit_context = {
   _MTX_INITIALIZER_NP, NULL
};
58
59 static void
60 dri3_flush_present_events(struct loader_dri3_drawable *draw);
61
62 /**
63 * Do we have blit functionality in the image blit extension?
64 *
65 * \param draw[in] The drawable intended to blit from / to.
66 * \return true if we have blit functionality. false otherwise.
67 */
68 static bool loader_dri3_have_image_blit(const struct loader_dri3_drawable *draw)
69 {
70 return draw->ext->image->base.version >= 9 &&
71 draw->ext->image->blitImage != NULL;
72 }
73
/**
 * Get and lock (for use with the current thread) a dri context associated
 * with the drawable's dri screen. The context is intended to be used with
 * the dri image extension's blitImage method.
 *
 * \param draw[in]  Pointer to the drawable whose dri screen we want a
 * dri context for.
 * \return A dri context or NULL if context creation failed.
 *
 * When the caller is done with the context (even if the context returned was
 * NULL), the caller must call loader_dri3_blit_context_put.
 */
static __DRIcontext *
loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
{
   /* Lock is held until loader_dri3_blit_context_put() is called. */
   mtx_lock(&blit_context.mtx);

   /* The cache only ever holds one context; throw away a context made for
    * a different screen.
    */
   if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen) {
      blit_context.core->destroyContext(blit_context.ctx);
      blit_context.ctx = NULL;
   }

   if (!blit_context.ctx) {
      /* NOTE(review): cur_screen/core are updated even if createNewContext
       * returns NULL; the next call simply retries creation.
       */
      blit_context.ctx = draw->ext->core->createNewContext(draw->dri_screen,
                                                           NULL, NULL, NULL);
      blit_context.cur_screen = draw->dri_screen;
      blit_context.core = draw->ext->core;
   }

   return blit_context.ctx;
}
105
/**
 * Release (for use with other threads) a dri context previously obtained using
 * loader_dri3_blit_context_get.
 *
 * Simply drops the lock taken by loader_dri3_blit_context_get; must be
 * called exactly once per _get, even when _get returned NULL.
 */
static void
loader_dri3_blit_context_put(void)
{
   mtx_unlock(&blit_context.mtx);
}
115
116 /**
117 * Blit (parts of) the contents of a DRI image to another dri image
118 *
119 * \param draw[in] The drawable which owns the images.
120 * \param dst[in] The destination image.
121 * \param src[in] The source image.
122 * \param dstx0[in] Start destination coordinate.
123 * \param dsty0[in] Start destination coordinate.
124 * \param width[in] Blit width.
125 * \param height[in] Blit height.
126 * \param srcx0[in] Start source coordinate.
127 * \param srcy0[in] Start source coordinate.
128 * \param flush_flag[in] Image blit flush flag.
129 * \return true iff successful.
130 */
131 static bool
132 loader_dri3_blit_image(struct loader_dri3_drawable *draw,
133 __DRIimage *dst, __DRIimage *src,
134 int dstx0, int dsty0, int width, int height,
135 int srcx0, int srcy0, int flush_flag)
136 {
137 __DRIcontext *dri_context;
138 bool use_blit_context = false;
139
140 if (!loader_dri3_have_image_blit(draw))
141 return false;
142
143 dri_context = draw->vtable->get_dri_context(draw);
144
145 if (!dri_context || !draw->vtable->in_current_context(draw)) {
146 dri_context = loader_dri3_blit_context_get(draw);
147 use_blit_context = true;
148 flush_flag |= __BLIT_FLAG_FLUSH;
149 }
150
151 if (dri_context)
152 draw->ext->image->blitImage(dri_context, dst, src, dstx0, dsty0,
153 width, height, srcx0, srcy0,
154 width, height, flush_flag);
155
156 if (use_blit_context)
157 loader_dri3_blit_context_put();
158
159 return dri_context != NULL;
160 }
161
/* Mark the buffer's shared fence as "busy"; waiters will block until it is
 * triggered again. The connection parameter is unused but kept for symmetry
 * with the other fence helpers.
 */
static inline void
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xshmfence_reset(buffer->shm_fence);
}
167
/* Trigger the buffer's shared fence locally (client side), marking the
 * buffer as idle.
 */
static inline void
dri3_fence_set(struct loader_dri3_buffer *buffer)
{
   xshmfence_trigger(buffer->shm_fence);
}
173
/* Ask the X server to trigger the buffer's sync fence once it has finished
 * using the buffer.
 */
static inline void
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_sync_trigger_fence(c, buffer->sync_fence);
}
179
/* Block until the buffer's shared fence is triggered. Flushes the
 * connection first so a pending server-side trigger request is actually
 * sent and we cannot deadlock waiting on it.
 */
static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
}
186
187 static void
188 dri3_update_num_back(struct loader_dri3_drawable *draw)
189 {
190 if (draw->flipping)
191 draw->num_back = 3;
192 else
193 draw->num_back = 2;
194 }
195
/* Set the drawable's swap interval and recompute the back-buffer count,
 * which depends on the current flipping state.
 */
void
loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
{
   draw->swap_interval = interval;
   dri3_update_num_back(draw);
}
202
/** dri3_free_render_buffer
 *
 * Free everything associated with one render buffer including pixmap, fence
 * stuff and the driver image
 */
static void
dri3_free_render_buffer(struct loader_dri3_drawable *draw,
                        struct loader_dri3_buffer *buffer)
{
   /* The pixmap is only ours to free when we created it ourselves. */
   if (buffer->own_pixmap)
      xcb_free_pixmap(draw->conn, buffer->pixmap);
   xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
   xshmfence_unmap_shm(buffer->shm_fence);
   draw->ext->image->destroyImage(buffer->image);
   /* linear_buffer only exists in the different-GPU (prime) case. */
   if (buffer->linear_buffer)
      draw->ext->image->destroyImage(buffer->linear_buffer);
   free(buffer);
}
221
/* Tear down a drawable: destroy the DRI drawable, free all render buffers
 * and unregister the Present special event queue if one was created.
 * The draw structure itself is owned by the caller.
 */
void
loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
{
   int i;

   draw->ext->core->destroyDrawable(draw->dri_drawable);

   for (i = 0; i < LOADER_DRI3_NUM_BUFFERS; i++) {
      if (draw->buffers[i])
         dri3_free_render_buffer(draw, draw->buffers[i]);
   }

   /* NOTE(review): draw->gc (created lazily in dri3_drawable_gc) does not
    * appear to be freed here — confirm whether that XID is leaked.
    */
   if (draw->special_event) {
      /* Stop event delivery before unregistering the special event queue. */
      xcb_void_cookie_t cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_NO_EVENT);

      xcb_discard_reply(draw->conn, cookie.sequence);
      xcb_unregister_for_special_event(draw->conn, draw->special_event);
   }
}
243
244 int
245 loader_dri3_drawable_init(xcb_connection_t *conn,
246 xcb_drawable_t drawable,
247 __DRIscreen *dri_screen,
248 bool is_different_gpu,
249 const __DRIconfig *dri_config,
250 struct loader_dri3_extensions *ext,
251 const struct loader_dri3_vtable *vtable,
252 struct loader_dri3_drawable *draw)
253 {
254 xcb_get_geometry_cookie_t cookie;
255 xcb_get_geometry_reply_t *reply;
256 xcb_generic_error_t *error;
257 GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1;
258 int swap_interval;
259
260 draw->conn = conn;
261 draw->ext = ext;
262 draw->vtable = vtable;
263 draw->drawable = drawable;
264 draw->dri_screen = dri_screen;
265 draw->is_different_gpu = is_different_gpu;
266
267 draw->have_back = 0;
268 draw->have_fake_front = 0;
269 draw->first_init = true;
270
271 draw->cur_blit_source = -1;
272
273 if (draw->ext->config)
274 draw->ext->config->configQueryi(draw->dri_screen,
275 "vblank_mode", &vblank_mode);
276
277 switch (vblank_mode) {
278 case DRI_CONF_VBLANK_NEVER:
279 case DRI_CONF_VBLANK_DEF_INTERVAL_0:
280 swap_interval = 0;
281 break;
282 case DRI_CONF_VBLANK_DEF_INTERVAL_1:
283 case DRI_CONF_VBLANK_ALWAYS_SYNC:
284 default:
285 swap_interval = 1;
286 break;
287 }
288 draw->swap_interval = swap_interval;
289
290 dri3_update_num_back(draw);
291
292 /* Create a new drawable */
293 draw->dri_drawable =
294 draw->ext->image_driver->createNewDrawable(dri_screen,
295 dri_config,
296 draw);
297
298 if (!draw->dri_drawable)
299 return 1;
300
301 cookie = xcb_get_geometry(draw->conn, draw->drawable);
302 reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
303 if (reply == NULL || error != NULL) {
304 draw->ext->core->destroyDrawable(draw->dri_drawable);
305 return 1;
306 }
307
308 draw->width = reply->width;
309 draw->height = reply->height;
310 draw->depth = reply->depth;
311 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
312 free(reply);
313
314 draw->swap_method = __DRI_ATTRIB_SWAP_UNDEFINED;
315 if (draw->ext->core->base.version >= 2) {
316 (void )draw->ext->core->getConfigAttrib(dri_config,
317 __DRI_ATTRIB_SWAP_METHOD,
318 &draw->swap_method);
319 }
320
321 /*
322 * Make sure server has the same swap interval we do for the new
323 * drawable.
324 */
325 loader_dri3_set_swap_interval(draw, swap_interval);
326
327 return 0;
328 }
329
/*
 * Process one Present event. Takes ownership of, and frees, the event.
 */
static void
dri3_handle_present_event(struct loader_dri3_drawable *draw,
                          xcb_present_generic_event_t *ge)
{
   switch (ge->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      /* The window was resized; track the new geometry. */
      xcb_present_configure_notify_event_t *ce = (void *) ge;

      draw->width = ce->width;
      draw->height = ce->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      break;
   }
   case XCB_PRESENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *ce = (void *) ge;

      /* Compute the processed SBC number from the received 32-bit serial number
       * merged with the upper 32-bits of the sent 64-bit serial number while
       * checking for wrap.
       */
      if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         draw->recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;
         if (draw->recv_sbc > draw->send_sbc)
            draw->recv_sbc -= 0x100000000;
         /* Track whether the server flipped or copied; this changes how
          * many back buffers we want to keep (dri3_update_num_back).
          */
         switch (ce->mode) {
         case XCB_PRESENT_COMPLETE_MODE_FLIP:
            draw->flipping = true;
            break;
         case XCB_PRESENT_COMPLETE_MODE_COPY:
            draw->flipping = false;
            break;
         }
         dri3_update_num_back(draw);

         if (draw->vtable->show_fps)
            draw->vtable->show_fps(draw, ce->ust);

         draw->ust = ce->ust;
         draw->msc = ce->msc;
      } else {
         /* NOTIFY_MSC completion (from xcb_present_notify_msc). */
         draw->recv_msc_serial = ce->serial;
         draw->notify_ust = ce->ust;
         draw->notify_msc = ce->msc;
      }
      break;
   }
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      /* The server is done with a pixmap; mark the matching buffer idle.
       * Buffers beyond the currently wanted num_back are freed eagerly.
       */
      xcb_present_idle_notify_event_t *ie = (void *) ge;
      int b;

      for (b = 0; b < sizeof(draw->buffers) / sizeof(draw->buffers[0]); b++) {
         struct loader_dri3_buffer *buf = draw->buffers[b];

         if (buf && buf->pixmap == ie->pixmap) {
            buf->busy = 0;
            if (draw->num_back <= b && b < LOADER_DRI3_MAX_BACK) {
               dri3_free_render_buffer(draw, buf);
               draw->buffers[b] = NULL;
            }
            break;
         }
      }
      break;
   }
   }
   free(ge);
}
400
401 static bool
402 dri3_wait_for_event(struct loader_dri3_drawable *draw)
403 {
404 xcb_generic_event_t *ev;
405 xcb_present_generic_event_t *ge;
406
407 xcb_flush(draw->conn);
408 ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
409 if (!ev)
410 return false;
411 ge = (void *) ev;
412 dri3_handle_present_event(draw, ge);
413 return true;
414 }
415
/** loader_dri3_wait_for_msc
 *
 * Get the X server to send an event when the target msc/divisor/remainder is
 * reached.
 *
 * Blocks processing Present events until the matching COMPLETE_NOTIFY for
 * our serial arrives, then reports the last notified ust/msc and the
 * current received SBC. Returns false if event waiting fails.
 */
bool
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
                         int64_t target_msc,
                         int64_t divisor, int64_t remainder,
                         int64_t *ust, int64_t *msc, int64_t *sbc)
{
   uint32_t msc_serial;

   /* Ask for an event */
   msc_serial = ++draw->send_msc_serial;
   xcb_present_notify_msc(draw->conn,
                          draw->drawable,
                          msc_serial,
                          target_msc,
                          divisor,
                          remainder);

   xcb_flush(draw->conn);

   /* Wait for the event */
   if (draw->special_event) {
      /* Serial comparison is wrap-safe via signed 32-bit difference. */
      while ((int32_t) (msc_serial - draw->recv_msc_serial) > 0) {
         if (!dri3_wait_for_event(draw))
            return false;
      }
   }

   /* NOTE(review): without a special event queue (pixmap drawables) the
    * values below may be stale — confirm callers never hit that path.
    */
   *ust = draw->notify_ust;
   *msc = draw->notify_msc;
   *sbc = draw->recv_sbc;

   return true;
}
453
/** loader_dri3_wait_for_sbc
 *
 * Wait for the completed swap buffer count to reach the specified
 * target. Presumably the application knows that this will be reached with
 * outstanding complete events, or we're going to be here awhile.
 *
 * \return 1 on success (outputs filled in), 0 if event waiting failed.
 */
int
loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
                         int64_t target_sbc, int64_t *ust,
                         int64_t *msc, int64_t *sbc)
{
   /* From the GLX_OML_sync_control spec:
    *
    *     "If <target_sbc> = 0, the function will block until all previous
    *      swaps requested with glXSwapBuffersMscOML for that window have
    *      completed."
    */
   if (!target_sbc)
      target_sbc = draw->send_sbc;

   /* recv_sbc is advanced by dri3_handle_present_event as COMPLETE_NOTIFY
    * events come in.
    */
   while (draw->recv_sbc < target_sbc) {
      if (!dri3_wait_for_event(draw))
         return 0;
   }

   *ust = draw->ust;
   *msc = draw->msc;
   *sbc = draw->recv_sbc;
   return 1;
}
484
485 /** loader_dri3_find_back
486 *
487 * Find an idle back buffer. If there isn't one, then
488 * wait for a present idle notify event from the X server
489 */
490 static int
491 dri3_find_back(struct loader_dri3_drawable *draw)
492 {
493 int b;
494 xcb_generic_event_t *ev;
495 xcb_present_generic_event_t *ge;
496 int num_to_consider = draw->num_back;
497
498 /* Increase the likelyhood of reusing current buffer */
499 dri3_flush_present_events(draw);
500
501 /* Check whether we need to reuse the current back buffer as new back.
502 * In that case, wait until it's not busy anymore.
503 */
504 if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) {
505 num_to_consider = 1;
506 draw->cur_blit_source = -1;
507 }
508
509 for (;;) {
510 for (b = 0; b < num_to_consider; b++) {
511 int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->num_back);
512 struct loader_dri3_buffer *buffer = draw->buffers[id];
513
514 if (!buffer || !buffer->busy) {
515 draw->cur_back = id;
516 return id;
517 }
518 }
519 xcb_flush(draw->conn);
520 ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
521 if (!ev)
522 return -1;
523 ge = (void *) ev;
524 dri3_handle_present_event(draw, ge);
525 }
526 }
527
/* Lazily create (once) and return a GC for the drawable, with graphics
 * exposures disabled so CopyArea does not generate expose events.
 */
static xcb_gcontext_t
dri3_drawable_gc(struct loader_dri3_drawable *draw)
{
   if (!draw->gc) {
      uint32_t v = 0;
      xcb_create_gc(draw->conn,
                    (draw->gc = xcb_generate_id(draw->conn)),
                    draw->drawable,
                    XCB_GC_GRAPHICS_EXPOSURES,
                    &v);
   }
   return draw->gc;
}
541
542
/* Return the current back buffer slot (may be NULL if not allocated). */
static struct loader_dri3_buffer *
dri3_back_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
}
548
/* Return the fake front buffer slot (may be NULL if not allocated). */
static struct loader_dri3_buffer *
dri3_fake_front_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_FRONT_ID];
}
554
555 static void
556 dri3_copy_area(xcb_connection_t *c,
557 xcb_drawable_t src_drawable,
558 xcb_drawable_t dst_drawable,
559 xcb_gcontext_t gc,
560 int16_t src_x,
561 int16_t src_y,
562 int16_t dst_x,
563 int16_t dst_y,
564 uint16_t width,
565 uint16_t height)
566 {
567 xcb_void_cookie_t cookie;
568
569 cookie = xcb_copy_area_checked(c,
570 src_drawable,
571 dst_drawable,
572 gc,
573 src_x,
574 src_y,
575 dst_x,
576 dst_y,
577 width,
578 height);
579 xcb_discard_reply(c, cookie.sequence);
580 }
581
/**
 * Asks the driver to flush any queued work necessary for serializing with the
 * X command stream, and optionally the slightly more strict requirement of
 * glFlush() equivalence (which would require flushing even if nothing had
 * been drawn to a window system framebuffer, for example).
 */
void
loader_dri3_flush(struct loader_dri3_drawable *draw,
                  unsigned flags,
                  enum __DRI2throttleReason throttle_reason)
{
   /* The drawable's context may be NULL (nothing current); in that case
    * there is nothing to flush.
    */
   __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);

   if (dri_context) {
      draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
                                         flags, throttle_reason);
   }
}
601
/* Copy a sub-rectangle of the back buffer to the window (GLX_MESA_copy_sub_buffer
 * style). Coordinates are given with a GL (bottom-left) origin and flipped
 * to X's top-left origin below. Also keeps the fake front and, in the
 * prime case, the linear buffer in sync.
 */
void
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
                            int x, int y,
                            int width, int height,
                            bool flush)
{
   struct loader_dri3_buffer *back;
   unsigned flags = __DRI2_FLUSH_DRAWABLE;

   /* Check we have the right attachments */
   if (!draw->have_back || draw->is_pixmap)
      return;

   if (flush)
      flags |= __DRI2_FLUSH_CONTEXT;
   loader_dri3_flush(draw, flags, __DRI2_THROTTLE_SWAPBUFFER);

   back = dri3_back_buffer(draw);
   /* Flip from GL's bottom-left origin to X's top-left origin. */
   y = draw->height - y - height;

   if (draw->is_different_gpu) {
      /* Update the linear buffer part of the back buffer
       * for the dri3_copy_area operation
       */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
      /* We use blit_image to update our fake front,
       */
      if (draw->have_fake_front)
         (void) loader_dri3_blit_image(draw,
                                       dri3_fake_front_buffer(draw)->image,
                                       back->image,
                                       x, y, width, height,
                                       x, y, __BLIT_FLAG_FLUSH);
   }

   loader_dri3_swapbuffer_barrier(draw);
   /* Mark the back buffer busy until the server triggers its fence. */
   dri3_fence_reset(draw->conn, back);
   dri3_copy_area(draw->conn,
                  dri3_back_buffer(draw)->pixmap,
                  draw->drawable,
                  dri3_drawable_gc(draw),
                  x, y, x, y, width, height);
   dri3_fence_trigger(draw->conn, back);
   /* Refresh the fake front (if present) after we just damaged the real
    * front.
    */
   if (draw->have_fake_front && !draw->is_different_gpu) {
      dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
      dri3_copy_area(draw->conn,
                     dri3_back_buffer(draw)->pixmap,
                     dri3_fake_front_buffer(draw)->pixmap,
                     dri3_drawable_gc(draw),
                     x, y, x, y, width, height);
      dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
      dri3_fence_await(draw->conn, dri3_fake_front_buffer(draw));
   }
   dri3_fence_await(draw->conn, back);
}
664
/* Copy the full drawable area from src to dest with the server, using the
 * fake front's fence to serialize: the copy is bracketed by a fence
 * reset/trigger and awaited before returning.
 */
void
loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
                          xcb_drawable_t dest,
                          xcb_drawable_t src)
{
   loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, 0);

   dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
   dri3_copy_area(draw->conn,
                  src, dest,
                  dri3_drawable_gc(draw),
                  0, 0, 0, 0, draw->width, draw->height);
   dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
   dri3_fence_await(draw->conn, dri3_fake_front_buffer(draw));
}
680
/* Bring the fake front up to date with the real front (i.e. wait for X
 * rendering): copy window contents into the fake front, then, on a prime
 * setup, blit the updated linear buffer back into the tiled image we
 * render from.
 */
void
loader_dri3_wait_x(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);

   /* In the psc->is_different_gpu case, the linear buffer has been updated,
    * but not yet the tiled buffer.
    * Copy back to the tiled buffer we use for rendering.
    * Note that we don't need flushing.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->image,
                                    front->linear_buffer,
                                    0, 0, front->width, front->height,
                                    0, 0, 0);
}
705
/* Push GL rendering in the fake front out to the real front (i.e. wait for
 * GL): on a prime setup first refresh the shared linear buffer, then copy
 * the fake front pixmap onto the window.
 */
void
loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);

   /* In the psc->is_different_gpu case, we update the linear_buffer
    * before updating the real front.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->linear_buffer,
                                    front->image,
                                    0, 0, front->width, front->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   loader_dri3_swapbuffer_barrier(draw);
   loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
}
728
729 /** dri3_flush_present_events
730 *
731 * Process any present events that have been received from the X server
732 */
733 static void
734 dri3_flush_present_events(struct loader_dri3_drawable *draw)
735 {
736 /* Check to see if any configuration changes have occurred
737 * since we were last invoked
738 */
739 if (draw->special_event) {
740 xcb_generic_event_t *ev;
741
742 while ((ev = xcb_poll_for_special_event(draw->conn,
743 draw->special_event)) != NULL) {
744 xcb_present_generic_event_t *ge = (void *) ev;
745 dri3_handle_present_event(draw, ge);
746 }
747 }
748 }
749
750 /** loader_dri3_swap_buffers_msc
751 *
752 * Make the current back buffer visible using the present extension
753 */
754 int64_t
755 loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
756 int64_t target_msc, int64_t divisor,
757 int64_t remainder, unsigned flush_flags,
758 bool force_copy)
759 {
760 struct loader_dri3_buffer *back;
761 int64_t ret = 0;
762 uint32_t options = XCB_PRESENT_OPTION_NONE;
763
764 draw->vtable->flush_drawable(draw, flush_flags);
765
766 back = draw->buffers[dri3_find_back(draw)];
767 if (draw->is_different_gpu && back) {
768 /* Update the linear buffer before presenting the pixmap */
769 (void) loader_dri3_blit_image(draw,
770 back->linear_buffer,
771 back->image,
772 0, 0, back->width, back->height,
773 0, 0, __BLIT_FLAG_FLUSH);
774 /* Update the fake front */
775 if (draw->have_fake_front)
776 (void) loader_dri3_blit_image(draw,
777 draw->buffers[LOADER_DRI3_FRONT_ID]->image,
778 back->image,
779 0, 0, draw->width, draw->height,
780 0, 0, __BLIT_FLAG_FLUSH);
781 }
782
783 /* If we need to preload the new back buffer, remember the source.
784 * The force_copy parameter is used by EGL to attempt to preserve
785 * the back buffer across a call to this function.
786 */
787 if (draw->swap_method == __DRI_ATTRIB_SWAP_COPY || force_copy)
788 draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);
789
790 dri3_flush_present_events(draw);
791
792 if (back && !draw->is_pixmap) {
793 dri3_fence_reset(draw->conn, back);
794
795 /* Compute when we want the frame shown by taking the last known
796 * successful MSC and adding in a swap interval for each outstanding swap
797 * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
798 * semantic"
799 */
800 ++draw->send_sbc;
801 if (target_msc == 0 && divisor == 0 && remainder == 0)
802 target_msc = draw->msc + draw->swap_interval *
803 (draw->send_sbc - draw->recv_sbc);
804 else if (divisor == 0 && remainder > 0) {
805 /* From the GLX_OML_sync_control spec:
806 * "If <divisor> = 0, the swap will occur when MSC becomes
807 * greater than or equal to <target_msc>."
808 *
809 * Note that there's no mention of the remainder. The Present
810 * extension throws BadValue for remainder != 0 with divisor == 0, so
811 * just drop the passed in value.
812 */
813 remainder = 0;
814 }
815
816 /* From the GLX_EXT_swap_control spec
817 * and the EGL 1.4 spec (page 53):
818 *
819 * "If <interval> is set to a value of 0, buffer swaps are not
820 * synchronized to a video frame."
821 *
822 * Implementation note: It is possible to enable triple buffering
823 * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
824 * the default.
825 */
826 if (draw->swap_interval == 0)
827 options |= XCB_PRESENT_OPTION_ASYNC;
828
829 /* If we need to populate the new back, but need to reuse the back
830 * buffer slot due to lack of local blit capabilities, make sure
831 * the server doesn't flip and we deadlock.
832 */
833 if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1)
834 options |= XCB_PRESENT_OPTION_COPY;
835
836 back->busy = 1;
837 back->last_swap = draw->send_sbc;
838 xcb_present_pixmap(draw->conn,
839 draw->drawable,
840 back->pixmap,
841 (uint32_t) draw->send_sbc,
842 0, /* valid */
843 0, /* update */
844 0, /* x_off */
845 0, /* y_off */
846 None, /* target_crtc */
847 None,
848 back->sync_fence,
849 options,
850 target_msc,
851 divisor,
852 remainder, 0, NULL);
853 ret = (int64_t) draw->send_sbc;
854
855 /* If there's a fake front, then copy the source back buffer
856 * to the fake front to keep it up to date. This needs
857 * to reset the fence and make future users block until
858 * the X server is done copying the bits
859 */
860 if (draw->have_fake_front && !draw->is_different_gpu) {
861 dri3_fence_reset(draw->conn, draw->buffers[LOADER_DRI3_FRONT_ID]);
862 dri3_copy_area(draw->conn,
863 back->pixmap,
864 draw->buffers[LOADER_DRI3_FRONT_ID]->pixmap,
865 dri3_drawable_gc(draw),
866 0, 0, 0, 0,
867 draw->width, draw->height);
868 dri3_fence_trigger(draw->conn, draw->buffers[LOADER_DRI3_FRONT_ID]);
869 }
870 xcb_flush(draw->conn);
871 if (draw->stamp)
872 ++(*draw->stamp);
873 }
874
875 draw->ext->flush->invalidate(draw->dri_drawable);
876
877 return ret;
878 }
879
880 int
881 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
882 {
883 int back_id = LOADER_DRI3_BACK_ID(dri3_find_back(draw));
884
885 if (back_id < 0 || !draw->buffers[back_id])
886 return 0;
887
888 if (draw->buffers[back_id]->last_swap != 0)
889 return draw->send_sbc - draw->buffers[back_id]->last_swap + 1;
890 else
891 return 0;
892 }
893
894 /** loader_dri3_open
895 *
896 * Wrapper around xcb_dri3_open
897 */
898 int
899 loader_dri3_open(xcb_connection_t *conn,
900 xcb_window_t root,
901 uint32_t provider)
902 {
903 xcb_dri3_open_cookie_t cookie;
904 xcb_dri3_open_reply_t *reply;
905 int fd;
906
907 cookie = xcb_dri3_open(conn,
908 root,
909 provider);
910
911 reply = xcb_dri3_open_reply(conn, cookie, NULL);
912 if (!reply)
913 return -1;
914
915 if (reply->nfd != 1) {
916 free(reply);
917 return -1;
918 }
919
920 fd = xcb_dri3_open_reply_fds(conn, reply)[0];
921 free(reply);
922 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
923
924 return fd;
925 }
926
927 static uint32_t
928 dri3_cpp_for_format(uint32_t format) {
929 switch (format) {
930 case __DRI_IMAGE_FORMAT_R8:
931 return 1;
932 case __DRI_IMAGE_FORMAT_RGB565:
933 case __DRI_IMAGE_FORMAT_GR88:
934 return 2;
935 case __DRI_IMAGE_FORMAT_XRGB8888:
936 case __DRI_IMAGE_FORMAT_ARGB8888:
937 case __DRI_IMAGE_FORMAT_ABGR8888:
938 case __DRI_IMAGE_FORMAT_XBGR8888:
939 case __DRI_IMAGE_FORMAT_XRGB2101010:
940 case __DRI_IMAGE_FORMAT_ARGB2101010:
941 case __DRI_IMAGE_FORMAT_SARGB8:
942 return 4;
943 case __DRI_IMAGE_FORMAT_NONE:
944 default:
945 return 0;
946 }
947 }
948
949 /** loader_dri3_alloc_render_buffer
950 *
951 * Use the driver createImage function to construct a __DRIimage, then
952 * get a file descriptor for that and create an X pixmap from that
953 *
954 * Allocate an xshmfence for synchronization
955 */
956 static struct loader_dri3_buffer *
957 dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
958 int width, int height, int depth)
959 {
960 struct loader_dri3_buffer *buffer;
961 __DRIimage *pixmap_buffer;
962 xcb_pixmap_t pixmap;
963 xcb_sync_fence_t sync_fence;
964 struct xshmfence *shm_fence;
965 int buffer_fd, fence_fd;
966 int stride;
967
968 /* Create an xshmfence object and
969 * prepare to send that to the X server
970 */
971
972 fence_fd = xshmfence_alloc_shm();
973 if (fence_fd < 0)
974 return NULL;
975
976 shm_fence = xshmfence_map_shm(fence_fd);
977 if (shm_fence == NULL)
978 goto no_shm_fence;
979
980 /* Allocate the image from the driver
981 */
982 buffer = calloc(1, sizeof *buffer);
983 if (!buffer)
984 goto no_buffer;
985
986 buffer->cpp = dri3_cpp_for_format(format);
987 if (!buffer->cpp)
988 goto no_image;
989
990 if (!draw->is_different_gpu) {
991 buffer->image = draw->ext->image->createImage(draw->dri_screen,
992 width, height,
993 format,
994 __DRI_IMAGE_USE_SHARE |
995 __DRI_IMAGE_USE_SCANOUT |
996 __DRI_IMAGE_USE_BACKBUFFER,
997 buffer);
998 pixmap_buffer = buffer->image;
999
1000 if (!buffer->image)
1001 goto no_image;
1002 } else {
1003 buffer->image = draw->ext->image->createImage(draw->dri_screen,
1004 width, height,
1005 format,
1006 0,
1007 buffer);
1008
1009 if (!buffer->image)
1010 goto no_image;
1011
1012 buffer->linear_buffer =
1013 draw->ext->image->createImage(draw->dri_screen,
1014 width, height, format,
1015 __DRI_IMAGE_USE_SHARE |
1016 __DRI_IMAGE_USE_LINEAR |
1017 __DRI_IMAGE_USE_BACKBUFFER,
1018 buffer);
1019 pixmap_buffer = buffer->linear_buffer;
1020
1021 if (!buffer->linear_buffer)
1022 goto no_linear_buffer;
1023 }
1024
1025 /* X wants the stride, so ask the image for it
1026 */
1027 if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_STRIDE,
1028 &stride))
1029 goto no_buffer_attrib;
1030
1031 buffer->pitch = stride;
1032
1033 if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_FD,
1034 &buffer_fd))
1035 goto no_buffer_attrib;
1036
1037 xcb_dri3_pixmap_from_buffer(draw->conn,
1038 (pixmap = xcb_generate_id(draw->conn)),
1039 draw->drawable,
1040 buffer->size,
1041 width, height, buffer->pitch,
1042 depth, buffer->cpp * 8,
1043 buffer_fd);
1044
1045 xcb_dri3_fence_from_fd(draw->conn,
1046 pixmap,
1047 (sync_fence = xcb_generate_id(draw->conn)),
1048 false,
1049 fence_fd);
1050
1051 buffer->pixmap = pixmap;
1052 buffer->own_pixmap = true;
1053 buffer->sync_fence = sync_fence;
1054 buffer->shm_fence = shm_fence;
1055 buffer->width = width;
1056 buffer->height = height;
1057
1058 /* Mark the buffer as idle
1059 */
1060 dri3_fence_set(buffer);
1061
1062 return buffer;
1063
1064 no_buffer_attrib:
1065 draw->ext->image->destroyImage(pixmap_buffer);
1066 no_linear_buffer:
1067 if (draw->is_different_gpu)
1068 draw->ext->image->destroyImage(buffer->image);
1069 no_image:
1070 free(buffer);
1071 no_buffer:
1072 xshmfence_unmap_shm(shm_fence);
1073 no_shm_fence:
1074 close(fence_fd);
1075 return NULL;
1076 }
1077
/** dri3_update_drawable
 *
 * Called the first time we use the drawable and then
 * after we receive present configure notify events to
 * track the geometry of the drawable
 *
 * \return true (as int) on success, false if the geometry query or the
 *         Present select-input request failed for a real (non-BadWindow)
 *         reason.
 */
static int
dri3_update_drawable(__DRIdrawable *driDrawable,
                     struct loader_dri3_drawable *draw)
{
   if (draw->first_init) {
      xcb_get_geometry_cookie_t geom_cookie;
      xcb_get_geometry_reply_t *geom_reply;
      xcb_void_cookie_t cookie;
      xcb_generic_error_t *error;
      xcb_present_query_capabilities_cookie_t present_capabilities_cookie;
      xcb_present_query_capabilities_reply_t *present_capabilities_reply;

      draw->first_init = false;

      /* Try to select for input on the window.
       *
       * If the drawable is a window, this will get our events
       * delivered.
       *
       * Otherwise, we'll get a BadWindow error back from this request which
       * will let us know that the drawable is a pixmap instead.
       */

      draw->eid = xcb_generate_id(draw->conn);
      cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

      present_capabilities_cookie =
         xcb_present_query_capabilities(draw->conn, draw->drawable);

      /* Create an XCB event queue to hold present events outside of the usual
       * application event queue
       */
      draw->special_event = xcb_register_for_special_xge(draw->conn,
                                                         &xcb_present_id,
                                                         draw->eid,
                                                         draw->stamp);
      geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);

      geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);

      if (!geom_reply)
         return false;

      draw->width = geom_reply->width;
      draw->height = geom_reply->height;
      draw->depth = geom_reply->depth;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);

      free(geom_reply);

      draw->is_pixmap = false;

      /* Check to see if our select input call failed. If it failed with a
       * BadWindow error, then assume the drawable is a pixmap. Destroy the
       * special event queue created above and mark the drawable as a pixmap
       */

      error = xcb_request_check(draw->conn, cookie);

      present_capabilities_reply =
          xcb_present_query_capabilities_reply(draw->conn,
                                               present_capabilities_cookie,
                                               NULL);

      if (present_capabilities_reply) {
         draw->present_capabilities = present_capabilities_reply->capabilities;
         free(present_capabilities_reply);
      } else
         draw->present_capabilities = 0;

      if (error) {
         if (error->error_code != BadWindow) {
            free(error);
            return false;
         }
         /* BadWindow means the drawable is a pixmap: no Present events. */
         draw->is_pixmap = true;
         xcb_unregister_for_special_event(draw->conn, draw->special_event);
         draw->special_event = NULL;
      }
   }
   /* Pick up any pending geometry changes before the caller uses them. */
   dri3_flush_present_events(draw);
   return true;
}
1171
1172 /* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
1173 * the createImageFromFds call takes __DRI_IMAGE_FOURCC codes. To avoid
1174 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
1175 * translate to __DRI_IMAGE_FOURCC codes in the call to createImageFromFds
1176 */
1177 static int
1178 image_format_to_fourcc(int format)
1179 {
1180
1181 /* Convert from __DRI_IMAGE_FORMAT to __DRI_IMAGE_FOURCC (sigh) */
1182 switch (format) {
1183 case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
1184 case __DRI_IMAGE_FORMAT_RGB565: return __DRI_IMAGE_FOURCC_RGB565;
1185 case __DRI_IMAGE_FORMAT_XRGB8888: return __DRI_IMAGE_FOURCC_XRGB8888;
1186 case __DRI_IMAGE_FORMAT_ARGB8888: return __DRI_IMAGE_FOURCC_ARGB8888;
1187 case __DRI_IMAGE_FORMAT_ABGR8888: return __DRI_IMAGE_FOURCC_ABGR8888;
1188 case __DRI_IMAGE_FORMAT_XBGR8888: return __DRI_IMAGE_FOURCC_XBGR8888;
1189 }
1190 return 0;
1191 }
1192
1193 __DRIimage *
1194 loader_dri3_create_image(xcb_connection_t *c,
1195 xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1196 unsigned int format,
1197 __DRIscreen *dri_screen,
1198 const __DRIimageExtension *image,
1199 void *loaderPrivate)
1200 {
1201 int *fds;
1202 __DRIimage *image_planar, *ret;
1203 int stride, offset;
1204
1205 /* Get an FD for the pixmap object
1206 */
1207 fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1208
1209 stride = bp_reply->stride;
1210 offset = 0;
1211
1212 /* createImageFromFds creates a wrapper __DRIimage structure which
1213 * can deal with multiple planes for things like Yuv images. So, once
1214 * we've gotten the planar wrapper, pull the single plane out of it and
1215 * discard the wrapper.
1216 */
1217 image_planar = image->createImageFromFds(dri_screen,
1218 bp_reply->width,
1219 bp_reply->height,
1220 image_format_to_fourcc(format),
1221 fds, 1,
1222 &stride, &offset, loaderPrivate);
1223 close(fds[0]);
1224 if (!image_planar)
1225 return NULL;
1226
1227 ret = image->fromPlanar(image_planar, 0, loaderPrivate);
1228
1229 image->destroyImage(image_planar);
1230
1231 return ret;
1232 }
1233
/** dri3_get_pixmap_buffer
 *
 * Get the DRM object for a pixmap from the X server and
 * wrap that with a __DRIimage structure using createImageFromFds.
 *
 * The wrapped buffer is cached in draw->buffers[] and returned directly
 * on subsequent calls.  Returns NULL on allocation or protocol failure.
 */
static struct loader_dri3_buffer *
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
                       enum loader_dri3_buffer_type buffer_type,
                       struct loader_dri3_drawable *draw)
{
   int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
   struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
   xcb_drawable_t pixmap;
   xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
   xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int fence_fd;

   /* Already wrapped this pixmap once: reuse the cached buffer */
   if (buffer)
      return buffer;

   pixmap = draw->drawable;

   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   /* Create a shared-memory fence used to synchronize access to the
    * pixmap between the server and us
    */
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto no_fence;
   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL) {
      close (fence_fd);
      goto no_fence;
   }

   /* Hand fence_fd to the server as an X sync fence; note that it is
    * not closed locally after this point
    */
   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);

   /* Ask the server for an fd referencing the pixmap's storage */
   bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
   bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
   if (!bp_reply)
      goto no_image;

   buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
                                            draw->dri_screen, draw->ext->image,
                                            buffer);
   if (!buffer->image)
      goto no_image;

   /* own_pixmap is false: the pixmap belongs to the application, so it
    * must not be freed when this buffer is destroyed
    */
   buffer->pixmap = pixmap;
   buffer->own_pixmap = false;
   buffer->width = bp_reply->width;
   buffer->height = bp_reply->height;
   buffer->shm_fence = shm_fence;
   buffer->sync_fence = sync_fence;

   draw->buffers[buf_id] = buffer;

   free(bp_reply);

   return buffer;

no_image:
   /* bp_reply may be NULL here; free(NULL) is a no-op */
   free(bp_reply);
   xcb_sync_destroy_fence(draw->conn, sync_fence);
   xshmfence_unmap_shm(shm_fence);
no_fence:
   free(buffer);
no_buffer:
   return NULL;
}
1310
/** dri3_get_buffer
 *
 * Find a front or back buffer, allocating new ones as necessary.
 *
 * Reallocates a buffer when the cached one doesn't match the current
 * drawable size, copying the old contents across under fence protection.
 * Returns NULL on allocation failure.
 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int format,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   int buf_id;

   /* Back buffers rotate through a pool; the front buffer has a fixed
    * slot
    */
   if (buffer_type == loader_dri3_buffer_back) {
      buf_id = dri3_find_back(draw);

      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, or if that
    * old one is the wrong size
    */
   if (!buffer || buffer->width != draw->width ||
       buffer->height != draw->height) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new buffers
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                            format,
                                            draw->width,
                                            draw->height,
                                            draw->depth);
      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      switch (buffer_type) {
      case loader_dri3_buffer_back:
         if (buffer) {
            if (!buffer->linear_buffer) {
               /* No separate linear copy: ask the server to copy
                * pixmap-to-pixmap, fenced on both ends
                */
               dri3_fence_reset(draw->conn, new_buffer);
               dri3_fence_await(draw->conn, buffer);
               dri3_copy_area(draw->conn,
                              buffer->pixmap,
                              new_buffer->pixmap,
                              dri3_drawable_gc(draw),
                              0, 0, 0, 0,
                              draw->width, draw->height);
               dri3_fence_trigger(draw->conn, new_buffer);
            } else if (draw->vtable->in_current_context(draw)) {
               /* A linear copy exists (render-offload setup); blit
                * image-to-image, which requires a current context
                */
               (void) loader_dri3_blit_image(draw,
                                             new_buffer->image,
                                             buffer->image,
                                             0, 0, draw->width, draw->height,
                                             0, 0, 0);
            }
            dri3_free_render_buffer(draw, buffer);
         }
         break;
      case loader_dri3_buffer_front:
         /* Fake front: seed it from the on-screen drawable after all
          * pending swaps have landed
          */
         loader_dri3_swapbuffer_barrier(draw);
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         if (new_buffer->linear_buffer &&
             draw->vtable->in_current_context(draw)) {
            /* Propagate the copied contents from the linear image into
             * the render image
             */
            dri3_fence_await(draw->conn, new_buffer);
            (void) loader_dri3_blit_image(draw,
                                          new_buffer->image,
                                          new_buffer->linear_buffer,
                                          0, 0, draw->width, draw->height,
                                          0, 0, 0);
         }
         break;
      }
      buffer = new_buffer;
      draw->buffers[buf_id] = buffer;
   }
   /* Wait for the buffer to become idle before handing it out */
   dri3_fence_await(draw->conn, buffer);

   /*
    * Do we need to preserve the content of a previous buffer?
    *
    * Note that this blit is needed only to avoid a wait for a buffer that
    * is currently in the flip chain or being scanned out from. That's really
    * a tradeoff. If we're ok with the wait we can reduce the number of back
    * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
    * but in the latter case we must disallow page-flipping.
    */
   if (buffer_type == loader_dri3_buffer_back &&
       draw->cur_blit_source != -1 &&
       draw->buffers[draw->cur_blit_source] &&
       buffer != draw->buffers[draw->cur_blit_source]) {

      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];

      /* Avoid flushing here. Will probably do good for tiling hardware. */
      (void) loader_dri3_blit_image(draw,
                                    buffer->image,
                                    source->image,
                                    0, 0, draw->width, draw->height,
                                    0, 0, 0);
      buffer->last_swap = source->last_swap;
      draw->cur_blit_source = -1;
   }
   /* Return the requested buffer */
   return buffer;
}
1433
1434 /** dri3_free_buffers
1435 *
1436 * Free the front bufffer or all of the back buffers. Used
1437 * when the application changes which buffers it needs
1438 */
1439 static void
1440 dri3_free_buffers(__DRIdrawable *driDrawable,
1441 enum loader_dri3_buffer_type buffer_type,
1442 struct loader_dri3_drawable *draw)
1443 {
1444 struct loader_dri3_buffer *buffer;
1445 int first_id;
1446 int n_id;
1447 int buf_id;
1448
1449 switch (buffer_type) {
1450 case loader_dri3_buffer_back:
1451 first_id = LOADER_DRI3_BACK_ID(0);
1452 n_id = LOADER_DRI3_MAX_BACK;
1453 break;
1454 case loader_dri3_buffer_front:
1455 first_id = LOADER_DRI3_FRONT_ID;
1456 n_id = 1;
1457 }
1458
1459 for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) {
1460 buffer = draw->buffers[buf_id];
1461 if (buffer) {
1462 dri3_free_render_buffer(draw, buffer);
1463 draw->buffers[buf_id] = NULL;
1464 }
1465 }
1466 }
1467
/** loader_dri3_get_buffers
 *
 * The published buffer allocation API.
 * Returns all of the necessary buffers, allocating
 * as needed.
 *
 * Fills in \p buffers with the front and/or back images requested by
 * \p buffer_mask and returns true, or false on failure.
 */
int
loader_dri3_get_buffers(__DRIdrawable *driDrawable,
                        unsigned int format,
                        uint32_t *stamp,
                        void *loaderPrivate,
                        uint32_t buffer_mask,
                        struct __DRIimageList *buffers)
{
   struct loader_dri3_drawable *draw = loaderPrivate;
   struct loader_dri3_buffer *front, *back;

   buffers->image_mask = 0;
   buffers->front = NULL;
   buffers->back = NULL;

   front = NULL;
   back = NULL;

   /* Refresh drawable state and drain pending present events */
   if (!dri3_update_drawable(driDrawable, draw))
      return false;

   /* pixmaps always have front buffers */
   if (draw->is_pixmap)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
      /* All pixmaps are owned by the server gpu.
       * When we use a different gpu, we can't use the pixmap
       * as buffer since it is potentially tiled a way
       * our device can't understand. In this case, use
       * a fake front buffer. Hopefully the pixmap
       * content will get synced with the fake front
       * buffer.
       */
      if (draw->is_pixmap && !draw->is_different_gpu)
         front = dri3_get_pixmap_buffer(driDrawable,
                                        format,
                                        loader_dri3_buffer_front,
                                        draw);
      else
         front = dri3_get_buffer(driDrawable,
                                 format,
                                 loader_dri3_buffer_front,
                                 draw);

      if (!front)
         return false;
   } else {
      /* Front buffer no longer requested: release it */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
      draw->have_fake_front = 0;
   }

   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
      back = dri3_get_buffer(driDrawable,
                             format,
                             loader_dri3_buffer_back,
                             draw);
      if (!back)
         return false;
      draw->have_back = 1;
   } else {
      /* Back buffers no longer requested: release them */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
      draw->have_back = 0;
   }

   if (front) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
      buffers->front = front->image;
      /* A fake front is in play whenever the real pixmap was not handed
       * out directly
       */
      draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap;
   }

   if (back) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
      buffers->back = back->image;
   }

   /* Keep a pointer to the caller's stamp for later updates */
   draw->stamp = stamp;

   return true;
}
1554
1555 /** loader_dri3_update_drawable_geometry
1556 *
1557 * Get the current drawable geometry.
1558 */
1559 void
1560 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
1561 {
1562 xcb_get_geometry_cookie_t geom_cookie;
1563 xcb_get_geometry_reply_t *geom_reply;
1564
1565 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
1566
1567 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
1568
1569 if (geom_reply) {
1570 draw->width = geom_reply->width;
1571 draw->height = geom_reply->height;
1572 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1573
1574 free(geom_reply);
1575 }
1576 }
1577
1578
/**
 * Make sure the server has flushed all pending swap buffers to hardware
 * for this drawable. Ideally we'd want to send an X protocol request to
 * have the server block our connection until the swaps are complete. That
 * would avoid the potential round-trip here.
 */
void
loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
{
   /* Wait for swap-buffer-count zero; the returned counters are unused */
   int64_t ust_dummy, msc_dummy, sbc_dummy;

   (void) loader_dri3_wait_for_sbc(draw, 0,
                                   &ust_dummy, &msc_dummy, &sbc_dummy);
}
1592
1593 /**
1594 * Perform any cleanup associated with a close screen operation.
1595 * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
1596 *
1597 * This function destroys the screen's cached swap context if any.
1598 */
1599 void
1600 loader_dri3_close_screen(__DRIscreen *dri_screen)
1601 {
1602 mtx_lock(&blit_context.mtx);
1603 if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
1604 blit_context.core->destroyContext(blit_context.ctx);
1605 blit_context.ctx = NULL;
1606 }
1607 mtx_unlock(&blit_context.mtx);
1608 }