loader: Add dri3 helper
[mesa.git] / src / loader / loader_dri3_helper.c
/*
 * Copyright © 2013 Keith Packard
 * Copyright © 2015 Boyan Ding
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include <X11/Xlib-xcb.h>

#include "loader_dri3_helper.h"

/* From xmlpool/options.h, user-exposed so it should be stable */
#define DRI_CONF_VBLANK_NEVER 0
#define DRI_CONF_VBLANK_DEF_INTERVAL_0 1
#define DRI_CONF_VBLANK_DEF_INTERVAL_1 2
#define DRI_CONF_VBLANK_ALWAYS_SYNC 3

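/* Each render buffer is guarded by two views of the same synchronization
 * object: an xshmfence mapped into this process and an X SYNC fence created
 * from the same fd with xcb_dri3_fence_from_fd. The fence is reset before a
 * buffer is handed to the X server, triggered (by us or by the server) once
 * the buffer contents are ready, and awaited before the buffer is reused.
 */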
static inline void
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xshmfence_reset(buffer->shm_fence);
}

static inline void
dri3_fence_set(struct loader_dri3_buffer *buffer)
{
   xshmfence_trigger(buffer->shm_fence);
}

static inline void
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_sync_trigger_fence(c, buffer->sync_fence);
}

static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
}

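/* Decide how many back buffers to keep around: one by default, one more
 * while flipping (plus another if the server cannot do async flips for this
 * window), and an extra one when the swap interval is zero so rendering
 * need not wait for the previous swap.
 */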
static void
dri3_update_num_back(struct loader_dri3_drawable *draw)
{
   draw->num_back = 1;
   if (draw->flipping) {
      if (!draw->is_pixmap &&
          !(draw->present_capabilities & XCB_PRESENT_CAPABILITY_ASYNC))
         draw->num_back++;
      draw->num_back++;
   }
   if (draw->vtable->get_swap_interval(draw) == 0)
      draw->num_back++;
}

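/* Apply a new swap interval, clamped by the caller's policy, and adjust the
 * number of back buffers to match.
 */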
void
loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
{
   interval = draw->vtable->clamp_swap_interval(draw, interval);
   draw->vtable->set_swap_interval(draw, interval);
   dri3_update_num_back(draw);
}

/** dri3_free_render_buffer
 *
 * Free everything associated with one render buffer, including the pixmap,
 * the fence objects and the driver images.
 */
static void
dri3_free_render_buffer(struct loader_dri3_drawable *draw,
                        struct loader_dri3_buffer *buffer)
{
   if (buffer->own_pixmap)
      xcb_free_pixmap(draw->conn, buffer->pixmap);
   xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
   xshmfence_unmap_shm(buffer->shm_fence);
   (draw->ext->image->destroyImage)(buffer->image);
   if (buffer->linear_buffer)
      (draw->ext->image->destroyImage)(buffer->linear_buffer);
   free(buffer);
}

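/** loader_dri3_drawable_fini
 *
 * Destroy the DRI drawable, free all render buffers and drop the special
 * Present event queue, if any.
 */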
void
loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
{
   int i;

   (draw->ext->core->destroyDrawable)(draw->dri_drawable);

   for (i = 0; i < LOADER_DRI3_NUM_BUFFERS; i++) {
      if (draw->buffers[i])
         dri3_free_render_buffer(draw, draw->buffers[i]);
   }

   if (draw->special_event)
      xcb_unregister_for_special_event(draw->conn, draw->special_event);
}

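/** loader_dri3_drawable_init
 *
 * Initialize the loader's drawable state: record the connection and
 * callbacks, pick an initial swap interval from the vblank_mode option,
 * create the DRI drawable and query the initial geometry.
 */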
int
loader_dri3_drawable_init(xcb_connection_t *conn,
                          xcb_drawable_t drawable,
                          __DRIscreen *dri_screen,
                          bool is_different_gpu,
                          const __DRIconfig *dri_config,
                          struct loader_dri3_extensions *ext,
                          struct loader_dri3_vtable *vtable,
                          struct loader_dri3_drawable *draw)
{
   xcb_get_geometry_cookie_t cookie;
   xcb_get_geometry_reply_t *reply;
   xcb_generic_error_t *error;
   GLint vblank_mode = DRI_CONF_VBLANK_DEF_INTERVAL_1;
   int swap_interval;

   draw->conn = conn;
   draw->ext = ext;
   draw->vtable = vtable;
   draw->drawable = drawable;
   draw->dri_screen = dri_screen;
   draw->is_different_gpu = is_different_gpu;

   draw->have_back = 0;
   draw->have_fake_front = 0;
   draw->first_init = true;

   if (draw->ext->config)
      draw->ext->config->configQueryi(draw->dri_screen,
                                      "vblank_mode", &vblank_mode);

   switch (vblank_mode) {
   case DRI_CONF_VBLANK_NEVER:
   case DRI_CONF_VBLANK_DEF_INTERVAL_0:
      swap_interval = 0;
      break;
   case DRI_CONF_VBLANK_DEF_INTERVAL_1:
   case DRI_CONF_VBLANK_ALWAYS_SYNC:
   default:
      swap_interval = 1;
      break;
   }
   draw->vtable->set_swap_interval(draw, swap_interval);

   dri3_update_num_back(draw);

   /* Create a new drawable */
   draw->dri_drawable =
      (draw->ext->image_driver->createNewDrawable)(dri_screen,
                                                   dri_config,
                                                   draw);

   if (!draw->dri_drawable)
      return 1;

   cookie = xcb_get_geometry(draw->conn, draw->drawable);
   reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
   if (reply == NULL || error != NULL) {
      draw->ext->core->destroyDrawable(draw->dri_drawable);
      return 1;
   }

   draw->width = reply->width;
   draw->height = reply->height;
   draw->depth = reply->depth;
   draw->vtable->set_drawable_size(draw, draw->width, draw->height);
   free(reply);

   /*
    * Make sure the server has the same swap interval we do for the new
    * drawable.
    */
   loader_dri3_set_swap_interval(draw, swap_interval);

   return 0;
}

/*
 * Process one Present event
 */
static void
dri3_handle_present_event(struct loader_dri3_drawable *draw,
                          xcb_present_generic_event_t *ge)
{
   switch (ge->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *ce = (void *) ge;

      draw->width = ce->width;
      draw->height = ce->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      break;
   }
   case XCB_PRESENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *ce = (void *) ge;

      /* Compute the processed SBC number from the received 32-bit serial
       * number merged with the upper 32 bits of the sent 64-bit serial
       * number while checking for wrap.
       */
      if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         draw->recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;
         if (draw->recv_sbc > draw->send_sbc)
            draw->recv_sbc -= 0x100000000;
         switch (ce->mode) {
         case XCB_PRESENT_COMPLETE_MODE_FLIP:
            draw->flipping = true;
            break;
         case XCB_PRESENT_COMPLETE_MODE_COPY:
            draw->flipping = false;
            break;
         }
         dri3_update_num_back(draw);

         if (draw->vtable->show_fps)
            draw->vtable->show_fps(draw, ce->ust);

         draw->ust = ce->ust;
         draw->msc = ce->msc;
      } else {
         draw->recv_msc_serial = ce->serial;
         draw->notify_ust = ce->ust;
         draw->notify_msc = ce->msc;
      }
      break;
   }
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *ie = (void *) ge;
      int b;

      for (b = 0; b < sizeof(draw->buffers) / sizeof(draw->buffers[0]); b++) {
         struct loader_dri3_buffer *buf = draw->buffers[b];

         if (buf && buf->pixmap == ie->pixmap) {
            buf->busy = 0;
            if (draw->num_back <= b && b < LOADER_DRI3_MAX_BACK) {
               dri3_free_render_buffer(draw, buf);
               draw->buffers[b] = NULL;
            }
            break;
         }
      }
      break;
   }
   }
   free(ge);
}

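/* Block until one Present special event arrives and process it. Returns
 * false if waiting failed (e.g. the connection broke).
 */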
static bool
dri3_wait_for_event(struct loader_dri3_drawable *draw)
{
   xcb_generic_event_t *ev;
   xcb_present_generic_event_t *ge;

   xcb_flush(draw->conn);
   ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
   if (!ev)
      return false;
   ge = (void *) ev;
   dri3_handle_present_event(draw, ge);
   return true;
}

/** loader_dri3_wait_for_msc
 *
 * Get the X server to send an event when the target msc/divisor/remainder is
 * reached.
 */
bool
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
                         int64_t target_msc,
                         int64_t divisor, int64_t remainder,
                         int64_t *ust, int64_t *msc, int64_t *sbc)
{
   uint32_t msc_serial;

   msc_serial = ++draw->send_msc_serial;
   xcb_present_notify_msc(draw->conn,
                          draw->drawable,
                          msc_serial,
                          target_msc,
                          divisor,
                          remainder);

   xcb_flush(draw->conn);

   /* Wait for the event */
   if (draw->special_event) {
      while ((int32_t) (msc_serial - draw->recv_msc_serial) > 0) {
         if (!dri3_wait_for_event(draw))
            return false;
      }
   }

   *ust = draw->notify_ust;
   *msc = draw->notify_msc;
   *sbc = draw->recv_sbc;

   return true;
}

/** loader_dri3_wait_for_sbc
 *
 * Wait for the completed swap buffer count to reach the specified
 * target. Presumably the application knows that this will be reached with
 * outstanding complete events, or we're going to be here a while.
 */
int
loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
                         int64_t target_sbc, int64_t *ust,
                         int64_t *msc, int64_t *sbc)
{
   /* From the GLX_OML_sync_control spec:
    *
    * "If <target_sbc> = 0, the function will block until all previous
    * swaps requested with glXSwapBuffersMscOML for that window have
    * completed."
    */
   if (!target_sbc)
      target_sbc = draw->send_sbc;

   while (draw->recv_sbc < target_sbc) {
      if (!dri3_wait_for_event(draw))
         return 0;
   }

   *ust = draw->ust;
   *msc = draw->msc;
   *sbc = draw->recv_sbc;
   return 1;
}

/** dri3_find_back
 *
 * Find an idle back buffer. If there isn't one, then
 * wait for a present idle notify event from the X server.
 */
static int
dri3_find_back(struct loader_dri3_drawable *draw)
{
   int b;
   xcb_generic_event_t *ev;
   xcb_present_generic_event_t *ge;

   for (;;) {
      for (b = 0; b < draw->num_back; b++) {
         int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->num_back);
         struct loader_dri3_buffer *buffer = draw->buffers[id];

         if (!buffer || !buffer->busy) {
            draw->cur_back = id;
            return id;
         }
      }
      xcb_flush(draw->conn);
      ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
      if (!ev)
         return -1;
      ge = (void *) ev;
      dri3_handle_present_event(draw, ge);
   }
}

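/* Return the GC used for CopyArea requests on this drawable, creating it on
 * first use with graphics exposures disabled.
 */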
static xcb_gcontext_t
dri3_drawable_gc(struct loader_dri3_drawable *draw)
{
   if (!draw->gc) {
      uint32_t v = 0;
      xcb_create_gc(draw->conn,
                    (draw->gc = xcb_generate_id(draw->conn)),
                    draw->drawable,
                    XCB_GC_GRAPHICS_EXPOSURES,
                    &v);
   }
   return draw->gc;
}

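/* Convenience accessors for the current back buffer and the fake front
 * buffer.
 */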
static struct loader_dri3_buffer *
dri3_back_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
}

static struct loader_dri3_buffer *
dri3_fake_front_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_FRONT_ID];
}

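/* Issue a CopyArea as a checked request and discard the reply, so any error
 * (for instance from a racing window resize) is dropped instead of landing
 * in the application's event queue.
 */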
static void
dri3_copy_area(xcb_connection_t *c,
               xcb_drawable_t src_drawable,
               xcb_drawable_t dst_drawable,
               xcb_gcontext_t gc,
               int16_t src_x,
               int16_t src_y,
               int16_t dst_x,
               int16_t dst_y,
               uint16_t width,
               uint16_t height)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_copy_area_checked(c,
                                  src_drawable,
                                  dst_drawable,
                                  gc,
                                  src_x,
                                  src_y,
                                  dst_x,
                                  dst_y,
                                  width,
                                  height);
   xcb_discard_reply(c, cookie.sequence);
}

/**
 * Asks the driver to flush any queued work necessary for serializing with the
 * X command stream, and optionally the slightly more strict requirement of
 * glFlush() equivalence (which would require flushing even if nothing had
 * been drawn to a window system framebuffer, for example).
 */
void
loader_dri3_flush(struct loader_dri3_drawable *draw,
                  unsigned flags,
                  enum __DRI2throttleReason throttle_reason)
{
   /* The context may be NULL if no context is current, so only flush when we
    * actually have one.
    */
   __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);

   if (dri_context) {
      draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
                                         flags, throttle_reason);
   }
}

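/** loader_dri3_copy_sub_buffer
 *
 * Copy a region of the current back buffer to the drawable (and to the fake
 * front, if any), using the buffer fences to order the copies against
 * rendering and against the X server.
 */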
void
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
                            int x, int y,
                            int width, int height,
                            bool flush)
{
   struct loader_dri3_buffer *back;
   unsigned flags = __DRI2_FLUSH_DRAWABLE;
   __DRIcontext *dri_context;

   dri_context = draw->vtable->get_dri_context(draw);

   /* Check we have the right attachments */
   if (!draw->have_back || draw->is_pixmap)
      return;

   if (flush)
      flags |= __DRI2_FLUSH_CONTEXT;
   loader_dri3_flush(draw, flags, __DRI2_THROTTLE_SWAPBUFFER);

   back = dri3_back_buffer(draw);
   y = draw->height - y - height;

   if (draw->is_different_gpu && draw->vtable->in_current_context(draw)) {
      /* Update the linear buffer part of the back buffer
       * for the dri3_copy_area operation
       */
      draw->ext->image->blitImage(dri_context,
                                  back->linear_buffer,
                                  back->image,
                                  0, 0, back->width,
                                  back->height,
                                  0, 0, back->width,
                                  back->height, __BLIT_FLAG_FLUSH);
      /* We use blitImage to update our fake front. */
      if (draw->have_fake_front)
         draw->ext->image->blitImage(dri_context,
                                     dri3_fake_front_buffer(draw)->image,
                                     back->image,
                                     x, y, width, height,
                                     x, y, width, height, __BLIT_FLAG_FLUSH);
   }

   dri3_fence_reset(draw->conn, back);
   dri3_copy_area(draw->conn,
                  dri3_back_buffer(draw)->pixmap,
                  draw->drawable,
                  dri3_drawable_gc(draw),
                  x, y, x, y, width, height);
   dri3_fence_trigger(draw->conn, back);
   /* Refresh the fake front (if present) after we just damaged the real
    * front.
    */
   if (draw->have_fake_front && !draw->is_different_gpu) {
      dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
      dri3_copy_area(draw->conn,
                     dri3_back_buffer(draw)->pixmap,
                     dri3_fake_front_buffer(draw)->pixmap,
                     dri3_drawable_gc(draw),
                     x, y, x, y, width, height);
      dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
      dri3_fence_await(draw->conn, dri3_fake_front_buffer(draw));
   }
   dri3_fence_await(draw->conn, back);
}

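/** loader_dri3_copy_drawable
 *
 * Copy the full drawable-sized area between two X drawables, fenced with the
 * fake front buffer's fence.
 */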
void
loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
                          xcb_drawable_t dest,
                          xcb_drawable_t src)
{
   loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, 0);

   dri3_fence_reset(draw->conn, dri3_fake_front_buffer(draw));
   dri3_copy_area(draw->conn,
                  src, dest,
                  dri3_drawable_gc(draw),
                  0, 0, 0, 0, draw->width, draw->height);
   dri3_fence_trigger(draw->conn, dri3_fake_front_buffer(draw));
   dri3_fence_await(draw->conn, dri3_fake_front_buffer(draw));
}

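/** loader_dri3_wait_x
 *
 * Bring the fake front buffer up to date with the real front (the X
 * drawable) before GL reads from it.
 */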
void
loader_dri3_wait_x(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;
   __DRIcontext *dri_context;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);
   dri_context = draw->vtable->get_dri_context(draw);

   loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);

   /* In the draw->is_different_gpu case, the linear buffer has been updated,
    * but not yet the tiled buffer.
    * Copy back to the tiled buffer we use for rendering.
    * Note that we don't need flushing.
    */
   if (draw->is_different_gpu && draw->vtable->in_current_context(draw))
      draw->ext->image->blitImage(dri_context,
                                  front->image,
                                  front->linear_buffer,
                                  0, 0, front->width,
                                  front->height,
                                  0, 0, front->width,
                                  front->height, 0);
}

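/** loader_dri3_wait_gl
 *
 * Flush GL rendering held in the fake front buffer out to the real front
 * (the X drawable).
 */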
void
loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;
   __DRIcontext *dri_context;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_fake_front_buffer(draw);
   dri_context = draw->vtable->get_dri_context(draw);

   /* In the draw->is_different_gpu case, we update the linear_buffer
    * before updating the real front.
    */
   if (draw->is_different_gpu && draw->vtable->in_current_context(draw))
      draw->ext->image->blitImage(dri_context,
                                  front->linear_buffer,
                                  front->image,
                                  0, 0, front->width,
                                  front->height,
                                  0, 0, front->width,
                                  front->height, __BLIT_FLAG_FLUSH);
   loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
}

/** dri3_flush_present_events
 *
 * Process any present events that have been received from the X server
 */
static void
dri3_flush_present_events(struct loader_dri3_drawable *draw)
{
   /* Check to see if any configuration changes have occurred
    * since we were last invoked
    */
   if (draw->special_event) {
      xcb_generic_event_t *ev;

      while ((ev = xcb_poll_for_special_event(draw->conn,
                                              draw->special_event)) != NULL) {
         xcb_present_generic_event_t *ge = (void *) ev;
         dri3_handle_present_event(draw, ge);
      }
   }
}

/** loader_dri3_swap_buffers_msc
 *
 * Make the current back buffer visible using the present extension
 */
int64_t
loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
                             int64_t target_msc, int64_t divisor,
                             int64_t remainder, unsigned flush_flags,
                             bool force_copy)
{
   struct loader_dri3_buffer *back;
   __DRIcontext *dri_context;
   int64_t ret = 0;
   uint32_t options = XCB_PRESENT_OPTION_NONE;
   int swap_interval;

   dri_context = draw->vtable->get_dri_context(draw);
   swap_interval = draw->vtable->get_swap_interval(draw);

   draw->vtable->flush_drawable(draw, flush_flags);

   back = draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
   if (draw->is_different_gpu && back) {
      /* Update the linear buffer before presenting the pixmap */
      draw->ext->image->blitImage(dri_context,
                                  back->linear_buffer,
                                  back->image,
                                  0, 0, back->width,
                                  back->height,
                                  0, 0, back->width,
                                  back->height, __BLIT_FLAG_FLUSH);
      /* Update the fake front */
      if (draw->have_fake_front)
         draw->ext->image->blitImage(dri_context,
                                     draw->buffers[LOADER_DRI3_FRONT_ID]->image,
                                     back->image,
                                     0, 0, draw->width, draw->height,
                                     0, 0, draw->width, draw->height,
                                     __BLIT_FLAG_FLUSH);
   }

   dri3_flush_present_events(draw);

   if (back && !draw->is_pixmap) {
      dri3_fence_reset(draw->conn, back);

      /* Compute when we want the frame shown by taking the last known
       * successful MSC and adding in a swap interval for each outstanding swap
       * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
       * semantics".
       */
      ++draw->send_sbc;
      if (target_msc == 0 && divisor == 0 && remainder == 0)
         target_msc = draw->msc + swap_interval *
            (draw->send_sbc - draw->recv_sbc);
      else if (divisor == 0 && remainder > 0) {
         /* From the GLX_OML_sync_control spec:
          * "If <divisor> = 0, the swap will occur when MSC becomes
          * greater than or equal to <target_msc>."
          *
          * Note that there's no mention of the remainder. The Present
          * extension throws BadValue for remainder != 0 with divisor == 0, so
          * just drop the passed in value.
          */
         remainder = 0;
      }

      /* From the GLX_EXT_swap_control spec
       * and the EGL 1.4 spec (page 53):
       *
       * "If <interval> is set to a value of 0, buffer swaps are not
       * synchronized to a video frame."
       *
       * Implementation note: It is possible to enable triple buffering
       * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not
       * be the default.
       */
      if (swap_interval == 0)
         options |= XCB_PRESENT_OPTION_ASYNC;
      if (force_copy)
         options |= XCB_PRESENT_OPTION_COPY;

      back->busy = 1;
      back->last_swap = draw->send_sbc;
      xcb_present_pixmap(draw->conn,
                         draw->drawable,
                         back->pixmap,
                         (uint32_t) draw->send_sbc,
                         0, /* valid */
                         0, /* update */
                         0, /* x_off */
                         0, /* y_off */
                         None, /* target_crtc */
                         None,
                         back->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
      ret = (int64_t) draw->send_sbc;

      /* If there's a fake front, then copy the source back buffer
       * to the fake front to keep it up to date. This needs
       * to reset the fence and make future users block until
       * the X server is done copying the bits.
       */
      if (draw->have_fake_front && !draw->is_different_gpu) {
         dri3_fence_reset(draw->conn, draw->buffers[LOADER_DRI3_FRONT_ID]);
         dri3_copy_area(draw->conn,
                        back->pixmap,
                        draw->buffers[LOADER_DRI3_FRONT_ID]->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, draw->buffers[LOADER_DRI3_FRONT_ID]);
      }
      xcb_flush(draw->conn);
      if (draw->stamp)
         ++(*draw->stamp);
   }

   (draw->ext->flush->invalidate)(draw->dri_drawable);

   return ret;
}

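/** loader_dri3_query_buffer_age
 *
 * Return the age (in swaps) of the back buffer that will be used for the
 * next frame, or 0 if its contents are undefined.
 */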
int
loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
{
   int back_id = LOADER_DRI3_BACK_ID(dri3_find_back(draw));

   if (back_id < 0 || !draw->buffers[back_id])
      return 0;

   if (draw->buffers[back_id]->last_swap != 0)
      return draw->send_sbc - draw->buffers[back_id]->last_swap + 1;
   else
      return 0;
}

/** loader_dri3_open
 *
 * Wrapper around xcb_dri3_open
 */
int
loader_dri3_open(xcb_connection_t *conn,
                 xcb_window_t root,
                 uint32_t provider)
{
   xcb_dri3_open_cookie_t cookie;
   xcb_dri3_open_reply_t *reply;
   int fd;

   cookie = xcb_dri3_open(conn,
                          root,
                          provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}

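/* Bytes per pixel for the subset of __DRI_IMAGE_FORMAT codes we can present;
 * returns 0 for formats we do not handle.
 */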
static uint32_t
dri3_cpp_for_format(uint32_t format)
{
   switch (format) {
   case __DRI_IMAGE_FORMAT_R8:
      return 1;
   case __DRI_IMAGE_FORMAT_RGB565:
   case __DRI_IMAGE_FORMAT_GR88:
      return 2;
   case __DRI_IMAGE_FORMAT_XRGB8888:
   case __DRI_IMAGE_FORMAT_ARGB8888:
   case __DRI_IMAGE_FORMAT_ABGR8888:
   case __DRI_IMAGE_FORMAT_XBGR8888:
   case __DRI_IMAGE_FORMAT_XRGB2101010:
   case __DRI_IMAGE_FORMAT_ARGB2101010:
   case __DRI_IMAGE_FORMAT_SARGB8:
      return 4;
   case __DRI_IMAGE_FORMAT_NONE:
   default:
      return 0;
   }
}

/** dri3_alloc_render_buffer
 *
 * Use the driver createImage function to construct a __DRIimage, then
 * get a file descriptor for that and create an X pixmap from it.
 *
 * Allocate an xshmfence for synchronization.
 */
static struct loader_dri3_buffer *
dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
                         int width, int height, int depth)
{
   struct loader_dri3_buffer *buffer;
   __DRIimage *pixmap_buffer;
   xcb_pixmap_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int buffer_fd, fence_fd;
   int stride;

   /* Create an xshmfence object and
    * prepare to send that to the X server
    */
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      return NULL;

   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL)
      goto no_shm_fence;

   /* Allocate the image from the driver
    */
   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   buffer->cpp = dri3_cpp_for_format(format);
   if (!buffer->cpp)
      goto no_image;

   if (!draw->is_different_gpu) {
      buffer->image = (draw->ext->image->createImage)(draw->dri_screen,
                                                      width, height,
                                                      format,
                                                      __DRI_IMAGE_USE_SHARE |
                                                      __DRI_IMAGE_USE_SCANOUT,
                                                      buffer);
      pixmap_buffer = buffer->image;

      if (!buffer->image)
         goto no_image;
   } else {
      buffer->image = (draw->ext->image->createImage)(draw->dri_screen,
                                                      width, height,
                                                      format,
                                                      0,
                                                      buffer);

      if (!buffer->image)
         goto no_image;

      buffer->linear_buffer =
         (draw->ext->image->createImage)(draw->dri_screen,
                                         width, height, format,
                                         __DRI_IMAGE_USE_SHARE |
                                         __DRI_IMAGE_USE_LINEAR,
                                         buffer);
      pixmap_buffer = buffer->linear_buffer;

      if (!buffer->linear_buffer)
         goto no_linear_buffer;
   }

   /* X wants the stride, so ask the image for it
    */
   if (!(draw->ext->image->queryImage)(pixmap_buffer, __DRI_IMAGE_ATTRIB_STRIDE,
                                       &stride))
      goto no_buffer_attrib;

   buffer->pitch = stride;

   if (!(draw->ext->image->queryImage)(pixmap_buffer, __DRI_IMAGE_ATTRIB_FD,
                                       &buffer_fd))
      goto no_buffer_attrib;

   xcb_dri3_pixmap_from_buffer(draw->conn,
                               (pixmap = xcb_generate_id(draw->conn)),
                               draw->drawable,
                               buffer->size,
                               width, height, buffer->pitch,
                               depth, buffer->cpp * 8,
                               buffer_fd);

   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);

   buffer->pixmap = pixmap;
   buffer->own_pixmap = true;
   buffer->sync_fence = sync_fence;
   buffer->shm_fence = shm_fence;
   buffer->width = width;
   buffer->height = height;

   /* Mark the buffer as idle
    */
   dri3_fence_set(buffer);

   return buffer;

no_buffer_attrib:
   (draw->ext->image->destroyImage)(pixmap_buffer);
no_linear_buffer:
   if (draw->is_different_gpu)
      (draw->ext->image->destroyImage)(buffer->image);
no_image:
   free(buffer);
no_buffer:
   xshmfence_unmap_shm(shm_fence);
no_shm_fence:
   close(fence_fd);
   return NULL;
}

/** dri3_update_drawable
 *
 * Called the first time we use the drawable and then
 * after we receive present configure notify events to
 * track the geometry of the drawable
 */
static int
dri3_update_drawable(__DRIdrawable *driDrawable,
                     struct loader_dri3_drawable *draw)
{
   if (draw->first_init) {
      xcb_get_geometry_cookie_t geom_cookie;
      xcb_get_geometry_reply_t *geom_reply;
      xcb_void_cookie_t cookie;
      xcb_generic_error_t *error;
      xcb_present_query_capabilities_cookie_t present_capabilities_cookie;
      xcb_present_query_capabilities_reply_t *present_capabilities_reply;

      draw->first_init = false;

      /* Try to select for input on the window.
       *
       * If the drawable is a window, this will get our events
       * delivered.
       *
       * Otherwise, we'll get a BadWindow error back from this request which
       * will let us know that the drawable is a pixmap instead.
       */
      draw->eid = xcb_generate_id(draw->conn);
      cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                                          XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

      present_capabilities_cookie =
         xcb_present_query_capabilities(draw->conn, draw->drawable);

      /* Create an XCB event queue to hold present events outside of the usual
       * application event queue
       */
      draw->special_event = xcb_register_for_special_xge(draw->conn,
                                                         &xcb_present_id,
                                                         draw->eid,
                                                         draw->stamp);
      geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);

      geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);

      if (!geom_reply)
         return false;

      draw->width = geom_reply->width;
      draw->height = geom_reply->height;
      draw->depth = geom_reply->depth;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);

      free(geom_reply);

      draw->is_pixmap = false;

      /* Check to see if our select input call failed. If it failed with a
       * BadWindow error, then assume the drawable is a pixmap. Destroy the
       * special event queue created above and mark the drawable as a pixmap.
       */
      error = xcb_request_check(draw->conn, cookie);

      present_capabilities_reply =
         xcb_present_query_capabilities_reply(draw->conn,
                                              present_capabilities_cookie,
                                              NULL);

      if (present_capabilities_reply) {
         draw->present_capabilities = present_capabilities_reply->capabilities;
         free(present_capabilities_reply);
      } else
         draw->present_capabilities = 0;

      if (error) {
         if (error->error_code != BadWindow) {
            free(error);
            return false;
         }
         draw->is_pixmap = true;
         xcb_unregister_for_special_event(draw->conn, draw->special_event);
         draw->special_event = NULL;
      }
   }
   dri3_flush_present_events(draw);
   return true;
}

/* The DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
 * the createImageFromFds call takes __DRI_IMAGE_FOURCC codes. To avoid
 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
 * translate to __DRI_IMAGE_FOURCC codes in the call to createImageFromFds
 */
static int
image_format_to_fourcc(int format)
{
   /* Convert from __DRI_IMAGE_FORMAT to __DRI_IMAGE_FOURCC (sigh) */
   switch (format) {
   case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
   case __DRI_IMAGE_FORMAT_RGB565: return __DRI_IMAGE_FOURCC_RGB565;
   case __DRI_IMAGE_FORMAT_XRGB8888: return __DRI_IMAGE_FOURCC_XRGB8888;
   case __DRI_IMAGE_FORMAT_ARGB8888: return __DRI_IMAGE_FOURCC_ARGB8888;
   case __DRI_IMAGE_FORMAT_ABGR8888: return __DRI_IMAGE_FOURCC_ABGR8888;
   case __DRI_IMAGE_FORMAT_XBGR8888: return __DRI_IMAGE_FOURCC_XBGR8888;
   }
   return 0;
}

/** dri3_get_pixmap_buffer
 *
 * Get the DRM object for a pixmap from the X server and
 * wrap that with a __DRIimage structure using createImageFromFds
 */
static struct loader_dri3_buffer *
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
                       enum loader_dri3_buffer_type buffer_type,
                       struct loader_dri3_drawable *draw)
{
   int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
   struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
   xcb_drawable_t pixmap;
   xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
   xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;
   int *fds;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int fence_fd;
   __DRIimage *image_planar;
   int stride, offset;

   if (buffer)
      return buffer;

   pixmap = draw->drawable;

   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto no_fence;
   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL) {
      close(fence_fd);
      goto no_fence;
   }

   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);

   /* Get an FD for the pixmap object
    */
   bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
   bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn,
                                                bp_cookie, NULL);
   if (!bp_reply)
      goto no_image;
   fds = xcb_dri3_buffer_from_pixmap_reply_fds(draw->conn, bp_reply);

   stride = bp_reply->stride;
   offset = 0;

   /* createImageFromFds creates a wrapper __DRIimage structure which
    * can deal with multiple planes for things like YUV images. So, once
    * we've gotten the planar wrapper, pull the single plane out of it and
    * discard the wrapper.
    */
   image_planar =
      (draw->ext->image->createImageFromFds)(draw->dri_screen, bp_reply->width,
                                             bp_reply->height,
                                             image_format_to_fourcc(format),
                                             fds, 1, &stride, &offset, buffer);
   close(fds[0]);
   if (!image_planar)
      goto no_image;

   buffer->image = (draw->ext->image->fromPlanar)(image_planar, 0, buffer);

   (draw->ext->image->destroyImage)(image_planar);

   if (!buffer->image)
      goto no_image;

   buffer->pixmap = pixmap;
   buffer->own_pixmap = false;
   buffer->width = bp_reply->width;
   buffer->height = bp_reply->height;
   buffer->buffer_type = buffer_type;
   buffer->shm_fence = shm_fence;
   buffer->sync_fence = sync_fence;

   draw->buffers[buf_id] = buffer;
   return buffer;

no_image:
   xcb_sync_destroy_fence(draw->conn, sync_fence);
   xshmfence_unmap_shm(shm_fence);
no_fence:
   free(buffer);
no_buffer:
   return NULL;
}

/** dri3_get_buffer
 *
 * Find a front or back buffer, allocating new ones as necessary
 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int format,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   int buf_id;
   __DRIcontext *dri_context;

   dri_context = draw->vtable->get_dri_context(draw);

   if (buffer_type == loader_dri3_buffer_back) {
      buf_id = dri3_find_back(draw);

      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, or if that
    * old one is the wrong size
    */
   if (!buffer || buffer->width != draw->width ||
       buffer->height != draw->height) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new buffer
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                            format,
                                            draw->width,
                                            draw->height,
                                            draw->depth);
      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      switch (buffer_type) {
      case loader_dri3_buffer_back:
         if (buffer) {
            if (!buffer->linear_buffer) {
               dri3_fence_reset(draw->conn, new_buffer);
               dri3_fence_await(draw->conn, buffer);
               dri3_copy_area(draw->conn,
                              buffer->pixmap,
                              new_buffer->pixmap,
                              dri3_drawable_gc(draw),
                              0, 0, 0, 0,
                              draw->width, draw->height);
               dri3_fence_trigger(draw->conn, new_buffer);
            } else if (draw->vtable->in_current_context(draw)) {
               draw->ext->image->blitImage(dri_context,
                                           new_buffer->image,
                                           buffer->image,
                                           0, 0, draw->width, draw->height,
                                           0, 0, draw->width, draw->height, 0);
            }
            dri3_free_render_buffer(draw, buffer);
         }
         break;
      case loader_dri3_buffer_front:
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         if (new_buffer->linear_buffer &&
             draw->vtable->in_current_context(draw)) {
            dri3_fence_await(draw->conn, new_buffer);
            draw->ext->image->blitImage(dri_context,
                                        new_buffer->image,
                                        new_buffer->linear_buffer,
                                        0, 0, draw->width, draw->height,
                                        0, 0, draw->width, draw->height, 0);
         }
         break;
      }
      buffer = new_buffer;
      buffer->buffer_type = buffer_type;
      draw->buffers[buf_id] = buffer;
   }
   dri3_fence_await(draw->conn, buffer);

   /* Return the requested buffer */
   return buffer;
}

/** dri3_free_buffers
 *
 * Free the front buffer or all of the back buffers. Used
 * when the application changes which buffers it needs
 */
static void
dri3_free_buffers(__DRIdrawable *driDrawable,
                  enum loader_dri3_buffer_type buffer_type,
                  struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   int first_id;
   int n_id;
   int buf_id;

   switch (buffer_type) {
   case loader_dri3_buffer_back:
      first_id = LOADER_DRI3_BACK_ID(0);
      n_id = LOADER_DRI3_MAX_BACK;
      break;
   case loader_dri3_buffer_front:
      first_id = LOADER_DRI3_FRONT_ID;
      n_id = 1;
   }

   for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) {
      buffer = draw->buffers[buf_id];
      if (buffer) {
         dri3_free_render_buffer(draw, buffer);
         draw->buffers[buf_id] = NULL;
      }
   }
}

/** loader_dri3_get_buffers
 *
 * The published buffer allocation API.
 * Returns all of the necessary buffers, allocating
 * as needed.
 */
int
loader_dri3_get_buffers(__DRIdrawable *driDrawable,
                        unsigned int format,
                        uint32_t *stamp,
                        void *loaderPrivate,
                        uint32_t buffer_mask,
                        struct __DRIimageList *buffers)
{
   struct loader_dri3_drawable *draw = loaderPrivate;
   struct loader_dri3_buffer *front, *back;

   buffers->image_mask = 0;
   buffers->front = NULL;
   buffers->back = NULL;

   front = NULL;
   back = NULL;

   if (!dri3_update_drawable(driDrawable, draw))
      return false;

   /* Pixmaps always have front buffers */
   if (draw->is_pixmap)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
      /* All pixmaps are owned by the server gpu.
       * When we use a different gpu, we can't use the pixmap
       * as a buffer since it is potentially tiled in a way
       * our device can't understand. In this case, use
       * a fake front buffer. Hopefully the pixmap
       * content will get synced with the fake front
       * buffer.
       */
      if (draw->is_pixmap && !draw->is_different_gpu)
         front = dri3_get_pixmap_buffer(driDrawable,
                                        format,
                                        loader_dri3_buffer_front,
                                        draw);
      else
         front = dri3_get_buffer(driDrawable,
                                 format,
                                 loader_dri3_buffer_front,
                                 draw);

      if (!front)
         return false;
   } else {
      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
      draw->have_fake_front = 0;
   }

   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
      back = dri3_get_buffer(driDrawable,
                             format,
                             loader_dri3_buffer_back,
                             draw);
      if (!back)
         return false;
      draw->have_back = 1;
   } else {
      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
      draw->have_back = 0;
   }

   if (front) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
      buffers->front = front->image;
      draw->have_fake_front = draw->is_different_gpu || !draw->is_pixmap;
   }

   if (back) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
      buffers->back = back->image;
   }

   draw->stamp = stamp;

   return true;
}