2 * Copyright © 2013 Keith Packard
3 * Copyright © 2015 Boyan Ding
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that copyright
8 * notice and this permission notice appear in supporting documentation, and
9 * that the name of the copyright holders not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. The copyright holders make no representations
12 * about the suitability of this software for any purpose. It is provided "as
13 * is" without express or implied warranty.
15 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
28 #include <X11/xshmfence.h>
31 #include <xcb/present.h>
33 #include <X11/Xlib-xcb.h>
35 #include <c11/threads.h>
36 #include "loader_dri3_helper.h"
38 /* From xmlpool/options.h, user exposed so should be stable */
39 #define DRI_CONF_VBLANK_NEVER 0
40 #define DRI_CONF_VBLANK_DEF_INTERVAL_0 1
41 #define DRI_CONF_VBLANK_DEF_INTERVAL_1 2
42 #define DRI_CONF_VBLANK_ALWAYS_SYNC 3
45 * A cached blit context.
47 struct loader_dri3_blit_context
{
50 __DRIscreen
*cur_screen
;
51 const __DRIcoreExtension
*core
;
54 /* For simplicity we maintain the cache only for a single screen at a time */
55 static struct loader_dri3_blit_context blit_context
= {
56 _MTX_INITIALIZER_NP
, NULL
60 dri3_flush_present_events(struct loader_dri3_drawable
*draw
);
63 * Do we have blit functionality in the image blit extension?
65 * \param draw[in] The drawable intended to blit from / to.
66 * \return true if we have blit functionality. false otherwise.
/* True iff the drawable's DRI image extension is new enough (base.version
 * >= 9) AND actually exposes a non-NULL blitImage entry point.  Callers use
 * this to decide whether GPU-side blits are available at all. */
68 static bool loader_dri3_have_image_blit(const struct loader_dri3_drawable
*draw
)
70 return draw
->ext
->image
->base
.version
>= 9 &&
71 draw
->ext
->image
->blitImage
!= NULL
;
75 * Get and lock (for use with the current thread) a dri context associated
76 * with the drawable's dri screen. The context is intended to be used with
77 * the dri image extension's blitImage method.
79 * \param draw[in] Pointer to the drawable whose dri screen we want a
81 * \return A dri context or NULL if context creation failed.
83 * When the caller is done with the context (even if the context returned was
84 * NULL), the caller must call loader_dri3_blit_context_put.
87 loader_dri3_blit_context_get(struct loader_dri3_drawable
*draw
)
89 mtx_lock(&blit_context
.mtx
);
91 if (blit_context
.ctx
&& blit_context
.cur_screen
!= draw
->dri_screen
) {
92 blit_context
.core
->destroyContext(blit_context
.ctx
);
93 blit_context
.ctx
= NULL
;
96 if (!blit_context
.ctx
) {
97 blit_context
.ctx
= draw
->ext
->core
->createNewContext(draw
->dri_screen
,
99 blit_context
.cur_screen
= draw
->dri_screen
;
100 blit_context
.core
= draw
->ext
->core
;
103 return blit_context
.ctx
;
107 * Release (for use with other threads) a dri context previously obtained using
108 * loader_dri3_blit_context_get.
/* Release the global cached blit context for use by other threads.
 * Pairs with loader_dri3_blit_context_get(), which takes blit_context.mtx;
 * this simply drops that lock. */
111 loader_dri3_blit_context_put(void)
113 mtx_unlock(&blit_context
.mtx
);
117 * Blit (parts of) the contents of a DRI image to another dri image
119 * \param draw[in] The drawable which owns the images.
120 * \param dst[in] The destination image.
121 * \param src[in] The source image.
122 * \param dstx0[in] Start destination coordinate.
123 * \param dsty0[in] Start destination coordinate.
124 * \param width[in] Blit width.
125 * \param height[in] Blit height.
126 * \param srcx0[in] Start source coordinate.
127 * \param srcy0[in] Start source coordinate.
128 * \param flush_flag[in] Image blit flush flag.
129 * \return true iff successful.
132 loader_dri3_blit_image(struct loader_dri3_drawable
*draw
,
133 __DRIimage
*dst
, __DRIimage
*src
,
134 int dstx0
, int dsty0
, int width
, int height
,
135 int srcx0
, int srcy0
, int flush_flag
)
137 __DRIcontext
*dri_context
;
138 bool use_blit_context
= false;
140 if (!loader_dri3_have_image_blit(draw
))
143 dri_context
= draw
->vtable
->get_dri_context(draw
);
145 if (!dri_context
|| !draw
->vtable
->in_current_context(draw
)) {
146 dri_context
= loader_dri3_blit_context_get(draw
);
147 use_blit_context
= true;
148 flush_flag
|= __BLIT_FLAG_FLUSH
;
152 draw
->ext
->image
->blitImage(dri_context
, dst
, src
, dstx0
, dsty0
,
153 width
, height
, srcx0
, srcy0
,
154 width
, height
, flush_flag
);
156 if (use_blit_context
)
157 loader_dri3_blit_context_put();
159 return dri_context
!= NULL
;
/* Reset the buffer's xshmfence to the untriggered state so that a later
 * dri3_fence_await() will block until the fence is triggered again.
 * NOTE(review): the xcb_connection_t parameter is not used in the visible
 * body; it appears to exist for signature symmetry with the other fence
 * helpers. */
163 dri3_fence_reset(xcb_connection_t
*c
, struct loader_dri3_buffer
*buffer
)
165 xshmfence_reset(buffer
->shm_fence
)
;
/* Trigger the buffer's xshmfence from the client side, unblocking any
 * waiter in dri3_fence_await().  Used e.g. to mark a freshly allocated
 * buffer as idle. */
169 dri3_fence_set(struct loader_dri3_buffer
*buffer
)
171 xshmfence_trigger(buffer
->shm_fence
)
;
/* Ask the X server (via the XCB sync extension) to trigger the fence once
 * it has finished its pending operations on the buffer.  Unlike
 * dri3_fence_set(), the trigger happens server-side, ordered after queued
 * X rendering. */
175 dri3_fence_trigger(xcb_connection_t
*c
, struct loader_dri3_buffer
*buffer
)
177 xcb_sync_trigger_fence(c
, buffer
->sync_fence
)
;
/* Block the calling thread until the buffer's xshmfence is triggered
 * (either locally via dri3_fence_set() or by the server via
 * dri3_fence_trigger()).
 * NOTE(review): the extraction appears to have dropped a line here (the
 * original numbering jumps 181 -> 184); the upstream code flushes the
 * connection before waiting — confirm against the real source. */
181 dri3_fence_await(xcb_connection_t
*c
, struct loader_dri3_buffer
*buffer
)
184 xshmfence_await(buffer
->shm_fence
)
;
188 dri3_update_num_back(struct loader_dri3_drawable
*draw
)
/* Record the requested swap interval on the drawable and recompute the
 * number of back buffers to use (the back-buffer count depends on whether
 * swaps are synchronized — see dri3_update_num_back()). */
197 loader_dri3_set_swap_interval(struct loader_dri3_drawable
*draw
, int interval
)
199 draw
->swap_interval
= interval
;
200 dri3_update_num_back(draw
)
;
203 /** dri3_free_render_buffer
205 * Free everything associated with one render buffer including pixmap, fence
206 * stuff and the driver image
/* Tear down everything owned by one render buffer: the X pixmap (only if
 * this buffer created it — pixmaps obtained from the application are not
 * ours to free), the server-side sync fence, the shared-memory fence
 * mapping, the driver image, and — for the prime/different-GPU case — the
 * additional linear staging image.
 * NOTE(review): the extraction drops original line 219; upstream also
 * free()s the buffer struct itself at the end — confirm against the real
 * source. */
209 dri3_free_render_buffer(struct loader_dri3_drawable
*draw
,
210 struct loader_dri3_buffer
*buffer
)
212 if (buffer
->own_pixmap
)
213 xcb_free_pixmap(draw
->conn
, buffer
->pixmap
);
214 xcb_sync_destroy_fence(draw
->conn
, buffer
->sync_fence
);
215 xshmfence_unmap_shm(buffer
->shm_fence
);
216 draw
->ext
->image
->destroyImage(buffer
->image
);
217 if (buffer
->linear_buffer
)
218 draw
->ext
->image
->destroyImage(buffer
->linear_buffer
);
223 loader_dri3_drawable_fini(struct loader_dri3_drawable
*draw
)
227 draw
->ext
->core
->destroyDrawable(draw
->dri_drawable
);
229 for (i
= 0; i
< LOADER_DRI3_NUM_BUFFERS
; i
++) {
230 if (draw
->buffers
[i
])
231 dri3_free_render_buffer(draw
, draw
->buffers
[i
]);
234 if (draw
->special_event
) {
235 xcb_void_cookie_t cookie
=
236 xcb_present_select_input_checked(draw
->conn
, draw
->eid
, draw
->drawable
,
237 XCB_PRESENT_EVENT_MASK_NO_EVENT
);
239 xcb_discard_reply(draw
->conn
, cookie
.sequence
);
240 xcb_unregister_for_special_event(draw
->conn
, draw
->special_event
);
245 loader_dri3_drawable_init(xcb_connection_t
*conn
,
246 xcb_drawable_t drawable
,
247 __DRIscreen
*dri_screen
,
248 bool is_different_gpu
,
249 const __DRIconfig
*dri_config
,
250 struct loader_dri3_extensions
*ext
,
251 const struct loader_dri3_vtable
*vtable
,
252 struct loader_dri3_drawable
*draw
)
254 xcb_get_geometry_cookie_t cookie
;
255 xcb_get_geometry_reply_t
*reply
;
256 xcb_generic_error_t
*error
;
257 GLint vblank_mode
= DRI_CONF_VBLANK_DEF_INTERVAL_1
;
262 draw
->vtable
= vtable
;
263 draw
->drawable
= drawable
;
264 draw
->dri_screen
= dri_screen
;
265 draw
->is_different_gpu
= is_different_gpu
;
268 draw
->have_fake_front
= 0;
269 draw
->first_init
= true;
271 if (draw
->ext
->config
)
272 draw
->ext
->config
->configQueryi(draw
->dri_screen
,
273 "vblank_mode", &vblank_mode
);
275 switch (vblank_mode
) {
276 case DRI_CONF_VBLANK_NEVER
:
277 case DRI_CONF_VBLANK_DEF_INTERVAL_0
:
280 case DRI_CONF_VBLANK_DEF_INTERVAL_1
:
281 case DRI_CONF_VBLANK_ALWAYS_SYNC
:
286 draw
->swap_interval
= swap_interval
;
288 dri3_update_num_back(draw
);
290 /* Create a new drawable */
292 draw
->ext
->image_driver
->createNewDrawable(dri_screen
,
296 if (!draw
->dri_drawable
)
299 cookie
= xcb_get_geometry(draw
->conn
, draw
->drawable
);
300 reply
= xcb_get_geometry_reply(draw
->conn
, cookie
, &error
);
301 if (reply
== NULL
|| error
!= NULL
) {
302 draw
->ext
->core
->destroyDrawable(draw
->dri_drawable
);
306 draw
->width
= reply
->width
;
307 draw
->height
= reply
->height
;
308 draw
->depth
= reply
->depth
;
309 draw
->vtable
->set_drawable_size(draw
, draw
->width
, draw
->height
);
313 * Make sure server has the same swap interval we do for the new
316 loader_dri3_set_swap_interval(draw
, swap_interval
);
322 * Process one Present event
325 dri3_handle_present_event(struct loader_dri3_drawable
*draw
,
326 xcb_present_generic_event_t
*ge
)
328 switch (ge
->evtype
) {
329 case XCB_PRESENT_CONFIGURE_NOTIFY
: {
330 xcb_present_configure_notify_event_t
*ce
= (void *) ge
;
332 draw
->width
= ce
->width
;
333 draw
->height
= ce
->height
;
334 draw
->vtable
->set_drawable_size(draw
, draw
->width
, draw
->height
);
337 case XCB_PRESENT_COMPLETE_NOTIFY
: {
338 xcb_present_complete_notify_event_t
*ce
= (void *) ge
;
340 /* Compute the processed SBC number from the received 32-bit serial number
341 * merged with the upper 32-bits of the sent 64-bit serial number while
344 if (ce
->kind
== XCB_PRESENT_COMPLETE_KIND_PIXMAP
) {
345 draw
->recv_sbc
= (draw
->send_sbc
& 0xffffffff00000000LL
) | ce
->serial
;
346 if (draw
->recv_sbc
> draw
->send_sbc
)
347 draw
->recv_sbc
-= 0x100000000;
349 case XCB_PRESENT_COMPLETE_MODE_FLIP
:
350 draw
->flipping
= true;
352 case XCB_PRESENT_COMPLETE_MODE_COPY
:
353 draw
->flipping
= false;
356 dri3_update_num_back(draw
);
358 if (draw
->vtable
->show_fps
)
359 draw
->vtable
->show_fps(draw
, ce
->ust
);
364 draw
->recv_msc_serial
= ce
->serial
;
365 draw
->notify_ust
= ce
->ust
;
366 draw
->notify_msc
= ce
->msc
;
370 case XCB_PRESENT_EVENT_IDLE_NOTIFY
: {
371 xcb_present_idle_notify_event_t
*ie
= (void *) ge
;
374 for (b
= 0; b
< sizeof(draw
->buffers
) / sizeof(draw
->buffers
[0]); b
++) {
375 struct loader_dri3_buffer
*buf
= draw
->buffers
[b
];
377 if (buf
&& buf
->pixmap
== ie
->pixmap
) {
379 if (draw
->num_back
<= b
&& b
< LOADER_DRI3_MAX_BACK
) {
380 dri3_free_render_buffer(draw
, buf
);
381 draw
->buffers
[b
] = NULL
;
/* Flush pending requests and block until one Present special event arrives
 * on the drawable's special event queue, then dispatch it through
 * dri3_handle_present_event().
 * NOTE(review): lines are missing between the wait and the dispatch
 * (original numbering jumps 399 -> 403); upstream checks ev for NULL and
 * casts it to the generic Present event before dispatch — confirm against
 * the real source. */
393 dri3_wait_for_event(struct loader_dri3_drawable
*draw
)
395 xcb_generic_event_t
*ev
;
396 xcb_present_generic_event_t
*ge
;
398 xcb_flush(draw
->conn
);
399 ev
= xcb_wait_for_special_event(draw
->conn
, draw
->special_event
);
403 dri3_handle_present_event(draw
, ge
);
407 /** loader_dri3_wait_for_msc
409 * Get the X server to send an event when the target msc/divisor/remainder is
413 loader_dri3_wait_for_msc(struct loader_dri3_drawable
*draw
,
415 int64_t divisor
, int64_t remainder
,
416 int64_t *ust
, int64_t *msc
, int64_t *sbc
)
420 msc_serial
= ++draw
->send_msc_serial
;
421 xcb_present_notify_msc(draw
->conn
,
428 xcb_flush(draw
->conn
);
430 /* Wait for the event */
431 if (draw
->special_event
) {
432 while ((int32_t) (msc_serial
- draw
->recv_msc_serial
) > 0) {
433 if (!dri3_wait_for_event(draw
))
438 *ust
= draw
->notify_ust
;
439 *msc
= draw
->notify_msc
;
440 *sbc
= draw
->recv_sbc
;
445 /** loader_dri3_wait_for_sbc
447 * Wait for the completed swap buffer count to reach the specified
448 * target. Presumably the application knows that this will be reached with
449 * outstanding complete events, or we're going to be here awhile.
/* Block until the completed swap count (recv_sbc) reaches target_sbc,
 * processing Present events as they arrive.  Per GLX_OML_sync_control, a
 * target_sbc of 0 means "wait for every swap already requested", so it is
 * replaced by the current send_sbc.  On success the out-parameter sbc is
 * filled with the reached swap count; ust/msc out-parameters are presumably
 * filled in lines dropped by the extraction — confirm against the real
 * source. */
452 loader_dri3_wait_for_sbc(struct loader_dri3_drawable
*draw
,
453 int64_t target_sbc
, int64_t *ust
,
454 int64_t *msc
, int64_t *sbc
)
456 /* From the GLX_OML_sync_control spec:
458 * "If <target_sbc> = 0, the function will block until all previous
459 * swaps requested with glXSwapBuffersMscOML for that window have
463 target_sbc
= draw
->send_sbc
;
465 while (draw
->recv_sbc
< target_sbc
) {
466 if (!dri3_wait_for_event(draw
))
472 *sbc
= draw
->recv_sbc
;
476 /** loader_dri3_find_back
478 * Find an idle back buffer. If there isn't one, then
479 * wait for a present idle notify event from the X server
482 dri3_find_back(struct loader_dri3_drawable
*draw
)
485 xcb_generic_event_t
*ev
;
486 xcb_present_generic_event_t
*ge
;
488 /* Increase the likelihood of reusing current buffer */
489 dri3_flush_present_events(draw
);
492 for (b
= 0; b
< draw
->num_back
; b
++) {
493 int id
= LOADER_DRI3_BACK_ID((b
+ draw
->cur_back
) % draw
->num_back
);
494 struct loader_dri3_buffer
*buffer
= draw
->buffers
[id
];
496 if (!buffer
|| !buffer
->busy
) {
501 xcb_flush(draw
->conn
);
502 ev
= xcb_wait_for_special_event(draw
->conn
, draw
->special_event
);
506 dri3_handle_present_event(draw
, ge
);
510 static xcb_gcontext_t
511 dri3_drawable_gc(struct loader_dri3_drawable
*draw
)
515 xcb_create_gc(draw
->conn
,
516 (draw
->gc
= xcb_generate_id(draw
->conn
)),
518 XCB_GC_GRAPHICS_EXPOSURES
,
/* Return the currently selected back buffer (slot cur_back, mapped into
 * the buffers[] array via LOADER_DRI3_BACK_ID). */
525 static struct loader_dri3_buffer
*
526 dri3_back_buffer(struct loader_dri3_drawable
*draw
)
528 return draw
->buffers
[LOADER_DRI3_BACK_ID(draw
->cur_back
)];
/* Return the fake front buffer, which lives at the fixed
 * LOADER_DRI3_FRONT_ID slot of the buffers[] array (may be NULL if no
 * fake front has been allocated). */
531 static struct loader_dri3_buffer
*
532 dri3_fake_front_buffer(struct loader_dri3_drawable
*draw
)
534 return draw
->buffers
[LOADER_DRI3_FRONT_ID
];
538 dri3_copy_area(xcb_connection_t
*c
,
539 xcb_drawable_t src_drawable
,
540 xcb_drawable_t dst_drawable
,
549 xcb_void_cookie_t cookie
;
551 cookie
= xcb_copy_area_checked(c
,
561 xcb_discard_reply(c
, cookie
.sequence
);
565 * Asks the driver to flush any queued work necessary for serializing with the
566 * X command stream, and optionally the slightly more strict requirement of
567 * glFlush() equivalence (which would require flushing even if nothing had
568 * been drawn to a window system framebuffer, for example).
/* Ask the driver to flush queued work for this drawable via the DRI2 flush
 * extension's flush_with_flags, forwarding the flags and throttle reason.
 * The dri context is obtained from the drawable's vtable; as the original
 * in-code note says, it is used without a NULL check here.
 * NOTE(review): the `flags` parameter's declaration line (original 572) was
 * dropped by the extraction — confirm the full signature against the real
 * source. */
571 loader_dri3_flush(struct loader_dri3_drawable
*draw
,
573 enum __DRI2throttleReason throttle_reason
)
575 /* NEED TO CHECK WHETHER CONTEXT IS NULL */
576 __DRIcontext
*dri_context
= draw
->vtable
->get_dri_context(draw
);
579 draw
->ext
->flush
->flush_with_flags(dri_context
, draw
->dri_drawable
,
580 flags
, throttle_reason
);
585 loader_dri3_copy_sub_buffer(struct loader_dri3_drawable
*draw
,
587 int width
, int height
,
590 struct loader_dri3_buffer
*back
;
591 unsigned flags
= __DRI2_FLUSH_DRAWABLE
;
593 /* Check we have the right attachments */
594 if (!draw
->have_back
|| draw
->is_pixmap
)
598 flags
|= __DRI2_FLUSH_CONTEXT
;
599 loader_dri3_flush(draw
, flags
, __DRI2_THROTTLE_SWAPBUFFER
);
601 back
= dri3_back_buffer(draw
);
602 y
= draw
->height
- y
- height
;
604 if (draw
->is_different_gpu
) {
605 /* Update the linear buffer part of the back buffer
606 * for the dri3_copy_area operation
608 (void) loader_dri3_blit_image(draw
,
611 0, 0, back
->width
, back
->height
,
612 0, 0, __BLIT_FLAG_FLUSH
);
613 /* We use blit_image to update our fake front,
615 if (draw
->have_fake_front
)
616 (void) loader_dri3_blit_image(draw
,
617 dri3_fake_front_buffer(draw
)->image
,
620 x
, y
, __BLIT_FLAG_FLUSH
);
623 loader_dri3_swapbuffer_barrier(draw
);
624 dri3_fence_reset(draw
->conn
, back
);
625 dri3_copy_area(draw
->conn
,
626 dri3_back_buffer(draw
)->pixmap
,
628 dri3_drawable_gc(draw
),
629 x
, y
, x
, y
, width
, height
);
630 dri3_fence_trigger(draw
->conn
, back
);
631 /* Refresh the fake front (if present) after we just damaged the real
634 if (draw
->have_fake_front
&& !draw
->is_different_gpu
) {
635 dri3_fence_reset(draw
->conn
, dri3_fake_front_buffer(draw
));
636 dri3_copy_area(draw
->conn
,
637 dri3_back_buffer(draw
)->pixmap
,
638 dri3_fake_front_buffer(draw
)->pixmap
,
639 dri3_drawable_gc(draw
),
640 x
, y
, x
, y
, width
, height
);
641 dri3_fence_trigger(draw
->conn
, dri3_fake_front_buffer(draw
));
642 dri3_fence_await(draw
->conn
, dri3_fake_front_buffer(draw
));
644 dri3_fence_await(draw
->conn
, back
);
648 loader_dri3_copy_drawable(struct loader_dri3_drawable
*draw
,
652 loader_dri3_flush(draw
, __DRI2_FLUSH_DRAWABLE
, 0);
654 dri3_fence_reset(draw
->conn
, dri3_fake_front_buffer(draw
));
655 dri3_copy_area(draw
->conn
,
657 dri3_drawable_gc(draw
),
658 0, 0, 0, 0, draw
->width
, draw
->height
);
659 dri3_fence_trigger(draw
->conn
, dri3_fake_front_buffer(draw
));
660 dri3_fence_await(draw
->conn
, dri3_fake_front_buffer(draw
));
664 loader_dri3_wait_x(struct loader_dri3_drawable
*draw
)
666 struct loader_dri3_buffer
*front
;
668 if (draw
== NULL
|| !draw
->have_fake_front
)
671 front
= dri3_fake_front_buffer(draw
);
673 loader_dri3_copy_drawable(draw
, front
->pixmap
, draw
->drawable
);
675 /* In the psc->is_different_gpu case, the linear buffer has been updated,
676 * but not yet the tiled buffer.
677 * Copy back to the tiled buffer we use for rendering.
678 * Note that we don't need flushing.
680 if (draw
->is_different_gpu
)
681 (void) loader_dri3_blit_image(draw
,
683 front
->linear_buffer
,
684 0, 0, front
->width
, front
->height
,
689 loader_dri3_wait_gl(struct loader_dri3_drawable
*draw
)
691 struct loader_dri3_buffer
*front
;
693 if (draw
== NULL
|| !draw
->have_fake_front
)
696 front
= dri3_fake_front_buffer(draw
);
698 /* In the psc->is_different_gpu case, we update the linear_buffer
699 * before updating the real front.
701 if (draw
->is_different_gpu
)
702 (void) loader_dri3_blit_image(draw
,
703 front
->linear_buffer
,
705 0, 0, front
->width
, front
->height
,
706 0, 0, __BLIT_FLAG_FLUSH
);
707 loader_dri3_swapbuffer_barrier(draw
);
708 loader_dri3_copy_drawable(draw
, draw
->drawable
, front
->pixmap
);
711 /** dri3_flush_present_events
713 * Process any present events that have been received from the X server
/* Drain (non-blocking) every Present special event currently queued on the
 * drawable's connection and dispatch each one through
 * dri3_handle_present_event().  A no-op when no special event queue was
 * registered (draw->special_event == NULL, e.g. for pixmaps). */
716 dri3_flush_present_events(struct loader_dri3_drawable
*draw
)
718 /* Check to see if any configuration changes have occurred
719 * since we were last invoked
721 if (draw
->special_event
) {
722 xcb_generic_event_t
*ev
;
724 while ((ev
= xcb_poll_for_special_event(draw
->conn
,
725 draw
->special_event
)) != NULL
) {
726 xcb_present_generic_event_t
*ge
= (void *) ev
;
727 dri3_handle_present_event(draw
, ge
);
732 /** loader_dri3_swap_buffers_msc
734 * Make the current back buffer visible using the present extension
737 loader_dri3_swap_buffers_msc(struct loader_dri3_drawable
*draw
,
738 int64_t target_msc
, int64_t divisor
,
739 int64_t remainder
, unsigned flush_flags
,
742 struct loader_dri3_buffer
*back
;
744 uint32_t options
= XCB_PRESENT_OPTION_NONE
;
746 draw
->vtable
->flush_drawable(draw
, flush_flags
);
748 back
= draw
->buffers
[dri3_find_back(draw
)];
749 if (draw
->is_different_gpu
&& back
) {
750 /* Update the linear buffer before presenting the pixmap */
751 (void) loader_dri3_blit_image(draw
,
754 0, 0, back
->width
, back
->height
,
755 0, 0, __BLIT_FLAG_FLUSH
);
756 /* Update the fake front */
757 if (draw
->have_fake_front
)
758 (void) loader_dri3_blit_image(draw
,
759 draw
->buffers
[LOADER_DRI3_FRONT_ID
]->image
,
761 0, 0, draw
->width
, draw
->height
,
762 0, 0, __BLIT_FLAG_FLUSH
);
765 dri3_flush_present_events(draw
);
767 if (back
&& !draw
->is_pixmap
) {
768 dri3_fence_reset(draw
->conn
, back
);
770 /* Compute when we want the frame shown by taking the last known
771 * successful MSC and adding in a swap interval for each outstanding swap
772 * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
776 if (target_msc
== 0 && divisor
== 0 && remainder
== 0)
777 target_msc
= draw
->msc
+ draw
->swap_interval
*
778 (draw
->send_sbc
- draw
->recv_sbc
);
779 else if (divisor
== 0 && remainder
> 0) {
780 /* From the GLX_OML_sync_control spec:
781 * "If <divisor> = 0, the swap will occur when MSC becomes
782 * greater than or equal to <target_msc>."
784 * Note that there's no mention of the remainder. The Present
785 * extension throws BadValue for remainder != 0 with divisor == 0, so
786 * just drop the passed in value.
791 /* From the GLX_EXT_swap_control spec
792 * and the EGL 1.4 spec (page 53):
794 * "If <interval> is set to a value of 0, buffer swaps are not
795 * synchronized to a video frame."
797 * Implementation note: It is possible to enable triple buffering
798 * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
801 if (draw
->swap_interval
== 0)
802 options
|= XCB_PRESENT_OPTION_ASYNC
;
804 options
|= XCB_PRESENT_OPTION_COPY
;
807 back
->last_swap
= draw
->send_sbc
;
808 xcb_present_pixmap(draw
->conn
,
811 (uint32_t) draw
->send_sbc
,
816 None
, /* target_crtc */
823 ret
= (int64_t) draw
->send_sbc
;
825 /* If there's a fake front, then copy the source back buffer
826 * to the fake front to keep it up to date. This needs
827 * to reset the fence and make future users block until
828 * the X server is done copying the bits
830 if (draw
->have_fake_front
&& !draw
->is_different_gpu
) {
831 dri3_fence_reset(draw
->conn
, draw
->buffers
[LOADER_DRI3_FRONT_ID
]);
832 dri3_copy_area(draw
->conn
,
834 draw
->buffers
[LOADER_DRI3_FRONT_ID
]->pixmap
,
835 dri3_drawable_gc(draw
),
837 draw
->width
, draw
->height
);
838 dri3_fence_trigger(draw
->conn
, draw
->buffers
[LOADER_DRI3_FRONT_ID
]);
840 xcb_flush(draw
->conn
);
845 draw
->ext
->flush
->invalidate(draw
->dri_drawable
);
/* Report the "age" of the next back buffer (EGL_EXT_buffer_age semantics):
 * how many swaps ago its contents were last presented.  Picks the back
 * buffer dri3_find_back() would use; if that slot is invalid or empty the
 * visible guard bails out (the dropped return line presumably yields 0 —
 * confirm against the real source).  A buffer that has been swapped at
 * least once (last_swap != 0) has age send_sbc - last_swap + 1. */
851 loader_dri3_query_buffer_age(struct loader_dri3_drawable
*draw
)
853 int back_id
= LOADER_DRI3_BACK_ID(dri3_find_back(draw
));
855 if (back_id
< 0 || !draw
->buffers
[back_id
])
858 if (draw
->buffers
[back_id
]->last_swap
!= 0)
859 return draw
->send_sbc
- draw
->buffers
[back_id
]->last_swap
+ 1;
866 * Wrapper around xcb_dri3_open
869 loader_dri3_open(xcb_connection_t
*conn
,
873 xcb_dri3_open_cookie_t cookie
;
874 xcb_dri3_open_reply_t
*reply
;
877 cookie
= xcb_dri3_open(conn
,
881 reply
= xcb_dri3_open_reply(conn
, cookie
, NULL
);
885 if (reply
->nfd
!= 1) {
890 fd
= xcb_dri3_open_reply_fds(conn
, reply
)[0];
892 fcntl(fd
, F_SETFD
, fcntl(fd
, F_GETFD
) | FD_CLOEXEC
);
898 dri3_cpp_for_format(uint32_t format
) {
900 case __DRI_IMAGE_FORMAT_R8
:
902 case __DRI_IMAGE_FORMAT_RGB565
:
903 case __DRI_IMAGE_FORMAT_GR88
:
905 case __DRI_IMAGE_FORMAT_XRGB8888
:
906 case __DRI_IMAGE_FORMAT_ARGB8888
:
907 case __DRI_IMAGE_FORMAT_ABGR8888
:
908 case __DRI_IMAGE_FORMAT_XBGR8888
:
909 case __DRI_IMAGE_FORMAT_XRGB2101010
:
910 case __DRI_IMAGE_FORMAT_ARGB2101010
:
911 case __DRI_IMAGE_FORMAT_SARGB8
:
913 case __DRI_IMAGE_FORMAT_NONE
:
919 /** loader_dri3_alloc_render_buffer
921 * Use the driver createImage function to construct a __DRIimage, then
922 * get a file descriptor for that and create an X pixmap from that
924 * Allocate an xshmfence for synchronization
926 static struct loader_dri3_buffer
*
927 dri3_alloc_render_buffer(struct loader_dri3_drawable
*draw
, unsigned int format
,
928 int width
, int height
, int depth
)
930 struct loader_dri3_buffer
*buffer
;
931 __DRIimage
*pixmap_buffer
;
933 xcb_sync_fence_t sync_fence
;
934 struct xshmfence
*shm_fence
;
935 int buffer_fd
, fence_fd
;
938 /* Create an xshmfence object and
939 * prepare to send that to the X server
942 fence_fd
= xshmfence_alloc_shm();
946 shm_fence
= xshmfence_map_shm(fence_fd
);
947 if (shm_fence
== NULL
)
950 /* Allocate the image from the driver
952 buffer
= calloc(1, sizeof *buffer
);
956 buffer
->cpp
= dri3_cpp_for_format(format
);
960 if (!draw
->is_different_gpu
) {
961 buffer
->image
= draw
->ext
->image
->createImage(draw
->dri_screen
,
964 __DRI_IMAGE_USE_SHARE
|
965 __DRI_IMAGE_USE_SCANOUT
|
966 __DRI_IMAGE_USE_BACKBUFFER
,
968 pixmap_buffer
= buffer
->image
;
973 buffer
->image
= draw
->ext
->image
->createImage(draw
->dri_screen
,
982 buffer
->linear_buffer
=
983 draw
->ext
->image
->createImage(draw
->dri_screen
,
984 width
, height
, format
,
985 __DRI_IMAGE_USE_SHARE
|
986 __DRI_IMAGE_USE_LINEAR
|
987 __DRI_IMAGE_USE_BACKBUFFER
,
989 pixmap_buffer
= buffer
->linear_buffer
;
991 if (!buffer
->linear_buffer
)
992 goto no_linear_buffer
;
995 /* X wants the stride, so ask the image for it
997 if (!draw
->ext
->image
->queryImage(pixmap_buffer
, __DRI_IMAGE_ATTRIB_STRIDE
,
999 goto no_buffer_attrib
;
1001 buffer
->pitch
= stride
;
1003 if (!draw
->ext
->image
->queryImage(pixmap_buffer
, __DRI_IMAGE_ATTRIB_FD
,
1005 goto no_buffer_attrib
;
1007 xcb_dri3_pixmap_from_buffer(draw
->conn
,
1008 (pixmap
= xcb_generate_id(draw
->conn
)),
1011 width
, height
, buffer
->pitch
,
1012 depth
, buffer
->cpp
* 8,
1015 xcb_dri3_fence_from_fd(draw
->conn
,
1017 (sync_fence
= xcb_generate_id(draw
->conn
)),
1021 buffer
->pixmap
= pixmap
;
1022 buffer
->own_pixmap
= true;
1023 buffer
->sync_fence
= sync_fence
;
1024 buffer
->shm_fence
= shm_fence
;
1025 buffer
->width
= width
;
1026 buffer
->height
= height
;
1028 /* Mark the buffer as idle
1030 dri3_fence_set(buffer
);
1035 draw
->ext
->image
->destroyImage(pixmap_buffer
);
1037 if (draw
->is_different_gpu
)
1038 draw
->ext
->image
->destroyImage(buffer
->image
);
1042 xshmfence_unmap_shm(shm_fence
);
1048 /** loader_dri3_update_drawable
1050 * Called the first time we use the drawable and then
1051 * after we receive present configure notify events to
1052 * track the geometry of the drawable
1055 dri3_update_drawable(__DRIdrawable
*driDrawable
,
1056 struct loader_dri3_drawable
*draw
)
1058 if (draw
->first_init
) {
1059 xcb_get_geometry_cookie_t geom_cookie
;
1060 xcb_get_geometry_reply_t
*geom_reply
;
1061 xcb_void_cookie_t cookie
;
1062 xcb_generic_error_t
*error
;
1063 xcb_present_query_capabilities_cookie_t present_capabilities_cookie
;
1064 xcb_present_query_capabilities_reply_t
*present_capabilities_reply
;
1066 draw
->first_init
= false;
1068 /* Try to select for input on the window.
1070 * If the drawable is a window, this will get our events
1073 * Otherwise, we'll get a BadWindow error back from this request which
1074 * will let us know that the drawable is a pixmap instead.
1077 draw
->eid
= xcb_generate_id(draw
->conn
);
1079 xcb_present_select_input_checked(draw
->conn
, draw
->eid
, draw
->drawable
,
1080 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY
|
1081 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY
|
1082 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY
);
1084 present_capabilities_cookie
=
1085 xcb_present_query_capabilities(draw
->conn
, draw
->drawable
);
1087 /* Create an XCB event queue to hold present events outside of the usual
1088 * application event queue
1090 draw
->special_event
= xcb_register_for_special_xge(draw
->conn
,
1094 geom_cookie
= xcb_get_geometry(draw
->conn
, draw
->drawable
);
1096 geom_reply
= xcb_get_geometry_reply(draw
->conn
, geom_cookie
, NULL
);
1101 draw
->width
= geom_reply
->width
;
1102 draw
->height
= geom_reply
->height
;
1103 draw
->depth
= geom_reply
->depth
;
1104 draw
->vtable
->set_drawable_size(draw
, draw
->width
, draw
->height
);
1108 draw
->is_pixmap
= false;
1110 /* Check to see if our select input call failed. If it failed with a
1111 * BadWindow error, then assume the drawable is a pixmap. Destroy the
1112 * special event queue created above and mark the drawable as a pixmap
1115 error
= xcb_request_check(draw
->conn
, cookie
);
1117 present_capabilities_reply
=
1118 xcb_present_query_capabilities_reply(draw
->conn
,
1119 present_capabilities_cookie
,
1122 if (present_capabilities_reply
) {
1123 draw
->present_capabilities
= present_capabilities_reply
->capabilities
;
1124 free(present_capabilities_reply
);
1126 draw
->present_capabilities
= 0;
1129 if (error
->error_code
!= BadWindow
) {
1133 draw
->is_pixmap
= true;
1134 xcb_unregister_for_special_event(draw
->conn
, draw
->special_event
);
1135 draw
->special_event
= NULL
;
1138 dri3_flush_present_events(draw
);
1142 /* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
1143 * the createImageFromFds call takes __DRI_IMAGE_FOURCC codes. To avoid
1144 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
1145 * translate to __DRI_IMAGE_FOURCC codes in the call to createImageFromFds
1148 image_format_to_fourcc(int format
)
1151 /* Convert from __DRI_IMAGE_FORMAT to __DRI_IMAGE_FOURCC (sigh) */
1153 case __DRI_IMAGE_FORMAT_SARGB8
: return __DRI_IMAGE_FOURCC_SARGB8888
;
1154 case __DRI_IMAGE_FORMAT_RGB565
: return __DRI_IMAGE_FOURCC_RGB565
;
1155 case __DRI_IMAGE_FORMAT_XRGB8888
: return __DRI_IMAGE_FOURCC_XRGB8888
;
1156 case __DRI_IMAGE_FORMAT_ARGB8888
: return __DRI_IMAGE_FOURCC_ARGB8888
;
1157 case __DRI_IMAGE_FORMAT_ABGR8888
: return __DRI_IMAGE_FOURCC_ABGR8888
;
1158 case __DRI_IMAGE_FORMAT_XBGR8888
: return __DRI_IMAGE_FOURCC_XBGR8888
;
1164 loader_dri3_create_image(xcb_connection_t
*c
,
1165 xcb_dri3_buffer_from_pixmap_reply_t
*bp_reply
,
1166 unsigned int format
,
1167 __DRIscreen
*dri_screen
,
1168 const __DRIimageExtension
*image
,
1169 void *loaderPrivate
)
1172 __DRIimage
*image_planar
, *ret
;
1175 /* Get an FD for the pixmap object
1177 fds
= xcb_dri3_buffer_from_pixmap_reply_fds(c
, bp_reply
);
1179 stride
= bp_reply
->stride
;
1182 /* createImageFromFds creates a wrapper __DRIimage structure which
1183 * can deal with multiple planes for things like Yuv images. So, once
1184 * we've gotten the planar wrapper, pull the single plane out of it and
1185 * discard the wrapper.
1187 image_planar
= image
->createImageFromFds(dri_screen
,
1190 image_format_to_fourcc(format
),
1192 &stride
, &offset
, loaderPrivate
);
1197 ret
= image
->fromPlanar(image_planar
, 0, loaderPrivate
);
1199 image
->destroyImage(image_planar
);
1204 /** dri3_get_pixmap_buffer
1206 * Get the DRM object for a pixmap from the X server and
1207 * wrap that with a __DRIimage structure using createImageFromFds
1209 static struct loader_dri3_buffer
*
1210 dri3_get_pixmap_buffer(__DRIdrawable
*driDrawable
, unsigned int format
,
1211 enum loader_dri3_buffer_type buffer_type
,
1212 struct loader_dri3_drawable
*draw
)
1214 int buf_id
= loader_dri3_pixmap_buf_id(buffer_type
);
1215 struct loader_dri3_buffer
*buffer
= draw
->buffers
[buf_id
];
1216 xcb_drawable_t pixmap
;
1217 xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie
;
1218 xcb_dri3_buffer_from_pixmap_reply_t
*bp_reply
;
1219 xcb_sync_fence_t sync_fence
;
1220 struct xshmfence
*shm_fence
;
1226 pixmap
= draw
->drawable
;
1228 buffer
= calloc(1, sizeof *buffer
);
1232 fence_fd
= xshmfence_alloc_shm();
1235 shm_fence
= xshmfence_map_shm(fence_fd
);
1236 if (shm_fence
== NULL
) {
1241 xcb_dri3_fence_from_fd(draw
->conn
,
1243 (sync_fence
= xcb_generate_id(draw
->conn
)),
1247 bp_cookie
= xcb_dri3_buffer_from_pixmap(draw
->conn
, pixmap
);
1248 bp_reply
= xcb_dri3_buffer_from_pixmap_reply(draw
->conn
, bp_cookie
, NULL
);
1252 buffer
->image
= loader_dri3_create_image(draw
->conn
, bp_reply
, format
,
1253 draw
->dri_screen
, draw
->ext
->image
,
1258 buffer
->pixmap
= pixmap
;
1259 buffer
->own_pixmap
= false;
1260 buffer
->width
= bp_reply
->width
;
1261 buffer
->height
= bp_reply
->height
;
1262 buffer
->buffer_type
= buffer_type
;
1263 buffer
->shm_fence
= shm_fence
;
1264 buffer
->sync_fence
= sync_fence
;
1266 draw
->buffers
[buf_id
] = buffer
;
1274 xcb_sync_destroy_fence(draw
->conn
, sync_fence
);
1275 xshmfence_unmap_shm(shm_fence
);
/*
 * dri3_get_buffer — return the cached front or back buffer for `draw`,
 * allocating a replacement when there is no cached buffer or the cached
 * one no longer matches draw->width/draw->height.  On resize the old
 * contents are preserved: either a server-side copy bracketed by fences
 * (dri3_fence_reset / dri3_copy_area / dri3_fence_trigger) or, for
 * linear buffers with a current context, a GPU blit via
 * loader_dri3_blit_image.
 *
 * NOTE(review): this chunk is a lossy extraction — original source lines
 * (full argument lists, braces, `break`s, the final `return buffer;`) are
 * missing, so the text below is not compilable as-is.  Restore the elided
 * lines from the upstream file before building.
 */
1284 * Find a front or back buffer, allocating new ones as necessary
1286 static struct loader_dri3_buffer
*
1287 dri3_get_buffer(__DRIdrawable
*driDrawable
,
1288 unsigned int format
,
1289 enum loader_dri3_buffer_type buffer_type
,
1290 struct loader_dri3_drawable
*draw
)
1292 struct loader_dri3_buffer
*buffer
;
/* Slot selection: back buffers get a rotating id from dri3_find_back(),
 * the front buffer uses the fixed LOADER_DRI3_FRONT_ID slot. */
1295 if (buffer_type
== loader_dri3_buffer_back
) {
1296 buf_id
= dri3_find_back(draw
);
1301 buf_id
= LOADER_DRI3_FRONT_ID
;
1304 buffer
= draw
->buffers
[buf_id
];
1306 /* Allocate a new buffer if there isn't an old one, or if that
1307 * old one is the wrong size
1309 if (!buffer
|| buffer
->width
!= draw
->width
||
1310 buffer
->height
!= draw
->height
) {
1311 struct loader_dri3_buffer
*new_buffer
;
1313 /* Allocate the new buffers
/* NOTE(review): the alloc call's remaining arguments (format, size,
 * depth, …) are elided in this extraction. */
1315 new_buffer
= dri3_alloc_render_buffer(draw
,
1323 /* When resizing, copy the contents of the old buffer, waiting for that
1324 * copy to complete using our fences before proceeding
1326 switch (buffer_type
) {
1327 case loader_dri3_buffer_back
:
/* Non-linear old back buffer: server-side copy guarded by fences —
 * reset the new buffer's fence, wait on the old buffer, copy, then
 * trigger the new buffer's fence. */
1329 if (!buffer
->linear_buffer
) {
1330 dri3_fence_reset(draw
->conn
, new_buffer
);
1331 dri3_fence_await(draw
->conn
, buffer
);
1332 dri3_copy_area(draw
->conn
,
1335 dri3_drawable_gc(draw
),
1337 draw
->width
, draw
->height
);
1338 dri3_fence_trigger(draw
->conn
, new_buffer
);
/* Linear buffer with a bound context: blit on the GPU instead;
 * result deliberately ignored (best effort). */
1339 } else if (draw
->vtable
->in_current_context(draw
)) {
1340 (void) loader_dri3_blit_image(draw
,
1343 0, 0, draw
->width
, draw
->height
,
/* Old back buffer is no longer needed either way. */
1346 dri3_free_render_buffer(draw
, buffer
);
1349 case loader_dri3_buffer_front
:
/* Front buffer: make sure pending swaps hit the hardware before
 * copying the on-screen contents into the new buffer. */
1350 loader_dri3_swapbuffer_barrier(draw
);
1351 dri3_fence_reset(draw
->conn
, new_buffer
);
1352 dri3_copy_area(draw
->conn
,
1355 dri3_drawable_gc(draw
),
1357 draw
->width
, draw
->height
);
1358 dri3_fence_trigger(draw
->conn
, new_buffer
);
/* If the new front has a separate linear staging buffer and a context
 * is current, also sync the copy into the linear buffer via blit. */
1360 if (new_buffer
->linear_buffer
&&
1361 draw
->vtable
->in_current_context(draw
)) {
1362 dri3_fence_await(draw
->conn
, new_buffer
);
1363 (void) loader_dri3_blit_image(draw
,
1365 new_buffer
->linear_buffer
,
1366 0, 0, draw
->width
, draw
->height
,
/* Install the replacement in the cache slot. */
1371 buffer
= new_buffer
;
1372 buffer
->buffer_type
= buffer_type
;
1373 draw
->buffers
[buf_id
] = buffer
;
/* Block until any copy into the returned buffer has completed. */
1375 dri3_fence_await(draw
->conn
, buffer
);
1377 /* Return the requested buffer */
/*
 * dri3_free_buffers — release either the front buffer slot or every back
 * buffer slot of `draw`, freeing each render buffer and NULLing its cache
 * entry.
 *
 * NOTE(review): lossy extraction — the `break` statements between cases
 * and the front case's buffer-count assignment (presumably n_id = 1) are
 * missing here; confirm against the upstream file.
 */
1381 /** dri3_free_buffers
1383 * Free the front buffer or all of the back buffers. Used
1384 * when the application changes which buffers it needs
1387 dri3_free_buffers(__DRIdrawable
*driDrawable
,
1388 enum loader_dri3_buffer_type buffer_type
,
1389 struct loader_dri3_drawable
*draw
)
1391 struct loader_dri3_buffer
*buffer
;
/* Choose the slot range to free: all back buffers, or just the front. */
1396 switch (buffer_type
) {
1397 case loader_dri3_buffer_back
:
1398 first_id
= LOADER_DRI3_BACK_ID(0);
1399 n_id
= LOADER_DRI3_MAX_BACK
;
1401 case loader_dri3_buffer_front
:
1402 first_id
= LOADER_DRI3_FRONT_ID
;
/* Free each cached buffer in the selected range and clear its slot so a
 * later dri3_get_buffer() reallocates on demand. */
1406 for (buf_id
= first_id
; buf_id
< first_id
+ n_id
; buf_id
++) {
1407 buffer
= draw
->buffers
[buf_id
];
1409 dri3_free_render_buffer(draw
, buffer
);
1410 draw
->buffers
[buf_id
] = NULL
;
/*
 * loader_dri3_get_buffers — the __DRIimageLoaderExtension getBuffers
 * entry point.  Clears the output image list, refreshes the drawable,
 * then fills buffers->front / buffers->back (and image_mask) according
 * to buffer_mask, allocating or freeing cached buffers as needed.
 * Pixmaps always force a front buffer.
 *
 * NOTE(review): lossy extraction — error returns (e.g. after the
 * dri3_update_drawable failure), several call-argument lines, the
 * `else` branches' condition lines, and the final `return true;` are
 * missing from this text; restore from upstream before building.
 */
1415 /** loader_dri3_get_buffers
1417 * The published buffer allocation API.
1418 * Returns all of the necessary buffers, allocating
1422 loader_dri3_get_buffers(__DRIdrawable
*driDrawable
,
1423 unsigned int format
,
1425 void *loaderPrivate
,
1426 uint32_t buffer_mask
,
1427 struct __DRIimageList
*buffers
)
1429 struct loader_dri3_drawable
*draw
= loaderPrivate
;
1430 struct loader_dri3_buffer
*front
, *back
;
/* Start from an empty image list; populate below. */
1432 buffers
->image_mask
= 0;
1433 buffers
->front
= NULL
;
1434 buffers
->back
= NULL
;
/* Refresh drawable state (size etc.) before handing out buffers. */
1439 if (!dri3_update_drawable(driDrawable
, draw
))
1442 /* pixmaps always have front buffers */
1443 if (draw
->is_pixmap
)
1444 buffer_mask
|= __DRI_IMAGE_BUFFER_FRONT
;
1446 if (buffer_mask
& __DRI_IMAGE_BUFFER_FRONT
) {
1447 /* All pixmaps are owned by the server gpu.
1448 * When we use a different gpu, we can't use the pixmap
1449 * as buffer since it is potentially tiled a way
1450 * our device can't understand. In this case, use
1451 * a fake front buffer. Hopefully the pixmap
1452 * content will get synced with the fake front
/* Same-GPU pixmap: wrap the pixmap itself; otherwise allocate a
 * (fake) front buffer via dri3_get_buffer. */
1455 if (draw
->is_pixmap
&& !draw
->is_different_gpu
)
1456 front
= dri3_get_pixmap_buffer(driDrawable
,
1458 loader_dri3_buffer_front
,
1461 front
= dri3_get_buffer(driDrawable
,
1463 loader_dri3_buffer_front
,
/* Front not requested: drop any cached front and clear the flag. */
1469 dri3_free_buffers(driDrawable
, loader_dri3_buffer_front
, draw
);
1470 draw
->have_fake_front
= 0;
1473 if (buffer_mask
& __DRI_IMAGE_BUFFER_BACK
) {
1474 back
= dri3_get_buffer(driDrawable
,
1476 loader_dri3_buffer_back
,
1480 draw
->have_back
= 1;
/* Back not requested: release all cached back buffers. */
1482 dri3_free_buffers(driDrawable
, loader_dri3_buffer_back
, draw
);
1483 draw
->have_back
= 0;
/* Publish the acquired buffers to the caller's image list. */
1487 buffers
->image_mask
|= __DRI_IMAGE_BUFFER_FRONT
;
1488 buffers
->front
= front
->image
;
/* The front is "fake" whenever the pixmap itself can't be used
 * directly (different GPU) or the drawable isn't a pixmap. */
1489 draw
->have_fake_front
= draw
->is_different_gpu
|| !draw
->is_pixmap
;
1493 buffers
->image_mask
|= __DRI_IMAGE_BUFFER_BACK
;
1494 buffers
->back
= back
->image
;
/* Remember the stamp so later calls can detect drawable changes. */
1497 draw
->stamp
= stamp
;
/*
 * loader_dri3_update_drawable_geometry — synchronous round-trip to the X
 * server (xcb_get_geometry) to refresh draw->width / draw->height, then
 * notify the owner through vtable->set_drawable_size.
 *
 * NOTE(review): lossy extraction — the NULL check on geom_reply and the
 * free() of the reply (xcb replies are malloc'd and must be freed by the
 * caller) are not visible in this text; confirm they exist upstream.
 */
1502 /** loader_dri3_update_drawable_geometry
1504 * Get the current drawable geometry.
1507 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable
*draw
)
1509 xcb_get_geometry_cookie_t geom_cookie
;
1510 xcb_get_geometry_reply_t
*geom_reply
;
/* Issue the request, then block for the reply (errors discarded). */
1512 geom_cookie
= xcb_get_geometry(draw
->conn
, draw
->drawable
)

;
1514 geom_reply
= xcb_get_geometry_reply(draw
->conn
, geom_cookie
, NULL
);
/* Cache the new size and propagate it to the drawable's owner. */
1517 draw
->width
= geom_reply
->width
;
1518 draw
->height
= geom_reply
->height
;
1519 draw
->vtable
->set_drawable_size(draw
, draw
->width
, draw
->height
);
/*
 * loader_dri3_swapbuffer_barrier — block until all pending swaps for
 * `draw` have been processed, implemented by waiting for swap-buffer
 * count 0 via loader_dri3_wait_for_sbc.  The returned ust/msc/sbc
 * values are deliberately discarded; only the wait matters.
 */
1527 * Make sure the server has flushed all pending swap buffers to hardware
1528 * for this drawable. Ideally we'd want to send an X protocol request to
1529 * have the server block our connection until the swaps are complete. That
1530 * would avoid the potential round-trip here.
1533 loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable
*draw
)
1535 int64_t ust
, msc
, sbc
;
/* target_sbc == 0 means "wait for the most recent swap to complete". */
1537 (void) loader_dri3_wait_for_sbc(draw
, 0, &ust
, &msc
, &sbc
);
/*
 * loader_dri3_close_screen — under blit_context.mtx, destroy the cached
 * blit context if it belongs to the screen being closed, so the shared
 * single-screen cache never holds a context for a dead screen.
 *
 * NOTE(review): the function's closing brace lies beyond this chunk;
 * nothing below past the mtx_unlock is visible here.
 */
1541 * Perform any cleanup associated with a close screen operation.
1542 * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
1544 * This function destroys the screen's cached swap context if any.
1547 loader_dri3_close_screen(__DRIscreen
*dri_screen
)
/* Serialize against concurrent users of the shared blit_context. */
1549 mtx_lock(&blit_context
.mtx
);
1550 if (blit_context
.ctx
&& blit_context
.cur_screen
== dri_screen
) {
1551 blit_context
.core
->destroyContext(blit_context
.ctx
);
/* Clear the pointer so a later user re-creates the context. */
1552 blit_context
.ctx
= NULL
;
1554 mtx_unlock(&blit_context
.mtx
);