}
+/**
+ * Flush the primitive queue if it references the given buffer.
+ *
+ * Otherwise, DMA commands affecting the referenced buffer would be emitted
+ * too late.
+ */
+void svga_hwtnl_flush_buffer( struct svga_context *svga,
+                              struct pipe_resource *buffer )
+{
+   if (svga_hwtnl_is_buffer_referred(svga->hwtnl, buffer)) {
+      svga_hwtnl_flush_retry(svga);
+   }
+}
+
+
/* Emit all operations pending on host surfaces.
*/
void svga_surfaces_flush(struct svga_context *svga)
struct pipe_fence_handle **pfence );
void svga_hwtnl_flush_retry( struct svga_context *svga );
+void svga_hwtnl_flush_buffer( struct svga_context *svga,
+                              struct pipe_resource *buffer );
void svga_surfaces_flush(struct svga_context *svga);
}
+/**
+ * Determine whether the specified buffer is referenced by the primitive
+ * queue, that is, by queued primitives for which no commands have been
+ * written yet.
+ */
+boolean
+svga_hwtnl_is_buffer_referred( struct svga_hwtnl *hwtnl,
+                               struct pipe_resource *buffer)
+{
+   unsigned i;
+
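+   /* User buffers are never reported as referenced by the queue. */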
+   if (svga_buffer_is_user_buffer(buffer)) {
+      return FALSE;
+   }
+
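+   /* Nothing is queued, so nothing can reference the buffer. */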
+   if (!hwtnl->cmd.prim_count) {
+      return FALSE;
+   }
+
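+   /* Check the vertex buffers bound by the queued vertex declarations. */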
+   for (i = 0; i < hwtnl->cmd.vdecl_count; ++i) {
+      if (hwtnl->cmd.vdecl_vb[i] == buffer) {
+         return TRUE;
+      }
+   }
+
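+   /* Check the index buffer of each queued primitive. */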
+   for (i = 0; i < hwtnl->cmd.prim_count; ++i) {
+      if (hwtnl->cmd.prim_ib[i] == buffer) {
+         return TRUE;
+      }
+   }
+
+   return FALSE;
+}
+
enum pipe_error
svga_hwtnl_flush( struct svga_hwtnl *hwtnl )
unsigned start,
unsigned count );
+boolean
+svga_hwtnl_is_buffer_referred( struct svga_hwtnl *hwtnl,
+                               struct pipe_resource *buffer );
+
enum pipe_error
svga_hwtnl_flush( struct svga_hwtnl *hwtnl );
if (usage & PIPE_TRANSFER_WRITE) {
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
/*
- * Finish writing any pending DMA commands, and tell the host to discard
- * the buffer contents on the next DMA operation.
+ * Flush any pending primitives, finish writing any pending DMA
+ * commands, and tell the host to discard the buffer contents on
+ * the next DMA operation.
*/
+ svga_hwtnl_flush_buffer(svga, resource);
+
if (sbuf->dma.pending) {
svga_buffer_upload_flush(svga, sbuf);
}
} else {
/*
- * Synchronizing, so finish writing any pending DMA command, and
- * ensure the next DMA will be done in order.
+ * Synchronizing, so flush any pending primitives, finish writing any
+ * pending DMA command, and ensure the next DMA will be done in order.
*/
+ svga_hwtnl_flush_buffer(svga, resource);
+
if (sbuf->dma.pending) {
svga_buffer_upload_flush(svga, sbuf);