/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "pipe/p_shader_tokens.h"
27 #include "pipe/p_context.h"
28 #include "pipe/p_defines.h"
29 #include "pipe/p_screen.h"
30 #include "pipe/p_state.h"
31 #include "util/u_inlines.h"
32 #include "util/u_memory.h"
33 #include "util/format/u_format.h"
34 #include "util/u_prim.h"
35 #include "util/u_transfer.h"
36 #include "util/u_helpers.h"
37 #include "util/slab.h"
38 #include "util/u_upload_mgr.h"
39 #include "util/u_blitter.h"
40 #include "tgsi/tgsi_text.h"
41 #include "indices/u_primconvert.h"
43 #include "pipebuffer/pb_buffer.h"
45 #include "virgl_encode.h"
46 #include "virgl_context.h"
47 #include "virtio-gpu/virgl_protocol.h"
48 #include "virgl_resource.h"
49 #include "virgl_screen.h"
50 #include "virgl_staging_mgr.h"
52 struct virgl_vertex_elements_state
{
54 uint8_t binding_map
[PIPE_MAX_ATTRIBS
];
58 static uint32_t next_handle
;
59 uint32_t virgl_object_assign_handle(void)
65 virgl_can_rebind_resource(struct virgl_context
*vctx
,
66 struct pipe_resource
*res
)
68 /* We cannot rebind resources that are referenced by host objects, which
71 * - VIRGL_OBJECT_SURFACE
72 * - VIRGL_OBJECT_SAMPLER_VIEW
73 * - VIRGL_OBJECT_STREAMOUT_TARGET
75 * Because surfaces cannot be created from buffers, we require the resource
76 * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
78 const unsigned unsupported_bind
= (PIPE_BIND_SAMPLER_VIEW
|
79 PIPE_BIND_STREAM_OUTPUT
);
80 const unsigned bind_history
= virgl_resource(res
)->bind_history
;
81 return res
->target
== PIPE_BUFFER
&& !(bind_history
& unsupported_bind
);
85 virgl_rebind_resource(struct virgl_context
*vctx
,
86 struct pipe_resource
*res
)
88 /* Queries use internally created buffers and do not go through transfers.
89 * Index buffers are not bindable. They are not tracked.
91 ASSERTED
const unsigned tracked_bind
= (PIPE_BIND_VERTEX_BUFFER
|
92 PIPE_BIND_CONSTANT_BUFFER
|
93 PIPE_BIND_SHADER_BUFFER
|
94 PIPE_BIND_SHADER_IMAGE
);
95 const unsigned bind_history
= virgl_resource(res
)->bind_history
;
98 assert(virgl_can_rebind_resource(vctx
, res
) &&
99 (bind_history
& tracked_bind
) == bind_history
);
101 if (bind_history
& PIPE_BIND_VERTEX_BUFFER
) {
102 for (i
= 0; i
< vctx
->num_vertex_buffers
; i
++) {
103 if (vctx
->vertex_buffer
[i
].buffer
.resource
== res
) {
104 vctx
->vertex_array_dirty
= true;
110 if (bind_history
& PIPE_BIND_SHADER_BUFFER
) {
111 uint32_t remaining_mask
= vctx
->atomic_buffer_enabled_mask
;
112 while (remaining_mask
) {
113 int i
= u_bit_scan(&remaining_mask
);
114 if (vctx
->atomic_buffers
[i
].buffer
== res
) {
115 const struct pipe_shader_buffer
*abo
= &vctx
->atomic_buffers
[i
];
116 virgl_encode_set_hw_atomic_buffers(vctx
, i
, 1, abo
);
121 /* check per-stage shader bindings */
122 if (bind_history
& (PIPE_BIND_CONSTANT_BUFFER
|
123 PIPE_BIND_SHADER_BUFFER
|
124 PIPE_BIND_SHADER_IMAGE
)) {
125 enum pipe_shader_type shader_type
;
126 for (shader_type
= 0; shader_type
< PIPE_SHADER_TYPES
; shader_type
++) {
127 const struct virgl_shader_binding_state
*binding
=
128 &vctx
->shader_bindings
[shader_type
];
130 if (bind_history
& PIPE_BIND_CONSTANT_BUFFER
) {
131 uint32_t remaining_mask
= binding
->ubo_enabled_mask
;
132 while (remaining_mask
) {
133 int i
= u_bit_scan(&remaining_mask
);
134 if (binding
->ubos
[i
].buffer
== res
) {
135 const struct pipe_constant_buffer
*ubo
= &binding
->ubos
[i
];
136 virgl_encoder_set_uniform_buffer(vctx
, shader_type
, i
,
139 virgl_resource(res
));
144 if (bind_history
& PIPE_BIND_SHADER_BUFFER
) {
145 uint32_t remaining_mask
= binding
->ssbo_enabled_mask
;
146 while (remaining_mask
) {
147 int i
= u_bit_scan(&remaining_mask
);
148 if (binding
->ssbos
[i
].buffer
== res
) {
149 const struct pipe_shader_buffer
*ssbo
= &binding
->ssbos
[i
];
150 virgl_encode_set_shader_buffers(vctx
, shader_type
, i
, 1,
156 if (bind_history
& PIPE_BIND_SHADER_IMAGE
) {
157 uint32_t remaining_mask
= binding
->image_enabled_mask
;
158 while (remaining_mask
) {
159 int i
= u_bit_scan(&remaining_mask
);
160 if (binding
->images
[i
].resource
== res
) {
161 const struct pipe_image_view
*image
= &binding
->images
[i
];
162 virgl_encode_set_shader_images(vctx
, shader_type
, i
, 1,
171 static void virgl_attach_res_framebuffer(struct virgl_context
*vctx
)
173 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
174 struct pipe_surface
*surf
;
175 struct virgl_resource
*res
;
178 surf
= vctx
->framebuffer
.zsbuf
;
180 res
= virgl_resource(surf
->texture
);
182 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
183 virgl_resource_dirty(res
, surf
->u
.tex
.level
);
186 for (i
= 0; i
< vctx
->framebuffer
.nr_cbufs
; i
++) {
187 surf
= vctx
->framebuffer
.cbufs
[i
];
189 res
= virgl_resource(surf
->texture
);
191 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
192 virgl_resource_dirty(res
, surf
->u
.tex
.level
);
198 static void virgl_attach_res_sampler_views(struct virgl_context
*vctx
,
199 enum pipe_shader_type shader_type
)
201 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
202 const struct virgl_shader_binding_state
*binding
=
203 &vctx
->shader_bindings
[shader_type
];
204 uint32_t remaining_mask
= binding
->view_enabled_mask
;
205 struct virgl_resource
*res
;
207 while (remaining_mask
) {
208 int i
= u_bit_scan(&remaining_mask
);
209 assert(binding
->views
[i
] && binding
->views
[i
]->texture
);
210 res
= virgl_resource(binding
->views
[i
]->texture
);
211 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
215 static void virgl_attach_res_vertex_buffers(struct virgl_context
*vctx
)
217 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
218 struct virgl_resource
*res
;
221 for (i
= 0; i
< vctx
->num_vertex_buffers
; i
++) {
222 res
= virgl_resource(vctx
->vertex_buffer
[i
].buffer
.resource
);
224 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
228 static void virgl_attach_res_index_buffer(struct virgl_context
*vctx
,
229 struct virgl_indexbuf
*ib
)
231 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
232 struct virgl_resource
*res
;
234 res
= virgl_resource(ib
->buffer
);
236 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
239 static void virgl_attach_res_so_targets(struct virgl_context
*vctx
)
241 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
242 struct virgl_resource
*res
;
245 for (i
= 0; i
< vctx
->num_so_targets
; i
++) {
246 res
= virgl_resource(vctx
->so_targets
[i
].base
.buffer
);
248 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
252 static void virgl_attach_res_uniform_buffers(struct virgl_context
*vctx
,
253 enum pipe_shader_type shader_type
)
255 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
256 const struct virgl_shader_binding_state
*binding
=
257 &vctx
->shader_bindings
[shader_type
];
258 uint32_t remaining_mask
= binding
->ubo_enabled_mask
;
259 struct virgl_resource
*res
;
261 while (remaining_mask
) {
262 int i
= u_bit_scan(&remaining_mask
);
263 res
= virgl_resource(binding
->ubos
[i
].buffer
);
265 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
269 static void virgl_attach_res_shader_buffers(struct virgl_context
*vctx
,
270 enum pipe_shader_type shader_type
)
272 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
273 const struct virgl_shader_binding_state
*binding
=
274 &vctx
->shader_bindings
[shader_type
];
275 uint32_t remaining_mask
= binding
->ssbo_enabled_mask
;
276 struct virgl_resource
*res
;
278 while (remaining_mask
) {
279 int i
= u_bit_scan(&remaining_mask
);
280 res
= virgl_resource(binding
->ssbos
[i
].buffer
);
282 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
286 static void virgl_attach_res_shader_images(struct virgl_context
*vctx
,
287 enum pipe_shader_type shader_type
)
289 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
290 const struct virgl_shader_binding_state
*binding
=
291 &vctx
->shader_bindings
[shader_type
];
292 uint32_t remaining_mask
= binding
->image_enabled_mask
;
293 struct virgl_resource
*res
;
295 while (remaining_mask
) {
296 int i
= u_bit_scan(&remaining_mask
);
297 res
= virgl_resource(binding
->images
[i
].resource
);
299 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
303 static void virgl_attach_res_atomic_buffers(struct virgl_context
*vctx
)
305 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
306 uint32_t remaining_mask
= vctx
->atomic_buffer_enabled_mask
;
307 struct virgl_resource
*res
;
309 while (remaining_mask
) {
310 int i
= u_bit_scan(&remaining_mask
);
311 res
= virgl_resource(vctx
->atomic_buffers
[i
].buffer
);
313 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
318 * after flushing, the hw context still has a bunch of
319 * resources bound, so we need to rebind those here.
321 static void virgl_reemit_draw_resources(struct virgl_context
*vctx
)
323 enum pipe_shader_type shader_type
;
325 /* reattach any flushed resources */
326 /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
327 virgl_attach_res_framebuffer(vctx
);
329 for (shader_type
= 0; shader_type
< PIPE_SHADER_COMPUTE
; shader_type
++) {
330 virgl_attach_res_sampler_views(vctx
, shader_type
);
331 virgl_attach_res_uniform_buffers(vctx
, shader_type
);
332 virgl_attach_res_shader_buffers(vctx
, shader_type
);
333 virgl_attach_res_shader_images(vctx
, shader_type
);
335 virgl_attach_res_atomic_buffers(vctx
);
336 virgl_attach_res_vertex_buffers(vctx
);
337 virgl_attach_res_so_targets(vctx
);
340 static void virgl_reemit_compute_resources(struct virgl_context
*vctx
)
342 virgl_attach_res_sampler_views(vctx
, PIPE_SHADER_COMPUTE
);
343 virgl_attach_res_uniform_buffers(vctx
, PIPE_SHADER_COMPUTE
);
344 virgl_attach_res_shader_buffers(vctx
, PIPE_SHADER_COMPUTE
);
345 virgl_attach_res_shader_images(vctx
, PIPE_SHADER_COMPUTE
);
347 virgl_attach_res_atomic_buffers(vctx
);
350 static struct pipe_surface
*virgl_create_surface(struct pipe_context
*ctx
,
351 struct pipe_resource
*resource
,
352 const struct pipe_surface
*templ
)
354 struct virgl_context
*vctx
= virgl_context(ctx
);
355 struct virgl_surface
*surf
;
356 struct virgl_resource
*res
= virgl_resource(resource
);
359 /* no support for buffer surfaces */
360 if (resource
->target
== PIPE_BUFFER
)
363 surf
= CALLOC_STRUCT(virgl_surface
);
367 assert(ctx
->screen
->get_param(ctx
->screen
,
368 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL
) ||
369 (util_format_is_srgb(templ
->format
) ==
370 util_format_is_srgb(resource
->format
)));
372 virgl_resource_dirty(res
, 0);
373 handle
= virgl_object_assign_handle();
374 pipe_reference_init(&surf
->base
.reference
, 1);
375 pipe_resource_reference(&surf
->base
.texture
, resource
);
376 surf
->base
.context
= ctx
;
377 surf
->base
.format
= templ
->format
;
379 surf
->base
.width
= u_minify(resource
->width0
, templ
->u
.tex
.level
);
380 surf
->base
.height
= u_minify(resource
->height0
, templ
->u
.tex
.level
);
381 surf
->base
.u
.tex
.level
= templ
->u
.tex
.level
;
382 surf
->base
.u
.tex
.first_layer
= templ
->u
.tex
.first_layer
;
383 surf
->base
.u
.tex
.last_layer
= templ
->u
.tex
.last_layer
;
385 virgl_encoder_create_surface(vctx
, handle
, res
, &surf
->base
);
386 surf
->handle
= handle
;
390 static void virgl_surface_destroy(struct pipe_context
*ctx
,
391 struct pipe_surface
*psurf
)
393 struct virgl_context
*vctx
= virgl_context(ctx
);
394 struct virgl_surface
*surf
= virgl_surface(psurf
);
396 pipe_resource_reference(&surf
->base
.texture
, NULL
);
397 virgl_encode_delete_object(vctx
, surf
->handle
, VIRGL_OBJECT_SURFACE
);
401 static void *virgl_create_blend_state(struct pipe_context
*ctx
,
402 const struct pipe_blend_state
*blend_state
)
404 struct virgl_context
*vctx
= virgl_context(ctx
);
406 handle
= virgl_object_assign_handle();
408 virgl_encode_blend_state(vctx
, handle
, blend_state
);
409 return (void *)(unsigned long)handle
;
413 static void virgl_bind_blend_state(struct pipe_context
*ctx
,
416 struct virgl_context
*vctx
= virgl_context(ctx
);
417 uint32_t handle
= (unsigned long)blend_state
;
418 virgl_encode_bind_object(vctx
, handle
, VIRGL_OBJECT_BLEND
);
421 static void virgl_delete_blend_state(struct pipe_context
*ctx
,
424 struct virgl_context
*vctx
= virgl_context(ctx
);
425 uint32_t handle
= (unsigned long)blend_state
;
426 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_BLEND
);
429 static void *virgl_create_depth_stencil_alpha_state(struct pipe_context
*ctx
,
430 const struct pipe_depth_stencil_alpha_state
*blend_state
)
432 struct virgl_context
*vctx
= virgl_context(ctx
);
434 handle
= virgl_object_assign_handle();
436 virgl_encode_dsa_state(vctx
, handle
, blend_state
);
437 return (void *)(unsigned long)handle
;
440 static void virgl_bind_depth_stencil_alpha_state(struct pipe_context
*ctx
,
443 struct virgl_context
*vctx
= virgl_context(ctx
);
444 uint32_t handle
= (unsigned long)blend_state
;
445 virgl_encode_bind_object(vctx
, handle
, VIRGL_OBJECT_DSA
);
448 static void virgl_delete_depth_stencil_alpha_state(struct pipe_context
*ctx
,
451 struct virgl_context
*vctx
= virgl_context(ctx
);
452 uint32_t handle
= (unsigned long)dsa_state
;
453 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_DSA
);
456 static void *virgl_create_rasterizer_state(struct pipe_context
*ctx
,
457 const struct pipe_rasterizer_state
*rs_state
)
459 struct virgl_context
*vctx
= virgl_context(ctx
);
460 struct virgl_rasterizer_state
*vrs
= CALLOC_STRUCT(virgl_rasterizer_state
);
465 vrs
->handle
= virgl_object_assign_handle();
467 assert(rs_state
->depth_clip_near
||
468 virgl_screen(ctx
->screen
)->caps
.caps
.v1
.bset
.depth_clip_disable
);
470 virgl_encode_rasterizer_state(vctx
, vrs
->handle
, rs_state
);
474 static void virgl_bind_rasterizer_state(struct pipe_context
*ctx
,
477 struct virgl_context
*vctx
= virgl_context(ctx
);
480 struct virgl_rasterizer_state
*vrs
= rs_state
;
481 vctx
->rs_state
= *vrs
;
482 handle
= vrs
->handle
;
484 virgl_encode_bind_object(vctx
, handle
, VIRGL_OBJECT_RASTERIZER
);
487 static void virgl_delete_rasterizer_state(struct pipe_context
*ctx
,
490 struct virgl_context
*vctx
= virgl_context(ctx
);
491 struct virgl_rasterizer_state
*vrs
= rs_state
;
492 virgl_encode_delete_object(vctx
, vrs
->handle
, VIRGL_OBJECT_RASTERIZER
);
496 static void virgl_set_framebuffer_state(struct pipe_context
*ctx
,
497 const struct pipe_framebuffer_state
*state
)
499 struct virgl_context
*vctx
= virgl_context(ctx
);
501 vctx
->framebuffer
= *state
;
502 virgl_encoder_set_framebuffer_state(vctx
, state
);
503 virgl_attach_res_framebuffer(vctx
);
/* Forward viewport state straight to the host encoder. */
static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}
515 static void *virgl_create_vertex_elements_state(struct pipe_context
*ctx
,
516 unsigned num_elements
,
517 const struct pipe_vertex_element
*elements
)
519 struct pipe_vertex_element new_elements
[PIPE_MAX_ATTRIBS
];
520 struct virgl_context
*vctx
= virgl_context(ctx
);
521 struct virgl_vertex_elements_state
*state
=
522 CALLOC_STRUCT(virgl_vertex_elements_state
);
524 for (int i
= 0; i
< num_elements
; ++i
) {
525 if (elements
[i
].instance_divisor
) {
526 /* Virglrenderer doesn't deal with instance_divisor correctly if
527 * there isn't a 1:1 relationship between elements and bindings.
528 * So let's make sure there is, by duplicating bindings.
530 for (int j
= 0; j
< num_elements
; ++j
) {
531 new_elements
[j
] = elements
[j
];
532 new_elements
[j
].vertex_buffer_index
= j
;
533 state
->binding_map
[j
] = elements
[j
].vertex_buffer_index
;
535 elements
= new_elements
;
536 state
->num_bindings
= num_elements
;
541 state
->handle
= virgl_object_assign_handle();
542 virgl_encoder_create_vertex_elements(vctx
, state
->handle
,
543 num_elements
, elements
);
547 static void virgl_delete_vertex_elements_state(struct pipe_context
*ctx
,
550 struct virgl_context
*vctx
= virgl_context(ctx
);
551 struct virgl_vertex_elements_state
*state
=
552 (struct virgl_vertex_elements_state
*)ve
;
553 virgl_encode_delete_object(vctx
, state
->handle
, VIRGL_OBJECT_VERTEX_ELEMENTS
);
557 static void virgl_bind_vertex_elements_state(struct pipe_context
*ctx
,
560 struct virgl_context
*vctx
= virgl_context(ctx
);
561 struct virgl_vertex_elements_state
*state
=
562 (struct virgl_vertex_elements_state
*)ve
;
563 vctx
->vertex_elements
= state
;
564 virgl_encode_bind_object(vctx
, state
? state
->handle
: 0,
565 VIRGL_OBJECT_VERTEX_ELEMENTS
);
566 vctx
->vertex_array_dirty
= TRUE
;
569 static void virgl_set_vertex_buffers(struct pipe_context
*ctx
,
571 unsigned num_buffers
,
572 const struct pipe_vertex_buffer
*buffers
)
574 struct virgl_context
*vctx
= virgl_context(ctx
);
576 util_set_vertex_buffers_count(vctx
->vertex_buffer
,
577 &vctx
->num_vertex_buffers
,
578 buffers
, start_slot
, num_buffers
);
581 for (unsigned i
= 0; i
< num_buffers
; i
++) {
582 struct virgl_resource
*res
=
583 virgl_resource(buffers
[i
].buffer
.resource
);
584 if (res
&& !buffers
[i
].is_user_buffer
)
585 res
->bind_history
|= PIPE_BIND_VERTEX_BUFFER
;
589 vctx
->vertex_array_dirty
= TRUE
;
592 static void virgl_hw_set_vertex_buffers(struct virgl_context
*vctx
)
594 if (vctx
->vertex_array_dirty
) {
595 struct virgl_vertex_elements_state
*ve
= vctx
->vertex_elements
;
597 if (ve
->num_bindings
) {
598 struct pipe_vertex_buffer vertex_buffers
[PIPE_MAX_ATTRIBS
];
599 for (int i
= 0; i
< ve
->num_bindings
; ++i
)
600 vertex_buffers
[i
] = vctx
->vertex_buffer
[ve
->binding_map
[i
]];
602 virgl_encoder_set_vertex_buffers(vctx
, ve
->num_bindings
, vertex_buffers
);
604 virgl_encoder_set_vertex_buffers(vctx
, vctx
->num_vertex_buffers
, vctx
->vertex_buffer
);
606 virgl_attach_res_vertex_buffers(vctx
);
608 vctx
->vertex_array_dirty
= FALSE
;
/* Forward the stencil reference values to the host encoder. */
static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encoder_set_stencil_ref(vctx, ref);
}

/* Forward the blend color to the host encoder. */
static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encoder_set_blend_color(vctx, color);
}

/* Encode the index buffer and re-emit its hw resource reference. */
static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}
633 static void virgl_set_constant_buffer(struct pipe_context
*ctx
,
634 enum pipe_shader_type shader
, uint index
,
635 const struct pipe_constant_buffer
*buf
)
637 struct virgl_context
*vctx
= virgl_context(ctx
);
638 struct virgl_shader_binding_state
*binding
=
639 &vctx
->shader_bindings
[shader
];
641 if (buf
&& buf
->buffer
) {
642 struct virgl_resource
*res
= virgl_resource(buf
->buffer
);
643 res
->bind_history
|= PIPE_BIND_CONSTANT_BUFFER
;
645 virgl_encoder_set_uniform_buffer(vctx
, shader
, index
,
647 buf
->buffer_size
, res
);
649 pipe_resource_reference(&binding
->ubos
[index
].buffer
, buf
->buffer
);
650 binding
->ubos
[index
] = *buf
;
651 binding
->ubo_enabled_mask
|= 1 << index
;
653 static const struct pipe_constant_buffer dummy_ubo
;
656 virgl_encoder_write_constant_buffer(vctx
, shader
, index
,
657 buf
->buffer_size
/ 4,
660 pipe_resource_reference(&binding
->ubos
[index
].buffer
, NULL
);
661 binding
->ubo_enabled_mask
&= ~(1 << index
);
665 static void *virgl_shader_encoder(struct pipe_context
*ctx
,
666 const struct pipe_shader_state
*shader
,
669 struct virgl_context
*vctx
= virgl_context(ctx
);
671 struct tgsi_token
*new_tokens
;
674 new_tokens
= virgl_tgsi_transform(vctx
, shader
->tokens
);
678 handle
= virgl_object_assign_handle();
679 /* encode VS state */
680 ret
= virgl_encode_shader_state(vctx
, handle
, type
,
681 &shader
->stream_output
, 0,
688 return (void *)(unsigned long)handle
;
691 static void *virgl_create_vs_state(struct pipe_context
*ctx
,
692 const struct pipe_shader_state
*shader
)
694 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_VERTEX
);
697 static void *virgl_create_tcs_state(struct pipe_context
*ctx
,
698 const struct pipe_shader_state
*shader
)
700 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_TESS_CTRL
);
703 static void *virgl_create_tes_state(struct pipe_context
*ctx
,
704 const struct pipe_shader_state
*shader
)
706 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_TESS_EVAL
);
709 static void *virgl_create_gs_state(struct pipe_context
*ctx
,
710 const struct pipe_shader_state
*shader
)
712 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_GEOMETRY
);
715 static void *virgl_create_fs_state(struct pipe_context
*ctx
,
716 const struct pipe_shader_state
*shader
)
718 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_FRAGMENT
);
722 virgl_delete_fs_state(struct pipe_context
*ctx
,
725 uint32_t handle
= (unsigned long)fs
;
726 struct virgl_context
*vctx
= virgl_context(ctx
);
728 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
732 virgl_delete_gs_state(struct pipe_context
*ctx
,
735 uint32_t handle
= (unsigned long)gs
;
736 struct virgl_context
*vctx
= virgl_context(ctx
);
738 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
742 virgl_delete_vs_state(struct pipe_context
*ctx
,
745 uint32_t handle
= (unsigned long)vs
;
746 struct virgl_context
*vctx
= virgl_context(ctx
);
748 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
752 virgl_delete_tcs_state(struct pipe_context
*ctx
,
755 uint32_t handle
= (unsigned long)tcs
;
756 struct virgl_context
*vctx
= virgl_context(ctx
);
758 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
762 virgl_delete_tes_state(struct pipe_context
*ctx
,
765 uint32_t handle
= (unsigned long)tes
;
766 struct virgl_context
*vctx
= virgl_context(ctx
);
768 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
771 static void virgl_bind_vs_state(struct pipe_context
*ctx
,
774 uint32_t handle
= (unsigned long)vss
;
775 struct virgl_context
*vctx
= virgl_context(ctx
);
777 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_VERTEX
);
780 static void virgl_bind_tcs_state(struct pipe_context
*ctx
,
783 uint32_t handle
= (unsigned long)vss
;
784 struct virgl_context
*vctx
= virgl_context(ctx
);
786 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_TESS_CTRL
);
789 static void virgl_bind_tes_state(struct pipe_context
*ctx
,
792 uint32_t handle
= (unsigned long)vss
;
793 struct virgl_context
*vctx
= virgl_context(ctx
);
795 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_TESS_EVAL
);
798 static void virgl_bind_gs_state(struct pipe_context
*ctx
,
801 uint32_t handle
= (unsigned long)vss
;
802 struct virgl_context
*vctx
= virgl_context(ctx
);
804 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_GEOMETRY
);
808 static void virgl_bind_fs_state(struct pipe_context
*ctx
,
811 uint32_t handle
= (unsigned long)vss
;
812 struct virgl_context
*vctx
= virgl_context(ctx
);
814 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_FRAGMENT
);
817 static void virgl_clear(struct pipe_context
*ctx
,
819 const struct pipe_scissor_state
*scissor_state
,
820 const union pipe_color_union
*color
,
821 double depth
, unsigned stencil
)
823 struct virgl_context
*vctx
= virgl_context(ctx
);
825 if (!vctx
->num_draws
)
826 virgl_reemit_draw_resources(vctx
);
829 virgl_encode_clear(vctx
, buffers
, color
, depth
, stencil
);
/* Encode a host-side texture clear. */
static void virgl_clear_texture(struct pipe_context *ctx,
                                struct pipe_resource *res,
                                unsigned int level,
                                const struct pipe_box *box,
                                const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vres = virgl_resource(res);

   virgl_encode_clear_texture(vctx, vres, level, box, data);

   /* Mark as dirty, since we are updating the host side resource
    * without going through the corresponding guest side resource, and
    * hence the two will diverge.
    */
   virgl_resource_dirty(vres, level);
}
850 static void virgl_draw_vbo(struct pipe_context
*ctx
,
851 const struct pipe_draw_info
*dinfo
)
853 struct virgl_context
*vctx
= virgl_context(ctx
);
854 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
855 struct virgl_indexbuf ib
= {};
856 struct pipe_draw_info info
= *dinfo
;
858 if (!dinfo
->count_from_stream_output
&& !dinfo
->indirect
&&
859 !dinfo
->primitive_restart
&&
860 !u_trim_pipe_prim(dinfo
->mode
, (unsigned*)&dinfo
->count
))
863 if (!(rs
->caps
.caps
.v1
.prim_mask
& (1 << dinfo
->mode
))) {
864 util_primconvert_save_rasterizer_state(vctx
->primconvert
, &vctx
->rs_state
.rs
);
865 util_primconvert_draw_vbo(vctx
->primconvert
, dinfo
);
868 if (info
.index_size
) {
869 pipe_resource_reference(&ib
.buffer
, info
.has_user_indices
? NULL
: info
.index
.resource
);
870 ib
.user_buffer
= info
.has_user_indices
? info
.index
.user
: NULL
;
871 ib
.index_size
= dinfo
->index_size
;
872 ib
.offset
= info
.start
* ib
.index_size
;
874 if (ib
.user_buffer
) {
875 u_upload_data(vctx
->uploader
, 0, info
.count
* ib
.index_size
, 4,
876 ib
.user_buffer
, &ib
.offset
, &ib
.buffer
);
877 ib
.user_buffer
= NULL
;
881 if (!vctx
->num_draws
)
882 virgl_reemit_draw_resources(vctx
);
885 virgl_hw_set_vertex_buffers(vctx
);
887 virgl_hw_set_index_buffer(vctx
, &ib
);
889 virgl_encoder_draw_vbo(vctx
, &info
);
891 pipe_resource_reference(&ib
.buffer
, NULL
);
895 static void virgl_submit_cmd(struct virgl_winsys
*vws
,
896 struct virgl_cmd_buf
*cbuf
,
897 struct pipe_fence_handle
**fence
)
899 if (unlikely(virgl_debug
& VIRGL_DEBUG_SYNC
)) {
900 struct pipe_fence_handle
*sync_fence
= NULL
;
902 vws
->submit_cmd(vws
, cbuf
, &sync_fence
);
904 vws
->fence_wait(vws
, sync_fence
, PIPE_TIMEOUT_INFINITE
);
905 vws
->fence_reference(vws
, &sync_fence
, NULL
);
907 vws
->submit_cmd(vws
, cbuf
, fence
);
911 static void virgl_flush_eq(struct virgl_context
*ctx
, void *closure
,
912 struct pipe_fence_handle
**fence
)
914 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
916 /* skip empty cbuf */
917 if (ctx
->cbuf
->cdw
== ctx
->cbuf_initial_cdw
&&
918 ctx
->queue
.num_dwords
== 0 &&
923 u_upload_unmap(ctx
->uploader
);
925 /* send the buffer to the remote side for decoding */
926 ctx
->num_draws
= ctx
->num_compute
= 0;
928 virgl_transfer_queue_clear(&ctx
->queue
, ctx
->cbuf
);
930 virgl_submit_cmd(rs
->vws
, ctx
->cbuf
, fence
);
932 /* Reserve some space for transfers. */
933 if (ctx
->encoded_transfers
)
934 ctx
->cbuf
->cdw
= VIRGL_MAX_TBUF_DWORDS
;
936 virgl_encoder_set_sub_ctx(ctx
, ctx
->hw_sub_ctx_id
);
938 ctx
->cbuf_initial_cdw
= ctx
->cbuf
->cdw
;
940 /* We have flushed the command queue, including any pending copy transfers
941 * involving staging resources.
943 ctx
->queued_staging_res_size
= 0;
946 static void virgl_flush_from_st(struct pipe_context
*ctx
,
947 struct pipe_fence_handle
**fence
,
948 enum pipe_flush_flags flags
)
950 struct virgl_context
*vctx
= virgl_context(ctx
);
952 virgl_flush_eq(vctx
, vctx
, fence
);
955 static struct pipe_sampler_view
*virgl_create_sampler_view(struct pipe_context
*ctx
,
956 struct pipe_resource
*texture
,
957 const struct pipe_sampler_view
*state
)
959 struct virgl_context
*vctx
= virgl_context(ctx
);
960 struct virgl_sampler_view
*grview
;
962 struct virgl_resource
*res
;
967 grview
= CALLOC_STRUCT(virgl_sampler_view
);
971 res
= virgl_resource(texture
);
972 handle
= virgl_object_assign_handle();
973 virgl_encode_sampler_view(vctx
, handle
, res
, state
);
975 grview
->base
= *state
;
976 grview
->base
.reference
.count
= 1;
978 grview
->base
.texture
= NULL
;
979 grview
->base
.context
= ctx
;
980 pipe_resource_reference(&grview
->base
.texture
, texture
);
981 grview
->handle
= handle
;
982 return &grview
->base
;
985 static void virgl_set_sampler_views(struct pipe_context
*ctx
,
986 enum pipe_shader_type shader_type
,
989 struct pipe_sampler_view
**views
)
991 struct virgl_context
*vctx
= virgl_context(ctx
);
992 struct virgl_shader_binding_state
*binding
=
993 &vctx
->shader_bindings
[shader_type
];
995 binding
->view_enabled_mask
&= ~u_bit_consecutive(start_slot
, num_views
);
996 for (unsigned i
= 0; i
< num_views
; i
++) {
997 unsigned idx
= start_slot
+ i
;
998 if (views
&& views
[i
]) {
999 struct virgl_resource
*res
= virgl_resource(views
[i
]->texture
);
1000 res
->bind_history
|= PIPE_BIND_SAMPLER_VIEW
;
1002 pipe_sampler_view_reference(&binding
->views
[idx
], views
[i
]);
1003 binding
->view_enabled_mask
|= 1 << idx
;
1005 pipe_sampler_view_reference(&binding
->views
[idx
], NULL
);
1009 virgl_encode_set_sampler_views(vctx
, shader_type
,
1010 start_slot
, num_views
, (struct virgl_sampler_view
**)binding
->views
);
1011 virgl_attach_res_sampler_views(vctx
, shader_type
);
1015 virgl_texture_barrier(struct pipe_context
*ctx
, unsigned flags
)
1017 struct virgl_context
*vctx
= virgl_context(ctx
);
1018 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1020 if (!(rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_TEXTURE_BARRIER
) &&
1021 !(rs
->caps
.caps
.v2
.capability_bits_v2
& VIRGL_CAP_V2_BLEND_EQUATION
))
1023 virgl_encode_texture_barrier(vctx
, flags
);
1026 static void virgl_destroy_sampler_view(struct pipe_context
*ctx
,
1027 struct pipe_sampler_view
*view
)
1029 struct virgl_context
*vctx
= virgl_context(ctx
);
1030 struct virgl_sampler_view
*grview
= virgl_sampler_view(view
);
1032 virgl_encode_delete_object(vctx
, grview
->handle
, VIRGL_OBJECT_SAMPLER_VIEW
);
1033 pipe_resource_reference(&view
->texture
, NULL
);
1037 static void *virgl_create_sampler_state(struct pipe_context
*ctx
,
1038 const struct pipe_sampler_state
*state
)
1040 struct virgl_context
*vctx
= virgl_context(ctx
);
1043 handle
= virgl_object_assign_handle();
1045 virgl_encode_sampler_state(vctx
, handle
, state
);
1046 return (void *)(unsigned long)handle
;
1049 static void virgl_delete_sampler_state(struct pipe_context
*ctx
,
1052 struct virgl_context
*vctx
= virgl_context(ctx
);
1053 uint32_t handle
= (unsigned long)ss
;
1055 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SAMPLER_STATE
);
1058 static void virgl_bind_sampler_states(struct pipe_context
*ctx
,
1059 enum pipe_shader_type shader
,
1060 unsigned start_slot
,
1061 unsigned num_samplers
,
1064 struct virgl_context
*vctx
= virgl_context(ctx
);
1065 uint32_t handles
[32];
1067 for (i
= 0; i
< num_samplers
; i
++) {
1068 handles
[i
] = (unsigned long)(samplers
[i
]);
1070 virgl_encode_bind_sampler_states(vctx
, shader
, start_slot
, num_samplers
, handles
);
1073 static void virgl_set_polygon_stipple(struct pipe_context
*ctx
,
1074 const struct pipe_poly_stipple
*ps
)
1076 struct virgl_context
*vctx
= virgl_context(ctx
);
1077 virgl_encoder_set_polygon_stipple(vctx
, ps
);
/* pipe_context::set_scissor_states callback — forwarded straight to the
 * command-stream encoder. */
static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   virgl_encoder_set_scissor_state(virgl_context(ctx), start_slot,
                                   num_scissor, ss);
}
/* pipe_context::set_sample_mask callback — forwarded straight to the
 * command-stream encoder. */
static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   virgl_encoder_set_sample_mask(virgl_context(ctx), sample_mask);
}
1096 static void virgl_set_min_samples(struct pipe_context
*ctx
,
1097 unsigned min_samples
)
1099 struct virgl_context
*vctx
= virgl_context(ctx
);
1100 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1102 if (!(rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_SET_MIN_SAMPLES
))
1104 virgl_encoder_set_min_samples(vctx
, min_samples
);
/* pipe_context::set_clip_state callback — forwarded straight to the
 * command-stream encoder. */
static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   virgl_encoder_set_clip_state(virgl_context(ctx), clip);
}
1114 static void virgl_set_tess_state(struct pipe_context
*ctx
,
1115 const float default_outer_level
[4],
1116 const float default_inner_level
[2])
1118 struct virgl_context
*vctx
= virgl_context(ctx
);
1119 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1121 if (!rs
->caps
.caps
.v1
.bset
.has_tessellation_shaders
)
1123 virgl_encode_set_tess_state(vctx
, default_outer_level
, default_inner_level
);
1126 static void virgl_resource_copy_region(struct pipe_context
*ctx
,
1127 struct pipe_resource
*dst
,
1129 unsigned dstx
, unsigned dsty
, unsigned dstz
,
1130 struct pipe_resource
*src
,
1132 const struct pipe_box
*src_box
)
1134 struct virgl_context
*vctx
= virgl_context(ctx
);
1135 struct virgl_resource
*dres
= virgl_resource(dst
);
1136 struct virgl_resource
*sres
= virgl_resource(src
);
1138 if (dres
->u
.b
.target
== PIPE_BUFFER
)
1139 util_range_add(&dres
->u
.b
, &dres
->valid_buffer_range
, dstx
, dstx
+ src_box
->width
);
1140 virgl_resource_dirty(dres
, dst_level
);
1142 virgl_encode_resource_copy_region(vctx
, dres
,
1143 dst_level
, dstx
, dsty
, dstz
,
/* pipe_context::flush_resource callback.
 *
 * Intentionally a no-op: virgl has nothing to do here, but the hook must
 * exist for state trackers that call it unconditionally.
 */
static void virgl_flush_resource(struct pipe_context *pipe,
                                 struct pipe_resource *resource)
{
}
1154 static void virgl_blit(struct pipe_context
*ctx
,
1155 const struct pipe_blit_info
*blit
)
1157 struct virgl_context
*vctx
= virgl_context(ctx
);
1158 struct virgl_resource
*dres
= virgl_resource(blit
->dst
.resource
);
1159 struct virgl_resource
*sres
= virgl_resource(blit
->src
.resource
);
1161 assert(ctx
->screen
->get_param(ctx
->screen
,
1162 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL
) ||
1163 (util_format_is_srgb(blit
->dst
.resource
->format
) ==
1164 util_format_is_srgb(blit
->dst
.format
)));
1166 virgl_resource_dirty(dres
, blit
->dst
.level
);
1167 virgl_encode_blit(vctx
, dres
, sres
,
1171 static void virgl_set_hw_atomic_buffers(struct pipe_context
*ctx
,
1172 unsigned start_slot
,
1174 const struct pipe_shader_buffer
*buffers
)
1176 struct virgl_context
*vctx
= virgl_context(ctx
);
1178 vctx
->atomic_buffer_enabled_mask
&= ~u_bit_consecutive(start_slot
, count
);
1179 for (unsigned i
= 0; i
< count
; i
++) {
1180 unsigned idx
= start_slot
+ i
;
1181 if (buffers
&& buffers
[i
].buffer
) {
1182 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
);
1183 res
->bind_history
|= PIPE_BIND_SHADER_BUFFER
;
1185 pipe_resource_reference(&vctx
->atomic_buffers
[idx
].buffer
,
1187 vctx
->atomic_buffers
[idx
] = buffers
[i
];
1188 vctx
->atomic_buffer_enabled_mask
|= 1 << idx
;
1190 pipe_resource_reference(&vctx
->atomic_buffers
[idx
].buffer
, NULL
);
1194 virgl_encode_set_hw_atomic_buffers(vctx
, start_slot
, count
, buffers
);
1197 static void virgl_set_shader_buffers(struct pipe_context
*ctx
,
1198 enum pipe_shader_type shader
,
1199 unsigned start_slot
, unsigned count
,
1200 const struct pipe_shader_buffer
*buffers
,
1201 unsigned writable_bitmask
)
1203 struct virgl_context
*vctx
= virgl_context(ctx
);
1204 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1205 struct virgl_shader_binding_state
*binding
=
1206 &vctx
->shader_bindings
[shader
];
1208 binding
->ssbo_enabled_mask
&= ~u_bit_consecutive(start_slot
, count
);
1209 for (unsigned i
= 0; i
< count
; i
++) {
1210 unsigned idx
= start_slot
+ i
;
1211 if (buffers
&& buffers
[i
].buffer
) {
1212 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
);
1213 res
->bind_history
|= PIPE_BIND_SHADER_BUFFER
;
1215 pipe_resource_reference(&binding
->ssbos
[idx
].buffer
, buffers
[i
].buffer
);
1216 binding
->ssbos
[idx
] = buffers
[i
];
1217 binding
->ssbo_enabled_mask
|= 1 << idx
;
1219 pipe_resource_reference(&binding
->ssbos
[idx
].buffer
, NULL
);
1223 uint32_t max_shader_buffer
= (shader
== PIPE_SHADER_FRAGMENT
|| shader
== PIPE_SHADER_COMPUTE
) ?
1224 rs
->caps
.caps
.v2
.max_shader_buffer_frag_compute
:
1225 rs
->caps
.caps
.v2
.max_shader_buffer_other_stages
;
1226 if (!max_shader_buffer
)
1228 virgl_encode_set_shader_buffers(vctx
, shader
, start_slot
, count
, buffers
);
1231 static void virgl_create_fence_fd(struct pipe_context
*ctx
,
1232 struct pipe_fence_handle
**fence
,
1234 enum pipe_fd_type type
)
1236 assert(type
== PIPE_FD_TYPE_NATIVE_SYNC
);
1237 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1239 if (rs
->vws
->cs_create_fence
)
1240 *fence
= rs
->vws
->cs_create_fence(rs
->vws
, fd
);
1243 static void virgl_fence_server_sync(struct pipe_context
*ctx
,
1244 struct pipe_fence_handle
*fence
)
1246 struct virgl_context
*vctx
= virgl_context(ctx
);
1247 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1249 if (rs
->vws
->fence_server_sync
)
1250 rs
->vws
->fence_server_sync(rs
->vws
, vctx
->cbuf
, fence
);
1253 static void virgl_set_shader_images(struct pipe_context
*ctx
,
1254 enum pipe_shader_type shader
,
1255 unsigned start_slot
, unsigned count
,
1256 const struct pipe_image_view
*images
)
1258 struct virgl_context
*vctx
= virgl_context(ctx
);
1259 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1260 struct virgl_shader_binding_state
*binding
=
1261 &vctx
->shader_bindings
[shader
];
1263 binding
->image_enabled_mask
&= ~u_bit_consecutive(start_slot
, count
);
1264 for (unsigned i
= 0; i
< count
; i
++) {
1265 unsigned idx
= start_slot
+ i
;
1266 if (images
&& images
[i
].resource
) {
1267 struct virgl_resource
*res
= virgl_resource(images
[i
].resource
);
1268 res
->bind_history
|= PIPE_BIND_SHADER_IMAGE
;
1270 pipe_resource_reference(&binding
->images
[idx
].resource
,
1271 images
[i
].resource
);
1272 binding
->images
[idx
] = images
[i
];
1273 binding
->image_enabled_mask
|= 1 << idx
;
1275 pipe_resource_reference(&binding
->images
[idx
].resource
, NULL
);
1279 uint32_t max_shader_images
= (shader
== PIPE_SHADER_FRAGMENT
|| shader
== PIPE_SHADER_COMPUTE
) ?
1280 rs
->caps
.caps
.v2
.max_shader_image_frag_compute
:
1281 rs
->caps
.caps
.v2
.max_shader_image_other_stages
;
1282 if (!max_shader_images
)
1284 virgl_encode_set_shader_images(vctx
, shader
, start_slot
, count
, images
);
1287 static void virgl_memory_barrier(struct pipe_context
*ctx
,
1290 struct virgl_context
*vctx
= virgl_context(ctx
);
1291 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1293 if (!(rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_MEMORY_BARRIER
))
1295 virgl_encode_memory_barrier(vctx
, flags
);
1298 static void *virgl_create_compute_state(struct pipe_context
*ctx
,
1299 const struct pipe_compute_state
*state
)
1301 struct virgl_context
*vctx
= virgl_context(ctx
);
1303 const struct tgsi_token
*new_tokens
= state
->prog
;
1304 struct pipe_stream_output_info so_info
= {};
1307 handle
= virgl_object_assign_handle();
1308 ret
= virgl_encode_shader_state(vctx
, handle
, PIPE_SHADER_COMPUTE
,
1310 state
->req_local_mem
,
1316 return (void *)(unsigned long)handle
;
1319 static void virgl_bind_compute_state(struct pipe_context
*ctx
, void *state
)
1321 uint32_t handle
= (unsigned long)state
;
1322 struct virgl_context
*vctx
= virgl_context(ctx
);
1324 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_COMPUTE
);
1327 static void virgl_delete_compute_state(struct pipe_context
*ctx
, void *state
)
1329 uint32_t handle
= (unsigned long)state
;
1330 struct virgl_context
*vctx
= virgl_context(ctx
);
1332 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
1335 static void virgl_launch_grid(struct pipe_context
*ctx
,
1336 const struct pipe_grid_info
*info
)
1338 struct virgl_context
*vctx
= virgl_context(ctx
);
1340 if (!vctx
->num_compute
)
1341 virgl_reemit_compute_resources(vctx
);
1342 vctx
->num_compute
++;
1344 virgl_encode_launch_grid(vctx
, info
);
1348 virgl_release_shader_binding(struct virgl_context
*vctx
,
1349 enum pipe_shader_type shader_type
)
1351 struct virgl_shader_binding_state
*binding
=
1352 &vctx
->shader_bindings
[shader_type
];
1354 while (binding
->view_enabled_mask
) {
1355 int i
= u_bit_scan(&binding
->view_enabled_mask
);
1356 pipe_sampler_view_reference(
1357 (struct pipe_sampler_view
**)&binding
->views
[i
], NULL
);
1360 while (binding
->ubo_enabled_mask
) {
1361 int i
= u_bit_scan(&binding
->ubo_enabled_mask
);
1362 pipe_resource_reference(&binding
->ubos
[i
].buffer
, NULL
);
1365 while (binding
->ssbo_enabled_mask
) {
1366 int i
= u_bit_scan(&binding
->ssbo_enabled_mask
);
1367 pipe_resource_reference(&binding
->ssbos
[i
].buffer
, NULL
);
1370 while (binding
->image_enabled_mask
) {
1371 int i
= u_bit_scan(&binding
->image_enabled_mask
);
1372 pipe_resource_reference(&binding
->images
[i
].resource
, NULL
);
1377 virgl_context_destroy( struct pipe_context
*ctx
)
1379 struct virgl_context
*vctx
= virgl_context(ctx
);
1380 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1381 enum pipe_shader_type shader_type
;
1383 vctx
->framebuffer
.zsbuf
= NULL
;
1384 vctx
->framebuffer
.nr_cbufs
= 0;
1385 virgl_encoder_destroy_sub_ctx(vctx
, vctx
->hw_sub_ctx_id
);
1386 virgl_flush_eq(vctx
, vctx
, NULL
);
1388 for (shader_type
= 0; shader_type
< PIPE_SHADER_TYPES
; shader_type
++)
1389 virgl_release_shader_binding(vctx
, shader_type
);
1391 while (vctx
->atomic_buffer_enabled_mask
) {
1392 int i
= u_bit_scan(&vctx
->atomic_buffer_enabled_mask
);
1393 pipe_resource_reference(&vctx
->atomic_buffers
[i
].buffer
, NULL
);
1396 rs
->vws
->cmd_buf_destroy(vctx
->cbuf
);
1398 u_upload_destroy(vctx
->uploader
);
1399 if (vctx
->supports_staging
)
1400 virgl_staging_destroy(&vctx
->staging
);
1401 util_primconvert_destroy(vctx
->primconvert
);
1402 virgl_transfer_queue_fini(&vctx
->queue
);
1404 slab_destroy_child(&vctx
->transfer_pool
);
1408 static void virgl_get_sample_position(struct pipe_context
*ctx
,
1409 unsigned sample_count
,
1413 struct virgl_context
*vctx
= virgl_context(ctx
);
1414 struct virgl_screen
*vs
= virgl_screen(vctx
->base
.screen
);
1416 if (sample_count
> vs
->caps
.caps
.v1
.max_samples
) {
1417 debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
1418 sample_count
, vs
->caps
.caps
.v1
.max_samples
);
1422 /* The following is basically copied from dri/i965gen6_get_sample_position
1423 * The only addition is that we hold the msaa positions for all sample
1424 * counts in a flat array. */
1426 if (sample_count
== 1) {
1427 out_value
[0] = out_value
[1] = 0.5f
;
1429 } else if (sample_count
== 2) {
1430 bits
= vs
->caps
.caps
.v2
.sample_locations
[0] >> (8 * index
);
1431 } else if (sample_count
<= 4) {
1432 bits
= vs
->caps
.caps
.v2
.sample_locations
[1] >> (8 * index
);
1433 } else if (sample_count
<= 8) {
1434 bits
= vs
->caps
.caps
.v2
.sample_locations
[2 + (index
>> 2)] >> (8 * (index
& 3));
1435 } else if (sample_count
<= 16) {
1436 bits
= vs
->caps
.caps
.v2
.sample_locations
[4 + (index
>> 2)] >> (8 * (index
& 3));
1438 out_value
[0] = ((bits
>> 4) & 0xf) / 16.0f
;
1439 out_value
[1] = (bits
& 0xf) / 16.0f
;
1441 if (virgl_debug
& VIRGL_DEBUG_VERBOSE
)
1442 debug_printf("VIRGL: sample postion [%2d/%2d] = (%f, %f)\n",
1443 index
, sample_count
, out_value
[0], out_value
[1]);
1446 static void virgl_send_tweaks(struct virgl_context
*vctx
, struct virgl_screen
*rs
)
1448 if (rs
->tweak_gles_emulate_bgra
)
1449 virgl_encode_tweak(vctx
, virgl_tweak_gles_brga_emulate
, 1);
1451 if (rs
->tweak_gles_apply_bgra_dest_swizzle
)
1452 virgl_encode_tweak(vctx
, virgl_tweak_gles_brga_apply_dest_swizzle
, 1);
1454 if (rs
->tweak_gles_tf3_value
> 0)
1455 virgl_encode_tweak(vctx
, virgl_tweak_gles_tf3_samples_passes_multiplier
,
1456 rs
->tweak_gles_tf3_value
);
1459 struct pipe_context
*virgl_context_create(struct pipe_screen
*pscreen
,
1463 struct virgl_context
*vctx
;
1464 struct virgl_screen
*rs
= virgl_screen(pscreen
);
1465 vctx
= CALLOC_STRUCT(virgl_context
);
1466 const char *host_debug_flagstring
;
1468 vctx
->cbuf
= rs
->vws
->cmd_buf_create(rs
->vws
, VIRGL_MAX_CMDBUF_DWORDS
);
1474 vctx
->base
.destroy
= virgl_context_destroy
;
1475 vctx
->base
.create_surface
= virgl_create_surface
;
1476 vctx
->base
.surface_destroy
= virgl_surface_destroy
;
1477 vctx
->base
.set_framebuffer_state
= virgl_set_framebuffer_state
;
1478 vctx
->base
.create_blend_state
= virgl_create_blend_state
;
1479 vctx
->base
.bind_blend_state
= virgl_bind_blend_state
;
1480 vctx
->base
.delete_blend_state
= virgl_delete_blend_state
;
1481 vctx
->base
.create_depth_stencil_alpha_state
= virgl_create_depth_stencil_alpha_state
;
1482 vctx
->base
.bind_depth_stencil_alpha_state
= virgl_bind_depth_stencil_alpha_state
;
1483 vctx
->base
.delete_depth_stencil_alpha_state
= virgl_delete_depth_stencil_alpha_state
;
1484 vctx
->base
.create_rasterizer_state
= virgl_create_rasterizer_state
;
1485 vctx
->base
.bind_rasterizer_state
= virgl_bind_rasterizer_state
;
1486 vctx
->base
.delete_rasterizer_state
= virgl_delete_rasterizer_state
;
1488 vctx
->base
.set_viewport_states
= virgl_set_viewport_states
;
1489 vctx
->base
.create_vertex_elements_state
= virgl_create_vertex_elements_state
;
1490 vctx
->base
.bind_vertex_elements_state
= virgl_bind_vertex_elements_state
;
1491 vctx
->base
.delete_vertex_elements_state
= virgl_delete_vertex_elements_state
;
1492 vctx
->base
.set_vertex_buffers
= virgl_set_vertex_buffers
;
1493 vctx
->base
.set_constant_buffer
= virgl_set_constant_buffer
;
1495 vctx
->base
.set_tess_state
= virgl_set_tess_state
;
1496 vctx
->base
.create_vs_state
= virgl_create_vs_state
;
1497 vctx
->base
.create_tcs_state
= virgl_create_tcs_state
;
1498 vctx
->base
.create_tes_state
= virgl_create_tes_state
;
1499 vctx
->base
.create_gs_state
= virgl_create_gs_state
;
1500 vctx
->base
.create_fs_state
= virgl_create_fs_state
;
1502 vctx
->base
.bind_vs_state
= virgl_bind_vs_state
;
1503 vctx
->base
.bind_tcs_state
= virgl_bind_tcs_state
;
1504 vctx
->base
.bind_tes_state
= virgl_bind_tes_state
;
1505 vctx
->base
.bind_gs_state
= virgl_bind_gs_state
;
1506 vctx
->base
.bind_fs_state
= virgl_bind_fs_state
;
1508 vctx
->base
.delete_vs_state
= virgl_delete_vs_state
;
1509 vctx
->base
.delete_tcs_state
= virgl_delete_tcs_state
;
1510 vctx
->base
.delete_tes_state
= virgl_delete_tes_state
;
1511 vctx
->base
.delete_gs_state
= virgl_delete_gs_state
;
1512 vctx
->base
.delete_fs_state
= virgl_delete_fs_state
;
1514 vctx
->base
.create_compute_state
= virgl_create_compute_state
;
1515 vctx
->base
.bind_compute_state
= virgl_bind_compute_state
;
1516 vctx
->base
.delete_compute_state
= virgl_delete_compute_state
;
1517 vctx
->base
.launch_grid
= virgl_launch_grid
;
1519 vctx
->base
.clear
= virgl_clear
;
1520 vctx
->base
.clear_texture
= virgl_clear_texture
;
1521 vctx
->base
.draw_vbo
= virgl_draw_vbo
;
1522 vctx
->base
.flush
= virgl_flush_from_st
;
1523 vctx
->base
.screen
= pscreen
;
1524 vctx
->base
.create_sampler_view
= virgl_create_sampler_view
;
1525 vctx
->base
.sampler_view_destroy
= virgl_destroy_sampler_view
;
1526 vctx
->base
.set_sampler_views
= virgl_set_sampler_views
;
1527 vctx
->base
.texture_barrier
= virgl_texture_barrier
;
1529 vctx
->base
.create_sampler_state
= virgl_create_sampler_state
;
1530 vctx
->base
.delete_sampler_state
= virgl_delete_sampler_state
;
1531 vctx
->base
.bind_sampler_states
= virgl_bind_sampler_states
;
1533 vctx
->base
.set_polygon_stipple
= virgl_set_polygon_stipple
;
1534 vctx
->base
.set_scissor_states
= virgl_set_scissor_states
;
1535 vctx
->base
.set_sample_mask
= virgl_set_sample_mask
;
1536 vctx
->base
.set_min_samples
= virgl_set_min_samples
;
1537 vctx
->base
.set_stencil_ref
= virgl_set_stencil_ref
;
1538 vctx
->base
.set_clip_state
= virgl_set_clip_state
;
1540 vctx
->base
.set_blend_color
= virgl_set_blend_color
;
1542 vctx
->base
.get_sample_position
= virgl_get_sample_position
;
1544 vctx
->base
.resource_copy_region
= virgl_resource_copy_region
;
1545 vctx
->base
.flush_resource
= virgl_flush_resource
;
1546 vctx
->base
.blit
= virgl_blit
;
1547 vctx
->base
.create_fence_fd
= virgl_create_fence_fd
;
1548 vctx
->base
.fence_server_sync
= virgl_fence_server_sync
;
1550 vctx
->base
.set_shader_buffers
= virgl_set_shader_buffers
;
1551 vctx
->base
.set_hw_atomic_buffers
= virgl_set_hw_atomic_buffers
;
1552 vctx
->base
.set_shader_images
= virgl_set_shader_images
;
1553 vctx
->base
.memory_barrier
= virgl_memory_barrier
;
1555 virgl_init_context_resource_functions(&vctx
->base
);
1556 virgl_init_query_functions(vctx
);
1557 virgl_init_so_functions(vctx
);
1559 slab_create_child(&vctx
->transfer_pool
, &rs
->transfer_pool
);
1560 virgl_transfer_queue_init(&vctx
->queue
, vctx
);
1561 vctx
->encoded_transfers
= (rs
->vws
->supports_encoded_transfers
&&
1562 (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_TRANSFER
));
1564 /* Reserve some space for transfers. */
1565 if (vctx
->encoded_transfers
)
1566 vctx
->cbuf
->cdw
= VIRGL_MAX_TBUF_DWORDS
;
1568 vctx
->primconvert
= util_primconvert_create(&vctx
->base
, rs
->caps
.caps
.v1
.prim_mask
);
1569 vctx
->uploader
= u_upload_create(&vctx
->base
, 1024 * 1024,
1570 PIPE_BIND_INDEX_BUFFER
, PIPE_USAGE_STREAM
, 0);
1571 if (!vctx
->uploader
)
1573 vctx
->base
.stream_uploader
= vctx
->uploader
;
1574 vctx
->base
.const_uploader
= vctx
->uploader
;
1576 /* We use a special staging buffer as the source of copy transfers. */
1577 if ((rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_COPY_TRANSFER
) &&
1578 vctx
->encoded_transfers
) {
1579 virgl_staging_init(&vctx
->staging
, &vctx
->base
, 1024 * 1024);
1580 vctx
->supports_staging
= true;
1583 vctx
->hw_sub_ctx_id
= rs
->sub_ctx_id
++;
1584 virgl_encoder_create_sub_ctx(vctx
, vctx
->hw_sub_ctx_id
);
1586 virgl_encoder_set_sub_ctx(vctx
, vctx
->hw_sub_ctx_id
);
1588 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_GUEST_MAY_INIT_LOG
) {
1589 host_debug_flagstring
= getenv("VIRGL_HOST_DEBUG");
1590 if (host_debug_flagstring
)
1591 virgl_encode_host_debug_flagstring(vctx
, host_debug_flagstring
);
1594 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_APP_TWEAK_SUPPORT
)
1595 virgl_send_tweaks(vctx
, rs
);
1599 virgl_context_destroy(&vctx
->base
);