2 * Copyright 2014, 2015 Red Hat.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 #include "pipe/p_shader_tokens.h"
27 #include "pipe/p_context.h"
28 #include "pipe/p_defines.h"
29 #include "pipe/p_screen.h"
30 #include "pipe/p_state.h"
31 #include "util/u_inlines.h"
32 #include "util/u_memory.h"
33 #include "util/u_format.h"
34 #include "util/u_prim.h"
35 #include "util/u_transfer.h"
36 #include "util/u_helpers.h"
37 #include "util/slab.h"
38 #include "util/u_upload_mgr.h"
39 #include "util/u_blitter.h"
40 #include "tgsi/tgsi_text.h"
41 #include "indices/u_primconvert.h"
43 #include "pipebuffer/pb_buffer.h"
45 #include "virgl_encode.h"
46 #include "virgl_context.h"
47 #include "virgl_protocol.h"
48 #include "virgl_resource.h"
49 #include "virgl_screen.h"
51 struct virgl_vertex_elements_state
{
53 uint8_t binding_map
[PIPE_MAX_ATTRIBS
];
/* Monotonically increasing source of host object handles.
 * Not thread-safe; callers are expected to serialize on the context.
 */
static uint32_t next_handle;

/* Return the next unused object handle. Handle 0 is never produced, so it
 * can be used by callers as the "no object" sentinel.
 */
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}
63 static void virgl_attach_res_framebuffer(struct virgl_context
*vctx
)
65 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
66 struct pipe_surface
*surf
;
67 struct virgl_resource
*res
;
70 surf
= vctx
->framebuffer
.zsbuf
;
72 res
= virgl_resource(surf
->texture
);
74 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
75 virgl_resource_dirty(res
, surf
->u
.tex
.level
);
78 for (i
= 0; i
< vctx
->framebuffer
.nr_cbufs
; i
++) {
79 surf
= vctx
->framebuffer
.cbufs
[i
];
81 res
= virgl_resource(surf
->texture
);
83 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
84 virgl_resource_dirty(res
, surf
->u
.tex
.level
);
90 static void virgl_attach_res_sampler_views(struct virgl_context
*vctx
,
91 enum pipe_shader_type shader_type
)
93 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
94 const struct virgl_shader_binding_state
*binding
=
95 &vctx
->shader_bindings
[shader_type
];
96 uint32_t remaining_mask
= binding
->view_enabled_mask
;
97 struct virgl_resource
*res
;
99 while (remaining_mask
) {
100 int i
= u_bit_scan(&remaining_mask
);
101 assert(binding
->views
[i
] && binding
->views
[i
]->texture
);
102 res
= virgl_resource(binding
->views
[i
]->texture
);
103 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
107 static void virgl_attach_res_vertex_buffers(struct virgl_context
*vctx
)
109 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
110 struct virgl_resource
*res
;
113 for (i
= 0; i
< vctx
->num_vertex_buffers
; i
++) {
114 res
= virgl_resource(vctx
->vertex_buffer
[i
].buffer
.resource
);
116 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
120 static void virgl_attach_res_index_buffer(struct virgl_context
*vctx
,
121 struct virgl_indexbuf
*ib
)
123 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
124 struct virgl_resource
*res
;
126 res
= virgl_resource(ib
->buffer
);
128 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
131 static void virgl_attach_res_so_targets(struct virgl_context
*vctx
)
133 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
134 struct virgl_resource
*res
;
137 for (i
= 0; i
< vctx
->num_so_targets
; i
++) {
138 res
= virgl_resource(vctx
->so_targets
[i
].base
.buffer
);
140 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
144 static void virgl_attach_res_uniform_buffers(struct virgl_context
*vctx
,
145 enum pipe_shader_type shader_type
)
147 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
148 const struct virgl_shader_binding_state
*binding
=
149 &vctx
->shader_bindings
[shader_type
];
150 uint32_t remaining_mask
= binding
->ubo_enabled_mask
;
151 struct virgl_resource
*res
;
153 while (remaining_mask
) {
154 int i
= u_bit_scan(&remaining_mask
);
155 res
= virgl_resource(binding
->ubos
[i
].buffer
);
157 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
161 static void virgl_attach_res_shader_buffers(struct virgl_context
*vctx
,
162 enum pipe_shader_type shader_type
)
164 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
165 const struct virgl_shader_binding_state
*binding
=
166 &vctx
->shader_bindings
[shader_type
];
167 uint32_t remaining_mask
= binding
->ssbo_enabled_mask
;
168 struct virgl_resource
*res
;
170 while (remaining_mask
) {
171 int i
= u_bit_scan(&remaining_mask
);
172 res
= virgl_resource(binding
->ssbos
[i
].buffer
);
174 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
178 static void virgl_attach_res_shader_images(struct virgl_context
*vctx
,
179 enum pipe_shader_type shader_type
)
181 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
182 const struct virgl_shader_binding_state
*binding
=
183 &vctx
->shader_bindings
[shader_type
];
184 uint32_t remaining_mask
= binding
->image_enabled_mask
;
185 struct virgl_resource
*res
;
187 while (remaining_mask
) {
188 int i
= u_bit_scan(&remaining_mask
);
189 res
= virgl_resource(binding
->images
[i
].resource
);
191 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
195 static void virgl_attach_res_atomic_buffers(struct virgl_context
*vctx
)
197 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
198 uint32_t remaining_mask
= vctx
->atomic_buffer_enabled_mask
;
199 struct virgl_resource
*res
;
201 while (remaining_mask
) {
202 int i
= u_bit_scan(&remaining_mask
);
203 res
= virgl_resource(vctx
->atomic_buffers
[i
].buffer
);
205 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
210 * after flushing, the hw context still has a bunch of
211 * resources bound, so we need to rebind those here.
213 static void virgl_reemit_draw_resources(struct virgl_context
*vctx
)
215 enum pipe_shader_type shader_type
;
217 /* reattach any flushed resources */
218 /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
219 virgl_attach_res_framebuffer(vctx
);
221 for (shader_type
= 0; shader_type
< PIPE_SHADER_COMPUTE
; shader_type
++) {
222 virgl_attach_res_sampler_views(vctx
, shader_type
);
223 virgl_attach_res_uniform_buffers(vctx
, shader_type
);
224 virgl_attach_res_shader_buffers(vctx
, shader_type
);
225 virgl_attach_res_shader_images(vctx
, shader_type
);
227 virgl_attach_res_atomic_buffers(vctx
);
228 virgl_attach_res_vertex_buffers(vctx
);
229 virgl_attach_res_so_targets(vctx
);
232 static void virgl_reemit_compute_resources(struct virgl_context
*vctx
)
234 virgl_attach_res_sampler_views(vctx
, PIPE_SHADER_COMPUTE
);
235 virgl_attach_res_uniform_buffers(vctx
, PIPE_SHADER_COMPUTE
);
236 virgl_attach_res_shader_buffers(vctx
, PIPE_SHADER_COMPUTE
);
237 virgl_attach_res_shader_images(vctx
, PIPE_SHADER_COMPUTE
);
239 virgl_attach_res_atomic_buffers(vctx
);
242 static struct pipe_surface
*virgl_create_surface(struct pipe_context
*ctx
,
243 struct pipe_resource
*resource
,
244 const struct pipe_surface
*templ
)
246 struct virgl_context
*vctx
= virgl_context(ctx
);
247 struct virgl_surface
*surf
;
248 struct virgl_resource
*res
= virgl_resource(resource
);
251 /* no support for buffer surfaces */
252 if (resource
->target
== PIPE_BUFFER
)
255 surf
= CALLOC_STRUCT(virgl_surface
);
259 assert(ctx
->screen
->get_param(ctx
->screen
,
260 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL
) ||
261 (util_format_is_srgb(templ
->format
) ==
262 util_format_is_srgb(resource
->format
)));
264 virgl_resource_dirty(res
, 0);
265 handle
= virgl_object_assign_handle();
266 pipe_reference_init(&surf
->base
.reference
, 1);
267 pipe_resource_reference(&surf
->base
.texture
, resource
);
268 surf
->base
.context
= ctx
;
269 surf
->base
.format
= templ
->format
;
271 surf
->base
.width
= u_minify(resource
->width0
, templ
->u
.tex
.level
);
272 surf
->base
.height
= u_minify(resource
->height0
, templ
->u
.tex
.level
);
273 surf
->base
.u
.tex
.level
= templ
->u
.tex
.level
;
274 surf
->base
.u
.tex
.first_layer
= templ
->u
.tex
.first_layer
;
275 surf
->base
.u
.tex
.last_layer
= templ
->u
.tex
.last_layer
;
277 virgl_encoder_create_surface(vctx
, handle
, res
, &surf
->base
);
278 surf
->handle
= handle
;
282 static void virgl_surface_destroy(struct pipe_context
*ctx
,
283 struct pipe_surface
*psurf
)
285 struct virgl_context
*vctx
= virgl_context(ctx
);
286 struct virgl_surface
*surf
= virgl_surface(psurf
);
288 pipe_resource_reference(&surf
->base
.texture
, NULL
);
289 virgl_encode_delete_object(vctx
, surf
->handle
, VIRGL_OBJECT_SURFACE
);
293 static void *virgl_create_blend_state(struct pipe_context
*ctx
,
294 const struct pipe_blend_state
*blend_state
)
296 struct virgl_context
*vctx
= virgl_context(ctx
);
298 handle
= virgl_object_assign_handle();
300 virgl_encode_blend_state(vctx
, handle
, blend_state
);
301 return (void *)(unsigned long)handle
;
305 static void virgl_bind_blend_state(struct pipe_context
*ctx
,
308 struct virgl_context
*vctx
= virgl_context(ctx
);
309 uint32_t handle
= (unsigned long)blend_state
;
310 virgl_encode_bind_object(vctx
, handle
, VIRGL_OBJECT_BLEND
);
313 static void virgl_delete_blend_state(struct pipe_context
*ctx
,
316 struct virgl_context
*vctx
= virgl_context(ctx
);
317 uint32_t handle
= (unsigned long)blend_state
;
318 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_BLEND
);
321 static void *virgl_create_depth_stencil_alpha_state(struct pipe_context
*ctx
,
322 const struct pipe_depth_stencil_alpha_state
*blend_state
)
324 struct virgl_context
*vctx
= virgl_context(ctx
);
326 handle
= virgl_object_assign_handle();
328 virgl_encode_dsa_state(vctx
, handle
, blend_state
);
329 return (void *)(unsigned long)handle
;
332 static void virgl_bind_depth_stencil_alpha_state(struct pipe_context
*ctx
,
335 struct virgl_context
*vctx
= virgl_context(ctx
);
336 uint32_t handle
= (unsigned long)blend_state
;
337 virgl_encode_bind_object(vctx
, handle
, VIRGL_OBJECT_DSA
);
340 static void virgl_delete_depth_stencil_alpha_state(struct pipe_context
*ctx
,
343 struct virgl_context
*vctx
= virgl_context(ctx
);
344 uint32_t handle
= (unsigned long)dsa_state
;
345 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_DSA
);
348 static void *virgl_create_rasterizer_state(struct pipe_context
*ctx
,
349 const struct pipe_rasterizer_state
*rs_state
)
351 struct virgl_context
*vctx
= virgl_context(ctx
);
352 struct virgl_rasterizer_state
*vrs
= CALLOC_STRUCT(virgl_rasterizer_state
);
357 vrs
->handle
= virgl_object_assign_handle();
359 virgl_encode_rasterizer_state(vctx
, vrs
->handle
, rs_state
);
363 static void virgl_bind_rasterizer_state(struct pipe_context
*ctx
,
366 struct virgl_context
*vctx
= virgl_context(ctx
);
369 struct virgl_rasterizer_state
*vrs
= rs_state
;
370 vctx
->rs_state
= *vrs
;
371 handle
= vrs
->handle
;
373 virgl_encode_bind_object(vctx
, handle
, VIRGL_OBJECT_RASTERIZER
);
376 static void virgl_delete_rasterizer_state(struct pipe_context
*ctx
,
379 struct virgl_context
*vctx
= virgl_context(ctx
);
380 struct virgl_rasterizer_state
*vrs
= rs_state
;
381 virgl_encode_delete_object(vctx
, vrs
->handle
, VIRGL_OBJECT_RASTERIZER
);
385 static void virgl_set_framebuffer_state(struct pipe_context
*ctx
,
386 const struct pipe_framebuffer_state
*state
)
388 struct virgl_context
*vctx
= virgl_context(ctx
);
390 vctx
->framebuffer
= *state
;
391 virgl_encoder_set_framebuffer_state(vctx
, state
);
392 virgl_attach_res_framebuffer(vctx
);
/* pipe_context::set_viewport_states — pure pass-through to the encoder. */
static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}
404 static void *virgl_create_vertex_elements_state(struct pipe_context
*ctx
,
405 unsigned num_elements
,
406 const struct pipe_vertex_element
*elements
)
408 struct pipe_vertex_element new_elements
[PIPE_MAX_ATTRIBS
];
409 struct virgl_context
*vctx
= virgl_context(ctx
);
410 struct virgl_vertex_elements_state
*state
=
411 CALLOC_STRUCT(virgl_vertex_elements_state
);
413 for (int i
= 0; i
< num_elements
; ++i
) {
414 if (elements
[i
].instance_divisor
) {
415 /* Virglrenderer doesn't deal with instance_divisor correctly if
416 * there isn't a 1:1 relationship between elements and bindings.
417 * So let's make sure there is, by duplicating bindings.
419 for (int j
= 0; j
< num_elements
; ++j
) {
420 new_elements
[j
] = elements
[j
];
421 new_elements
[j
].vertex_buffer_index
= j
;
422 state
->binding_map
[j
] = elements
[j
].vertex_buffer_index
;
424 elements
= new_elements
;
425 state
->num_bindings
= num_elements
;
430 state
->handle
= virgl_object_assign_handle();
431 virgl_encoder_create_vertex_elements(vctx
, state
->handle
,
432 num_elements
, elements
);
436 static void virgl_delete_vertex_elements_state(struct pipe_context
*ctx
,
439 struct virgl_context
*vctx
= virgl_context(ctx
);
440 struct virgl_vertex_elements_state
*state
=
441 (struct virgl_vertex_elements_state
*)ve
;
442 virgl_encode_delete_object(vctx
, state
->handle
, VIRGL_OBJECT_VERTEX_ELEMENTS
);
446 static void virgl_bind_vertex_elements_state(struct pipe_context
*ctx
,
449 struct virgl_context
*vctx
= virgl_context(ctx
);
450 struct virgl_vertex_elements_state
*state
=
451 (struct virgl_vertex_elements_state
*)ve
;
452 vctx
->vertex_elements
= state
;
453 virgl_encode_bind_object(vctx
, state
? state
->handle
: 0,
454 VIRGL_OBJECT_VERTEX_ELEMENTS
);
455 vctx
->vertex_array_dirty
= TRUE
;
458 static void virgl_set_vertex_buffers(struct pipe_context
*ctx
,
460 unsigned num_buffers
,
461 const struct pipe_vertex_buffer
*buffers
)
463 struct virgl_context
*vctx
= virgl_context(ctx
);
465 util_set_vertex_buffers_count(vctx
->vertex_buffer
,
466 &vctx
->num_vertex_buffers
,
467 buffers
, start_slot
, num_buffers
);
469 vctx
->vertex_array_dirty
= TRUE
;
472 static void virgl_hw_set_vertex_buffers(struct virgl_context
*vctx
)
474 if (vctx
->vertex_array_dirty
) {
475 struct virgl_vertex_elements_state
*ve
= vctx
->vertex_elements
;
477 if (ve
->num_bindings
) {
478 struct pipe_vertex_buffer vertex_buffers
[PIPE_MAX_ATTRIBS
];
479 for (int i
= 0; i
< ve
->num_bindings
; ++i
)
480 vertex_buffers
[i
] = vctx
->vertex_buffer
[ve
->binding_map
[i
]];
482 virgl_encoder_set_vertex_buffers(vctx
, ve
->num_bindings
, vertex_buffers
);
484 virgl_encoder_set_vertex_buffers(vctx
, vctx
->num_vertex_buffers
, vctx
->vertex_buffer
);
486 virgl_attach_res_vertex_buffers(vctx
);
488 vctx
->vertex_array_dirty
= FALSE
;
/* pipe_context::set_stencil_ref — pass-through to the encoder. */
static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}

/* pipe_context::set_blend_color — pass-through to the encoder. */
static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}
/* Encode the index buffer for the host and keep its resource referenced in
 * the command buffer.
 */
static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}
513 static void virgl_set_constant_buffer(struct pipe_context
*ctx
,
514 enum pipe_shader_type shader
, uint index
,
515 const struct pipe_constant_buffer
*buf
)
517 struct virgl_context
*vctx
= virgl_context(ctx
);
518 struct virgl_shader_binding_state
*binding
=
519 &vctx
->shader_bindings
[shader
];
521 if (buf
&& buf
->buffer
) {
522 struct virgl_resource
*res
= virgl_resource(buf
->buffer
);
523 virgl_encoder_set_uniform_buffer(vctx
, shader
, index
,
525 buf
->buffer_size
, res
);
527 pipe_resource_reference(&binding
->ubos
[index
].buffer
, buf
->buffer
);
528 binding
->ubos
[index
] = *buf
;
529 binding
->ubo_enabled_mask
|= 1 << index
;
531 static const struct pipe_constant_buffer dummy_ubo
;
534 virgl_encoder_write_constant_buffer(vctx
, shader
, index
,
535 buf
->buffer_size
/ 4,
538 pipe_resource_reference(&binding
->ubos
[index
].buffer
, NULL
);
539 binding
->ubo_enabled_mask
&= ~(1 << index
);
543 static void *virgl_shader_encoder(struct pipe_context
*ctx
,
544 const struct pipe_shader_state
*shader
,
547 struct virgl_context
*vctx
= virgl_context(ctx
);
549 struct tgsi_token
*new_tokens
;
552 new_tokens
= virgl_tgsi_transform(vctx
, shader
->tokens
);
556 handle
= virgl_object_assign_handle();
557 /* encode VS state */
558 ret
= virgl_encode_shader_state(vctx
, handle
, type
,
559 &shader
->stream_output
, 0,
566 return (void *)(unsigned long)handle
;
569 static void *virgl_create_vs_state(struct pipe_context
*ctx
,
570 const struct pipe_shader_state
*shader
)
572 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_VERTEX
);
575 static void *virgl_create_tcs_state(struct pipe_context
*ctx
,
576 const struct pipe_shader_state
*shader
)
578 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_TESS_CTRL
);
581 static void *virgl_create_tes_state(struct pipe_context
*ctx
,
582 const struct pipe_shader_state
*shader
)
584 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_TESS_EVAL
);
587 static void *virgl_create_gs_state(struct pipe_context
*ctx
,
588 const struct pipe_shader_state
*shader
)
590 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_GEOMETRY
);
593 static void *virgl_create_fs_state(struct pipe_context
*ctx
,
594 const struct pipe_shader_state
*shader
)
596 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_FRAGMENT
);
600 virgl_delete_fs_state(struct pipe_context
*ctx
,
603 uint32_t handle
= (unsigned long)fs
;
604 struct virgl_context
*vctx
= virgl_context(ctx
);
606 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
610 virgl_delete_gs_state(struct pipe_context
*ctx
,
613 uint32_t handle
= (unsigned long)gs
;
614 struct virgl_context
*vctx
= virgl_context(ctx
);
616 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
620 virgl_delete_vs_state(struct pipe_context
*ctx
,
623 uint32_t handle
= (unsigned long)vs
;
624 struct virgl_context
*vctx
= virgl_context(ctx
);
626 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
630 virgl_delete_tcs_state(struct pipe_context
*ctx
,
633 uint32_t handle
= (unsigned long)tcs
;
634 struct virgl_context
*vctx
= virgl_context(ctx
);
636 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
640 virgl_delete_tes_state(struct pipe_context
*ctx
,
643 uint32_t handle
= (unsigned long)tes
;
644 struct virgl_context
*vctx
= virgl_context(ctx
);
646 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
649 static void virgl_bind_vs_state(struct pipe_context
*ctx
,
652 uint32_t handle
= (unsigned long)vss
;
653 struct virgl_context
*vctx
= virgl_context(ctx
);
655 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_VERTEX
);
658 static void virgl_bind_tcs_state(struct pipe_context
*ctx
,
661 uint32_t handle
= (unsigned long)vss
;
662 struct virgl_context
*vctx
= virgl_context(ctx
);
664 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_TESS_CTRL
);
667 static void virgl_bind_tes_state(struct pipe_context
*ctx
,
670 uint32_t handle
= (unsigned long)vss
;
671 struct virgl_context
*vctx
= virgl_context(ctx
);
673 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_TESS_EVAL
);
676 static void virgl_bind_gs_state(struct pipe_context
*ctx
,
679 uint32_t handle
= (unsigned long)vss
;
680 struct virgl_context
*vctx
= virgl_context(ctx
);
682 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_GEOMETRY
);
686 static void virgl_bind_fs_state(struct pipe_context
*ctx
,
689 uint32_t handle
= (unsigned long)vss
;
690 struct virgl_context
*vctx
= virgl_context(ctx
);
692 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_FRAGMENT
);
695 static void virgl_clear(struct pipe_context
*ctx
,
697 const union pipe_color_union
*color
,
698 double depth
, unsigned stencil
)
700 struct virgl_context
*vctx
= virgl_context(ctx
);
702 if (!vctx
->num_draws
)
703 virgl_reemit_draw_resources(vctx
);
706 virgl_encode_clear(vctx
, buffers
, color
, depth
, stencil
);
709 static void virgl_draw_vbo(struct pipe_context
*ctx
,
710 const struct pipe_draw_info
*dinfo
)
712 struct virgl_context
*vctx
= virgl_context(ctx
);
713 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
714 struct virgl_indexbuf ib
= {};
715 struct pipe_draw_info info
= *dinfo
;
717 if (!dinfo
->count_from_stream_output
&& !dinfo
->indirect
&&
718 !dinfo
->primitive_restart
&&
719 !u_trim_pipe_prim(dinfo
->mode
, (unsigned*)&dinfo
->count
))
722 if (!(rs
->caps
.caps
.v1
.prim_mask
& (1 << dinfo
->mode
))) {
723 util_primconvert_save_rasterizer_state(vctx
->primconvert
, &vctx
->rs_state
.rs
);
724 util_primconvert_draw_vbo(vctx
->primconvert
, dinfo
);
727 if (info
.index_size
) {
728 pipe_resource_reference(&ib
.buffer
, info
.has_user_indices
? NULL
: info
.index
.resource
);
729 ib
.user_buffer
= info
.has_user_indices
? info
.index
.user
: NULL
;
730 ib
.index_size
= dinfo
->index_size
;
731 ib
.offset
= info
.start
* ib
.index_size
;
733 if (ib
.user_buffer
) {
734 u_upload_data(vctx
->uploader
, 0, info
.count
* ib
.index_size
, 4,
735 ib
.user_buffer
, &ib
.offset
, &ib
.buffer
);
736 ib
.user_buffer
= NULL
;
740 if (!vctx
->num_draws
)
741 virgl_reemit_draw_resources(vctx
);
744 virgl_hw_set_vertex_buffers(vctx
);
746 virgl_hw_set_index_buffer(vctx
, &ib
);
748 virgl_encoder_draw_vbo(vctx
, &info
);
750 pipe_resource_reference(&ib
.buffer
, NULL
);
754 static void virgl_flush_eq(struct virgl_context
*ctx
, void *closure
,
755 struct pipe_fence_handle
**fence
)
757 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
759 /* skip empty cbuf */
760 if (ctx
->cbuf
->cdw
== ctx
->cbuf_initial_cdw
&&
761 ctx
->queue
.num_dwords
== 0 &&
766 u_upload_unmap(ctx
->uploader
);
768 /* send the buffer to the remote side for decoding */
769 ctx
->num_draws
= ctx
->num_compute
= 0;
771 virgl_transfer_queue_clear(&ctx
->queue
, ctx
->cbuf
);
772 rs
->vws
->submit_cmd(rs
->vws
, ctx
->cbuf
, fence
);
774 /* Reserve some space for transfers. */
775 if (ctx
->encoded_transfers
)
776 ctx
->cbuf
->cdw
= VIRGL_MAX_TBUF_DWORDS
;
778 virgl_encoder_set_sub_ctx(ctx
, ctx
->hw_sub_ctx_id
);
780 ctx
->cbuf_initial_cdw
= ctx
->cbuf
->cdw
;
782 /* We have flushed the command queue, including any pending copy transfers
783 * involving staging resources.
785 ctx
->queued_staging_res_size
= 0;
788 static void virgl_flush_from_st(struct pipe_context
*ctx
,
789 struct pipe_fence_handle
**fence
,
790 enum pipe_flush_flags flags
)
792 struct virgl_context
*vctx
= virgl_context(ctx
);
794 virgl_flush_eq(vctx
, vctx
, fence
);
797 static struct pipe_sampler_view
*virgl_create_sampler_view(struct pipe_context
*ctx
,
798 struct pipe_resource
*texture
,
799 const struct pipe_sampler_view
*state
)
801 struct virgl_context
*vctx
= virgl_context(ctx
);
802 struct virgl_sampler_view
*grview
;
804 struct virgl_resource
*res
;
809 grview
= CALLOC_STRUCT(virgl_sampler_view
);
813 res
= virgl_resource(texture
);
814 handle
= virgl_object_assign_handle();
815 virgl_encode_sampler_view(vctx
, handle
, res
, state
);
817 grview
->base
= *state
;
818 grview
->base
.reference
.count
= 1;
820 grview
->base
.texture
= NULL
;
821 grview
->base
.context
= ctx
;
822 pipe_resource_reference(&grview
->base
.texture
, texture
);
823 grview
->handle
= handle
;
824 return &grview
->base
;
827 static void virgl_set_sampler_views(struct pipe_context
*ctx
,
828 enum pipe_shader_type shader_type
,
831 struct pipe_sampler_view
**views
)
833 struct virgl_context
*vctx
= virgl_context(ctx
);
834 struct virgl_shader_binding_state
*binding
=
835 &vctx
->shader_bindings
[shader_type
];
837 binding
->view_enabled_mask
&= ~u_bit_consecutive(start_slot
, num_views
);
838 for (unsigned i
= 0; i
< num_views
; i
++) {
839 unsigned idx
= start_slot
+ i
;
840 if (views
&& views
[i
]) {
841 pipe_sampler_view_reference(&binding
->views
[idx
], views
[i
]);
842 binding
->view_enabled_mask
|= 1 << idx
;
844 pipe_sampler_view_reference(&binding
->views
[idx
], NULL
);
848 virgl_encode_set_sampler_views(vctx
, shader_type
,
849 start_slot
, num_views
, (struct virgl_sampler_view
**)binding
->views
);
850 virgl_attach_res_sampler_views(vctx
, shader_type
);
854 virgl_texture_barrier(struct pipe_context
*ctx
, unsigned flags
)
856 struct virgl_context
*vctx
= virgl_context(ctx
);
857 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
859 if (!(rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_TEXTURE_BARRIER
))
861 virgl_encode_texture_barrier(vctx
, flags
);
864 static void virgl_destroy_sampler_view(struct pipe_context
*ctx
,
865 struct pipe_sampler_view
*view
)
867 struct virgl_context
*vctx
= virgl_context(ctx
);
868 struct virgl_sampler_view
*grview
= virgl_sampler_view(view
);
870 virgl_encode_delete_object(vctx
, grview
->handle
, VIRGL_OBJECT_SAMPLER_VIEW
);
871 pipe_resource_reference(&view
->texture
, NULL
);
875 static void *virgl_create_sampler_state(struct pipe_context
*ctx
,
876 const struct pipe_sampler_state
*state
)
878 struct virgl_context
*vctx
= virgl_context(ctx
);
881 handle
= virgl_object_assign_handle();
883 virgl_encode_sampler_state(vctx
, handle
, state
);
884 return (void *)(unsigned long)handle
;
887 static void virgl_delete_sampler_state(struct pipe_context
*ctx
,
890 struct virgl_context
*vctx
= virgl_context(ctx
);
891 uint32_t handle
= (unsigned long)ss
;
893 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SAMPLER_STATE
);
896 static void virgl_bind_sampler_states(struct pipe_context
*ctx
,
897 enum pipe_shader_type shader
,
899 unsigned num_samplers
,
902 struct virgl_context
*vctx
= virgl_context(ctx
);
903 uint32_t handles
[32];
905 for (i
= 0; i
< num_samplers
; i
++) {
906 handles
[i
] = (unsigned long)(samplers
[i
]);
908 virgl_encode_bind_sampler_states(vctx
, shader
, start_slot
, num_samplers
, handles
);
/* Simple pass-through state setters: polygon stipple, scissors, sample mask. */
static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}
934 static void virgl_set_min_samples(struct pipe_context
*ctx
,
935 unsigned min_samples
)
937 struct virgl_context
*vctx
= virgl_context(ctx
);
938 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
940 if (!(rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_SET_MIN_SAMPLES
))
942 virgl_encoder_set_min_samples(vctx
, min_samples
);
945 static void virgl_set_clip_state(struct pipe_context
*ctx
,
946 const struct pipe_clip_state
*clip
)
948 struct virgl_context
*vctx
= virgl_context(ctx
);
949 virgl_encoder_set_clip_state(vctx
, clip
);
952 static void virgl_set_tess_state(struct pipe_context
*ctx
,
953 const float default_outer_level
[4],
954 const float default_inner_level
[2])
956 struct virgl_context
*vctx
= virgl_context(ctx
);
957 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
959 if (!rs
->caps
.caps
.v1
.bset
.has_tessellation_shaders
)
961 virgl_encode_set_tess_state(vctx
, default_outer_level
, default_inner_level
);
964 static void virgl_resource_copy_region(struct pipe_context
*ctx
,
965 struct pipe_resource
*dst
,
967 unsigned dstx
, unsigned dsty
, unsigned dstz
,
968 struct pipe_resource
*src
,
970 const struct pipe_box
*src_box
)
972 struct virgl_context
*vctx
= virgl_context(ctx
);
973 struct virgl_resource
*dres
= virgl_resource(dst
);
974 struct virgl_resource
*sres
= virgl_resource(src
);
976 if (dres
->u
.b
.target
== PIPE_BUFFER
)
977 util_range_add(&dres
->valid_buffer_range
, dstx
, dstx
+ src_box
->width
);
978 virgl_resource_dirty(dres
, dst_level
);
980 virgl_encode_resource_copy_region(vctx
, dres
,
981 dst_level
, dstx
, dsty
, dstz
,
/* pipe_context::flush_resource — nothing to do for virgl; the host sees all
 * rendering through the command stream.
 */
static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}
992 static void virgl_blit(struct pipe_context
*ctx
,
993 const struct pipe_blit_info
*blit
)
995 struct virgl_context
*vctx
= virgl_context(ctx
);
996 struct virgl_resource
*dres
= virgl_resource(blit
->dst
.resource
);
997 struct virgl_resource
*sres
= virgl_resource(blit
->src
.resource
);
999 assert(ctx
->screen
->get_param(ctx
->screen
,
1000 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL
) ||
1001 (util_format_is_srgb(blit
->dst
.resource
->format
) ==
1002 util_format_is_srgb(blit
->dst
.format
)));
1004 virgl_resource_dirty(dres
, blit
->dst
.level
);
1005 virgl_encode_blit(vctx
, dres
, sres
,
1009 static void virgl_set_hw_atomic_buffers(struct pipe_context
*ctx
,
1010 unsigned start_slot
,
1012 const struct pipe_shader_buffer
*buffers
)
1014 struct virgl_context
*vctx
= virgl_context(ctx
);
1016 vctx
->atomic_buffer_enabled_mask
&= ~u_bit_consecutive(start_slot
, count
);
1017 for (unsigned i
= 0; i
< count
; i
++) {
1018 unsigned idx
= start_slot
+ i
;
1019 if (buffers
&& buffers
[i
].buffer
) {
1020 pipe_resource_reference(&vctx
->atomic_buffers
[idx
].buffer
,
1022 vctx
->atomic_buffers
[idx
] = buffers
[i
];
1023 vctx
->atomic_buffer_enabled_mask
|= 1 << idx
;
1025 pipe_resource_reference(&vctx
->atomic_buffers
[idx
].buffer
, NULL
);
1029 virgl_encode_set_hw_atomic_buffers(vctx
, start_slot
, count
, buffers
);
1032 static void virgl_set_shader_buffers(struct pipe_context
*ctx
,
1033 enum pipe_shader_type shader
,
1034 unsigned start_slot
, unsigned count
,
1035 const struct pipe_shader_buffer
*buffers
,
1036 unsigned writable_bitmask
)
1038 struct virgl_context
*vctx
= virgl_context(ctx
);
1039 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1040 struct virgl_shader_binding_state
*binding
=
1041 &vctx
->shader_bindings
[shader
];
1043 binding
->ssbo_enabled_mask
&= ~u_bit_consecutive(start_slot
, count
);
1044 for (unsigned i
= 0; i
< count
; i
++) {
1045 unsigned idx
= start_slot
+ i
;
1046 if (buffers
&& buffers
[i
].buffer
) {
1047 pipe_resource_reference(&binding
->ssbos
[idx
].buffer
, buffers
[i
].buffer
);
1048 binding
->ssbos
[idx
] = buffers
[i
];
1049 binding
->ssbo_enabled_mask
|= 1 << idx
;
1051 pipe_resource_reference(&binding
->ssbos
[idx
].buffer
, NULL
);
1055 uint32_t max_shader_buffer
= (shader
== PIPE_SHADER_FRAGMENT
|| shader
== PIPE_SHADER_COMPUTE
) ?
1056 rs
->caps
.caps
.v2
.max_shader_buffer_frag_compute
:
1057 rs
->caps
.caps
.v2
.max_shader_buffer_other_stages
;
1058 if (!max_shader_buffer
)
1060 virgl_encode_set_shader_buffers(vctx
, shader
, start_slot
, count
, buffers
);
1063 static void virgl_create_fence_fd(struct pipe_context
*ctx
,
1064 struct pipe_fence_handle
**fence
,
1066 enum pipe_fd_type type
)
1068 assert(type
== PIPE_FD_TYPE_NATIVE_SYNC
);
1069 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1071 if (rs
->vws
->cs_create_fence
)
1072 *fence
= rs
->vws
->cs_create_fence(rs
->vws
, fd
);
1075 static void virgl_fence_server_sync(struct pipe_context
*ctx
,
1076 struct pipe_fence_handle
*fence
)
1078 struct virgl_context
*vctx
= virgl_context(ctx
);
1079 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1081 if (rs
->vws
->fence_server_sync
)
1082 rs
->vws
->fence_server_sync(rs
->vws
, vctx
->cbuf
, fence
);
1085 static void virgl_set_shader_images(struct pipe_context
*ctx
,
1086 enum pipe_shader_type shader
,
1087 unsigned start_slot
, unsigned count
,
1088 const struct pipe_image_view
*images
)
1090 struct virgl_context
*vctx
= virgl_context(ctx
);
1091 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1092 struct virgl_shader_binding_state
*binding
=
1093 &vctx
->shader_bindings
[shader
];
1095 binding
->image_enabled_mask
&= ~u_bit_consecutive(start_slot
, count
);
1096 for (unsigned i
= 0; i
< count
; i
++) {
1097 unsigned idx
= start_slot
+ i
;
1098 if (images
&& images
[i
].resource
) {
1099 pipe_resource_reference(&binding
->images
[idx
].resource
,
1100 images
[i
].resource
);
1101 binding
->images
[idx
] = images
[i
];
1102 binding
->image_enabled_mask
|= 1 << idx
;
1104 pipe_resource_reference(&binding
->images
[idx
].resource
, NULL
);
1108 uint32_t max_shader_images
= (shader
== PIPE_SHADER_FRAGMENT
|| shader
== PIPE_SHADER_COMPUTE
) ?
1109 rs
->caps
.caps
.v2
.max_shader_image_frag_compute
:
1110 rs
->caps
.caps
.v2
.max_shader_image_other_stages
;
1111 if (!max_shader_images
)
1113 virgl_encode_set_shader_images(vctx
, shader
, start_slot
, count
, images
);
1116 static void virgl_memory_barrier(struct pipe_context
*ctx
,
1119 struct virgl_context
*vctx
= virgl_context(ctx
);
1120 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1122 if (!(rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_MEMORY_BARRIER
))
1124 virgl_encode_memory_barrier(vctx
, flags
);
1127 static void *virgl_create_compute_state(struct pipe_context
*ctx
,
1128 const struct pipe_compute_state
*state
)
1130 struct virgl_context
*vctx
= virgl_context(ctx
);
1132 const struct tgsi_token
*new_tokens
= state
->prog
;
1133 struct pipe_stream_output_info so_info
= {};
1136 handle
= virgl_object_assign_handle();
1137 ret
= virgl_encode_shader_state(vctx
, handle
, PIPE_SHADER_COMPUTE
,
1139 state
->req_local_mem
,
1145 return (void *)(unsigned long)handle
;
1148 static void virgl_bind_compute_state(struct pipe_context
*ctx
, void *state
)
1150 uint32_t handle
= (unsigned long)state
;
1151 struct virgl_context
*vctx
= virgl_context(ctx
);
1153 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_COMPUTE
);
1156 static void virgl_delete_compute_state(struct pipe_context
*ctx
, void *state
)
1158 uint32_t handle
= (unsigned long)state
;
1159 struct virgl_context
*vctx
= virgl_context(ctx
);
1161 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
1164 static void virgl_launch_grid(struct pipe_context
*ctx
,
1165 const struct pipe_grid_info
*info
)
1167 struct virgl_context
*vctx
= virgl_context(ctx
);
1169 if (!vctx
->num_compute
)
1170 virgl_reemit_compute_resources(vctx
);
1171 vctx
->num_compute
++;
1173 virgl_encode_launch_grid(vctx
, info
);
1177 virgl_release_shader_binding(struct virgl_context
*vctx
,
1178 enum pipe_shader_type shader_type
)
1180 struct virgl_shader_binding_state
*binding
=
1181 &vctx
->shader_bindings
[shader_type
];
1183 while (binding
->view_enabled_mask
) {
1184 int i
= u_bit_scan(&binding
->view_enabled_mask
);
1185 pipe_sampler_view_reference(
1186 (struct pipe_sampler_view
**)&binding
->views
[i
], NULL
);
1189 while (binding
->ubo_enabled_mask
) {
1190 int i
= u_bit_scan(&binding
->ubo_enabled_mask
);
1191 pipe_resource_reference(&binding
->ubos
[i
].buffer
, NULL
);
1194 while (binding
->ssbo_enabled_mask
) {
1195 int i
= u_bit_scan(&binding
->ssbo_enabled_mask
);
1196 pipe_resource_reference(&binding
->ssbos
[i
].buffer
, NULL
);
1199 while (binding
->image_enabled_mask
) {
1200 int i
= u_bit_scan(&binding
->image_enabled_mask
);
1201 pipe_resource_reference(&binding
->images
[i
].resource
, NULL
);
1206 virgl_context_destroy( struct pipe_context
*ctx
)
1208 struct virgl_context
*vctx
= virgl_context(ctx
);
1209 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1210 enum pipe_shader_type shader_type
;
1212 vctx
->framebuffer
.zsbuf
= NULL
;
1213 vctx
->framebuffer
.nr_cbufs
= 0;
1214 virgl_encoder_destroy_sub_ctx(vctx
, vctx
->hw_sub_ctx_id
);
1215 virgl_flush_eq(vctx
, vctx
, NULL
);
1217 for (shader_type
= 0; shader_type
< PIPE_SHADER_TYPES
; shader_type
++)
1218 virgl_release_shader_binding(vctx
, shader_type
);
1220 while (vctx
->atomic_buffer_enabled_mask
) {
1221 int i
= u_bit_scan(&vctx
->atomic_buffer_enabled_mask
);
1222 pipe_resource_reference(&vctx
->atomic_buffers
[i
].buffer
, NULL
);
1225 rs
->vws
->cmd_buf_destroy(vctx
->cbuf
);
1227 u_upload_destroy(vctx
->uploader
);
1228 if (vctx
->transfer_uploader
)
1229 u_upload_destroy(vctx
->transfer_uploader
);
1230 util_primconvert_destroy(vctx
->primconvert
);
1231 virgl_transfer_queue_fini(&vctx
->queue
);
1233 slab_destroy_child(&vctx
->transfer_pool
);
1237 static void virgl_get_sample_position(struct pipe_context
*ctx
,
1238 unsigned sample_count
,
1242 struct virgl_context
*vctx
= virgl_context(ctx
);
1243 struct virgl_screen
*vs
= virgl_screen(vctx
->base
.screen
);
1245 if (sample_count
> vs
->caps
.caps
.v1
.max_samples
) {
1246 debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
1247 sample_count
, vs
->caps
.caps
.v1
.max_samples
);
1251 /* The following is basically copied from dri/i965gen6_get_sample_position
1252 * The only addition is that we hold the msaa positions for all sample
1253 * counts in a flat array. */
1255 if (sample_count
== 1) {
1256 out_value
[0] = out_value
[1] = 0.5f
;
1258 } else if (sample_count
== 2) {
1259 bits
= vs
->caps
.caps
.v2
.sample_locations
[0] >> (8 * index
);
1260 } else if (sample_count
<= 4) {
1261 bits
= vs
->caps
.caps
.v2
.sample_locations
[1] >> (8 * index
);
1262 } else if (sample_count
<= 8) {
1263 bits
= vs
->caps
.caps
.v2
.sample_locations
[2 + (index
>> 2)] >> (8 * (index
& 3));
1264 } else if (sample_count
<= 16) {
1265 bits
= vs
->caps
.caps
.v2
.sample_locations
[4 + (index
>> 2)] >> (8 * (index
& 3));
1267 out_value
[0] = ((bits
>> 4) & 0xf) / 16.0f
;
1268 out_value
[1] = (bits
& 0xf) / 16.0f
;
1270 if (virgl_debug
& VIRGL_DEBUG_VERBOSE
)
1271 debug_printf("VIRGL: sample postion [%2d/%2d] = (%f, %f)\n",
1272 index
, sample_count
, out_value
[0], out_value
[1]);
1275 struct pipe_context
*virgl_context_create(struct pipe_screen
*pscreen
,
1279 struct virgl_context
*vctx
;
1280 struct virgl_screen
*rs
= virgl_screen(pscreen
);
1281 vctx
= CALLOC_STRUCT(virgl_context
);
1282 const char *host_debug_flagstring
;
1284 vctx
->cbuf
= rs
->vws
->cmd_buf_create(rs
->vws
, VIRGL_MAX_CMDBUF_DWORDS
);
1290 vctx
->base
.destroy
= virgl_context_destroy
;
1291 vctx
->base
.create_surface
= virgl_create_surface
;
1292 vctx
->base
.surface_destroy
= virgl_surface_destroy
;
1293 vctx
->base
.set_framebuffer_state
= virgl_set_framebuffer_state
;
1294 vctx
->base
.create_blend_state
= virgl_create_blend_state
;
1295 vctx
->base
.bind_blend_state
= virgl_bind_blend_state
;
1296 vctx
->base
.delete_blend_state
= virgl_delete_blend_state
;
1297 vctx
->base
.create_depth_stencil_alpha_state
= virgl_create_depth_stencil_alpha_state
;
1298 vctx
->base
.bind_depth_stencil_alpha_state
= virgl_bind_depth_stencil_alpha_state
;
1299 vctx
->base
.delete_depth_stencil_alpha_state
= virgl_delete_depth_stencil_alpha_state
;
1300 vctx
->base
.create_rasterizer_state
= virgl_create_rasterizer_state
;
1301 vctx
->base
.bind_rasterizer_state
= virgl_bind_rasterizer_state
;
1302 vctx
->base
.delete_rasterizer_state
= virgl_delete_rasterizer_state
;
1304 vctx
->base
.set_viewport_states
= virgl_set_viewport_states
;
1305 vctx
->base
.create_vertex_elements_state
= virgl_create_vertex_elements_state
;
1306 vctx
->base
.bind_vertex_elements_state
= virgl_bind_vertex_elements_state
;
1307 vctx
->base
.delete_vertex_elements_state
= virgl_delete_vertex_elements_state
;
1308 vctx
->base
.set_vertex_buffers
= virgl_set_vertex_buffers
;
1309 vctx
->base
.set_constant_buffer
= virgl_set_constant_buffer
;
1311 vctx
->base
.set_tess_state
= virgl_set_tess_state
;
1312 vctx
->base
.create_vs_state
= virgl_create_vs_state
;
1313 vctx
->base
.create_tcs_state
= virgl_create_tcs_state
;
1314 vctx
->base
.create_tes_state
= virgl_create_tes_state
;
1315 vctx
->base
.create_gs_state
= virgl_create_gs_state
;
1316 vctx
->base
.create_fs_state
= virgl_create_fs_state
;
1318 vctx
->base
.bind_vs_state
= virgl_bind_vs_state
;
1319 vctx
->base
.bind_tcs_state
= virgl_bind_tcs_state
;
1320 vctx
->base
.bind_tes_state
= virgl_bind_tes_state
;
1321 vctx
->base
.bind_gs_state
= virgl_bind_gs_state
;
1322 vctx
->base
.bind_fs_state
= virgl_bind_fs_state
;
1324 vctx
->base
.delete_vs_state
= virgl_delete_vs_state
;
1325 vctx
->base
.delete_tcs_state
= virgl_delete_tcs_state
;
1326 vctx
->base
.delete_tes_state
= virgl_delete_tes_state
;
1327 vctx
->base
.delete_gs_state
= virgl_delete_gs_state
;
1328 vctx
->base
.delete_fs_state
= virgl_delete_fs_state
;
1330 vctx
->base
.create_compute_state
= virgl_create_compute_state
;
1331 vctx
->base
.bind_compute_state
= virgl_bind_compute_state
;
1332 vctx
->base
.delete_compute_state
= virgl_delete_compute_state
;
1333 vctx
->base
.launch_grid
= virgl_launch_grid
;
1335 vctx
->base
.clear
= virgl_clear
;
1336 vctx
->base
.draw_vbo
= virgl_draw_vbo
;
1337 vctx
->base
.flush
= virgl_flush_from_st
;
1338 vctx
->base
.screen
= pscreen
;
1339 vctx
->base
.create_sampler_view
= virgl_create_sampler_view
;
1340 vctx
->base
.sampler_view_destroy
= virgl_destroy_sampler_view
;
1341 vctx
->base
.set_sampler_views
= virgl_set_sampler_views
;
1342 vctx
->base
.texture_barrier
= virgl_texture_barrier
;
1344 vctx
->base
.create_sampler_state
= virgl_create_sampler_state
;
1345 vctx
->base
.delete_sampler_state
= virgl_delete_sampler_state
;
1346 vctx
->base
.bind_sampler_states
= virgl_bind_sampler_states
;
1348 vctx
->base
.set_polygon_stipple
= virgl_set_polygon_stipple
;
1349 vctx
->base
.set_scissor_states
= virgl_set_scissor_states
;
1350 vctx
->base
.set_sample_mask
= virgl_set_sample_mask
;
1351 vctx
->base
.set_min_samples
= virgl_set_min_samples
;
1352 vctx
->base
.set_stencil_ref
= virgl_set_stencil_ref
;
1353 vctx
->base
.set_clip_state
= virgl_set_clip_state
;
1355 vctx
->base
.set_blend_color
= virgl_set_blend_color
;
1357 vctx
->base
.get_sample_position
= virgl_get_sample_position
;
1359 vctx
->base
.resource_copy_region
= virgl_resource_copy_region
;
1360 vctx
->base
.flush_resource
= virgl_flush_resource
;
1361 vctx
->base
.blit
= virgl_blit
;
1362 vctx
->base
.create_fence_fd
= virgl_create_fence_fd
;
1363 vctx
->base
.fence_server_sync
= virgl_fence_server_sync
;
1365 vctx
->base
.set_shader_buffers
= virgl_set_shader_buffers
;
1366 vctx
->base
.set_hw_atomic_buffers
= virgl_set_hw_atomic_buffers
;
1367 vctx
->base
.set_shader_images
= virgl_set_shader_images
;
1368 vctx
->base
.memory_barrier
= virgl_memory_barrier
;
1370 virgl_init_context_resource_functions(&vctx
->base
);
1371 virgl_init_query_functions(vctx
);
1372 virgl_init_so_functions(vctx
);
1374 slab_create_child(&vctx
->transfer_pool
, &rs
->transfer_pool
);
1375 virgl_transfer_queue_init(&vctx
->queue
, rs
, &vctx
->transfer_pool
);
1376 vctx
->encoded_transfers
= (rs
->vws
->supports_encoded_transfers
&&
1377 (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_TRANSFER
));
1379 /* Reserve some space for transfers. */
1380 if (vctx
->encoded_transfers
)
1381 vctx
->cbuf
->cdw
= VIRGL_MAX_TBUF_DWORDS
;
1383 vctx
->primconvert
= util_primconvert_create(&vctx
->base
, rs
->caps
.caps
.v1
.prim_mask
);
1384 vctx
->uploader
= u_upload_create(&vctx
->base
, 1024 * 1024,
1385 PIPE_BIND_INDEX_BUFFER
, PIPE_USAGE_STREAM
, 0);
1386 if (!vctx
->uploader
)
1388 vctx
->base
.stream_uploader
= vctx
->uploader
;
1389 vctx
->base
.const_uploader
= vctx
->uploader
;
1390 /* Use a custom/staging buffer for the transfer uploader, since we are
1391 * using it only for copies to other resources.
1393 if ((rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_COPY_TRANSFER
) &&
1394 vctx
->encoded_transfers
) {
1395 vctx
->transfer_uploader
= u_upload_create(&vctx
->base
, 1024 * 1024,
1398 VIRGL_RESOURCE_FLAG_STAGING
);
1399 if (!vctx
->transfer_uploader
)
1403 vctx
->hw_sub_ctx_id
= rs
->sub_ctx_id
++;
1404 virgl_encoder_create_sub_ctx(vctx
, vctx
->hw_sub_ctx_id
);
1406 virgl_encoder_set_sub_ctx(vctx
, vctx
->hw_sub_ctx_id
);
1408 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_GUEST_MAY_INIT_LOG
) {
1409 host_debug_flagstring
= getenv("VIRGL_HOST_DEBUG");
1410 if (host_debug_flagstring
)
1411 virgl_encode_host_debug_flagstring(vctx
, host_debug_flagstring
);
1416 virgl_context_destroy(&vctx
->base
);