/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "pipe/p_shader_tokens.h"
27 #include "pipe/p_context.h"
28 #include "pipe/p_defines.h"
29 #include "pipe/p_screen.h"
30 #include "pipe/p_state.h"
31 #include "util/u_inlines.h"
32 #include "util/u_memory.h"
33 #include "util/u_format.h"
34 #include "util/u_prim.h"
35 #include "util/u_transfer.h"
36 #include "util/u_helpers.h"
37 #include "util/slab.h"
38 #include "util/u_upload_mgr.h"
39 #include "util/u_blitter.h"
40 #include "tgsi/tgsi_text.h"
41 #include "indices/u_primconvert.h"
43 #include "pipebuffer/pb_buffer.h"
45 #include "virgl_encode.h"
46 #include "virgl_context.h"
47 #include "virgl_protocol.h"
48 #include "virgl_resource.h"
49 #include "virgl_screen.h"
51 struct virgl_vertex_elements_state
{
53 uint8_t binding_map
[PIPE_MAX_ATTRIBS
];
57 static uint32_t next_handle
;
58 uint32_t virgl_object_assign_handle(void)
63 static void virgl_attach_res_framebuffer(struct virgl_context
*vctx
)
65 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
66 struct pipe_surface
*surf
;
67 struct virgl_resource
*res
;
70 surf
= vctx
->framebuffer
.zsbuf
;
72 res
= virgl_resource(surf
->texture
);
74 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
76 for (i
= 0; i
< vctx
->framebuffer
.nr_cbufs
; i
++) {
77 surf
= vctx
->framebuffer
.cbufs
[i
];
79 res
= virgl_resource(surf
->texture
);
81 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
86 static void virgl_attach_res_sampler_views(struct virgl_context
*vctx
,
87 enum pipe_shader_type shader_type
)
89 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
90 struct virgl_textures_info
*tinfo
= &vctx
->samplers
[shader_type
];
91 struct virgl_resource
*res
;
92 uint32_t remaining_mask
= tinfo
->enabled_mask
;
94 while (remaining_mask
) {
95 i
= u_bit_scan(&remaining_mask
);
96 assert(tinfo
->views
[i
]);
98 res
= virgl_resource(tinfo
->views
[i
]->base
.texture
);
100 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
104 static void virgl_attach_res_vertex_buffers(struct virgl_context
*vctx
)
106 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
107 struct virgl_resource
*res
;
110 for (i
= 0; i
< vctx
->num_vertex_buffers
; i
++) {
111 res
= virgl_resource(vctx
->vertex_buffer
[i
].buffer
.resource
);
113 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
117 static void virgl_attach_res_index_buffer(struct virgl_context
*vctx
,
118 struct virgl_indexbuf
*ib
)
120 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
121 struct virgl_resource
*res
;
123 res
= virgl_resource(ib
->buffer
);
125 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
128 static void virgl_attach_res_so_targets(struct virgl_context
*vctx
)
130 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
131 struct virgl_resource
*res
;
134 for (i
= 0; i
< vctx
->num_so_targets
; i
++) {
135 res
= virgl_resource(vctx
->so_targets
[i
].base
.buffer
);
137 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
141 static void virgl_attach_res_uniform_buffers(struct virgl_context
*vctx
,
142 enum pipe_shader_type shader_type
)
144 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
145 struct virgl_resource
*res
;
147 for (i
= 0; i
< PIPE_MAX_CONSTANT_BUFFERS
; i
++) {
148 res
= virgl_resource(vctx
->ubos
[shader_type
][i
]);
150 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
155 static void virgl_attach_res_shader_buffers(struct virgl_context
*vctx
,
156 enum pipe_shader_type shader_type
)
158 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
159 struct virgl_resource
*res
;
161 for (i
= 0; i
< PIPE_MAX_SHADER_BUFFERS
; i
++) {
162 res
= virgl_resource(vctx
->ssbos
[shader_type
][i
]);
164 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
169 static void virgl_attach_res_shader_images(struct virgl_context
*vctx
,
170 enum pipe_shader_type shader_type
)
172 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
173 struct virgl_resource
*res
;
175 for (i
= 0; i
< PIPE_MAX_SHADER_IMAGES
; i
++) {
176 res
= virgl_resource(vctx
->images
[shader_type
][i
]);
178 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
183 static void virgl_attach_res_atomic_buffers(struct virgl_context
*vctx
)
185 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
186 struct virgl_resource
*res
;
188 for (i
= 0; i
< PIPE_MAX_HW_ATOMIC_BUFFERS
; i
++) {
189 res
= virgl_resource(vctx
->atomic_buffers
[i
]);
191 vws
->emit_res(vws
, vctx
->cbuf
, res
->hw_res
, FALSE
);
197 * after flushing, the hw context still has a bunch of
198 * resources bound, so we need to rebind those here.
200 static void virgl_reemit_res(struct virgl_context
*vctx
)
202 enum pipe_shader_type shader_type
;
204 /* reattach any flushed resources */
205 /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
206 virgl_attach_res_framebuffer(vctx
);
208 for (shader_type
= 0; shader_type
< PIPE_SHADER_TYPES
; shader_type
++) {
209 virgl_attach_res_sampler_views(vctx
, shader_type
);
210 virgl_attach_res_uniform_buffers(vctx
, shader_type
);
211 virgl_attach_res_shader_buffers(vctx
, shader_type
);
212 virgl_attach_res_shader_images(vctx
, shader_type
);
214 virgl_attach_res_atomic_buffers(vctx
);
215 virgl_attach_res_vertex_buffers(vctx
);
216 virgl_attach_res_so_targets(vctx
);
219 static struct pipe_surface
*virgl_create_surface(struct pipe_context
*ctx
,
220 struct pipe_resource
*resource
,
221 const struct pipe_surface
*templ
)
223 struct virgl_context
*vctx
= virgl_context(ctx
);
224 struct virgl_surface
*surf
;
225 struct virgl_resource
*res
= virgl_resource(resource
);
228 surf
= CALLOC_STRUCT(virgl_surface
);
232 assert(ctx
->screen
->get_param(ctx
->screen
,
233 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL
) ||
234 (util_format_is_srgb(templ
->format
) ==
235 util_format_is_srgb(resource
->format
)));
237 res
->clean
[0] = FALSE
;
238 handle
= virgl_object_assign_handle();
239 pipe_reference_init(&surf
->base
.reference
, 1);
240 pipe_resource_reference(&surf
->base
.texture
, resource
);
241 surf
->base
.context
= ctx
;
242 surf
->base
.format
= templ
->format
;
243 if (resource
->target
!= PIPE_BUFFER
) {
244 surf
->base
.width
= u_minify(resource
->width0
, templ
->u
.tex
.level
);
245 surf
->base
.height
= u_minify(resource
->height0
, templ
->u
.tex
.level
);
246 surf
->base
.u
.tex
.level
= templ
->u
.tex
.level
;
247 surf
->base
.u
.tex
.first_layer
= templ
->u
.tex
.first_layer
;
248 surf
->base
.u
.tex
.last_layer
= templ
->u
.tex
.last_layer
;
250 surf
->base
.width
= templ
->u
.buf
.last_element
- templ
->u
.buf
.first_element
+ 1;
251 surf
->base
.height
= resource
->height0
;
252 surf
->base
.u
.buf
.first_element
= templ
->u
.buf
.first_element
;
253 surf
->base
.u
.buf
.last_element
= templ
->u
.buf
.last_element
;
255 virgl_encoder_create_surface(vctx
, handle
, res
, &surf
->base
);
256 surf
->handle
= handle
;
260 static void virgl_surface_destroy(struct pipe_context
*ctx
,
261 struct pipe_surface
*psurf
)
263 struct virgl_context
*vctx
= virgl_context(ctx
);
264 struct virgl_surface
*surf
= virgl_surface(psurf
);
266 pipe_resource_reference(&surf
->base
.texture
, NULL
);
267 virgl_encode_delete_object(vctx
, surf
->handle
, VIRGL_OBJECT_SURFACE
);
271 static void *virgl_create_blend_state(struct pipe_context
*ctx
,
272 const struct pipe_blend_state
*blend_state
)
274 struct virgl_context
*vctx
= virgl_context(ctx
);
276 handle
= virgl_object_assign_handle();
278 virgl_encode_blend_state(vctx
, handle
, blend_state
);
279 return (void *)(unsigned long)handle
;
283 static void virgl_bind_blend_state(struct pipe_context
*ctx
,
286 struct virgl_context
*vctx
= virgl_context(ctx
);
287 uint32_t handle
= (unsigned long)blend_state
;
288 virgl_encode_bind_object(vctx
, handle
, VIRGL_OBJECT_BLEND
);
291 static void virgl_delete_blend_state(struct pipe_context
*ctx
,
294 struct virgl_context
*vctx
= virgl_context(ctx
);
295 uint32_t handle
= (unsigned long)blend_state
;
296 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_BLEND
);
299 static void *virgl_create_depth_stencil_alpha_state(struct pipe_context
*ctx
,
300 const struct pipe_depth_stencil_alpha_state
*blend_state
)
302 struct virgl_context
*vctx
= virgl_context(ctx
);
304 handle
= virgl_object_assign_handle();
306 virgl_encode_dsa_state(vctx
, handle
, blend_state
);
307 return (void *)(unsigned long)handle
;
310 static void virgl_bind_depth_stencil_alpha_state(struct pipe_context
*ctx
,
313 struct virgl_context
*vctx
= virgl_context(ctx
);
314 uint32_t handle
= (unsigned long)blend_state
;
315 virgl_encode_bind_object(vctx
, handle
, VIRGL_OBJECT_DSA
);
318 static void virgl_delete_depth_stencil_alpha_state(struct pipe_context
*ctx
,
321 struct virgl_context
*vctx
= virgl_context(ctx
);
322 uint32_t handle
= (unsigned long)dsa_state
;
323 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_DSA
);
326 static void *virgl_create_rasterizer_state(struct pipe_context
*ctx
,
327 const struct pipe_rasterizer_state
*rs_state
)
329 struct virgl_context
*vctx
= virgl_context(ctx
);
330 struct virgl_rasterizer_state
*vrs
= CALLOC_STRUCT(virgl_rasterizer_state
);
335 vrs
->handle
= virgl_object_assign_handle();
337 virgl_encode_rasterizer_state(vctx
, vrs
->handle
, rs_state
);
341 static void virgl_bind_rasterizer_state(struct pipe_context
*ctx
,
344 struct virgl_context
*vctx
= virgl_context(ctx
);
347 struct virgl_rasterizer_state
*vrs
= rs_state
;
348 vctx
->rs_state
= *vrs
;
349 handle
= vrs
->handle
;
351 virgl_encode_bind_object(vctx
, handle
, VIRGL_OBJECT_RASTERIZER
);
354 static void virgl_delete_rasterizer_state(struct pipe_context
*ctx
,
357 struct virgl_context
*vctx
= virgl_context(ctx
);
358 struct virgl_rasterizer_state
*vrs
= rs_state
;
359 virgl_encode_delete_object(vctx
, vrs
->handle
, VIRGL_OBJECT_RASTERIZER
);
363 static void virgl_set_framebuffer_state(struct pipe_context
*ctx
,
364 const struct pipe_framebuffer_state
*state
)
366 struct virgl_context
*vctx
= virgl_context(ctx
);
368 vctx
->framebuffer
= *state
;
369 virgl_encoder_set_framebuffer_state(vctx
, state
);
370 virgl_attach_res_framebuffer(vctx
);
/* Forward viewport state straight to the encoder. */
static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}
382 static void *virgl_create_vertex_elements_state(struct pipe_context
*ctx
,
383 unsigned num_elements
,
384 const struct pipe_vertex_element
*elements
)
386 struct pipe_vertex_element new_elements
[PIPE_MAX_ATTRIBS
];
387 struct virgl_context
*vctx
= virgl_context(ctx
);
388 struct virgl_vertex_elements_state
*state
=
389 CALLOC_STRUCT(virgl_vertex_elements_state
);
391 for (int i
= 0; i
< num_elements
; ++i
) {
392 if (elements
[i
].instance_divisor
) {
393 /* Virglrenderer doesn't deal with instance_divisor correctly if
394 * there isn't a 1:1 relationship between elements and bindings.
395 * So let's make sure there is, by duplicating bindings.
397 for (int j
= 0; j
< num_elements
; ++j
) {
398 new_elements
[j
] = elements
[j
];
399 new_elements
[j
].vertex_buffer_index
= j
;
400 state
->binding_map
[j
] = elements
[j
].vertex_buffer_index
;
402 elements
= new_elements
;
403 state
->num_bindings
= num_elements
;
408 state
->handle
= virgl_object_assign_handle();
409 virgl_encoder_create_vertex_elements(vctx
, state
->handle
,
410 num_elements
, elements
);
414 static void virgl_delete_vertex_elements_state(struct pipe_context
*ctx
,
417 struct virgl_context
*vctx
= virgl_context(ctx
);
418 struct virgl_vertex_elements_state
*state
=
419 (struct virgl_vertex_elements_state
*)ve
;
420 virgl_encode_delete_object(vctx
, state
->handle
, VIRGL_OBJECT_VERTEX_ELEMENTS
);
424 static void virgl_bind_vertex_elements_state(struct pipe_context
*ctx
,
427 struct virgl_context
*vctx
= virgl_context(ctx
);
428 struct virgl_vertex_elements_state
*state
=
429 (struct virgl_vertex_elements_state
*)ve
;
430 vctx
->vertex_elements
= state
;
431 virgl_encode_bind_object(vctx
, state
? state
->handle
: 0,
432 VIRGL_OBJECT_VERTEX_ELEMENTS
);
433 vctx
->vertex_array_dirty
= TRUE
;
436 static void virgl_set_vertex_buffers(struct pipe_context
*ctx
,
438 unsigned num_buffers
,
439 const struct pipe_vertex_buffer
*buffers
)
441 struct virgl_context
*vctx
= virgl_context(ctx
);
443 util_set_vertex_buffers_count(vctx
->vertex_buffer
,
444 &vctx
->num_vertex_buffers
,
445 buffers
, start_slot
, num_buffers
);
447 vctx
->vertex_array_dirty
= TRUE
;
450 static void virgl_hw_set_vertex_buffers(struct virgl_context
*vctx
)
452 if (vctx
->vertex_array_dirty
) {
453 struct virgl_vertex_elements_state
*ve
= vctx
->vertex_elements
;
455 if (ve
->num_bindings
) {
456 struct pipe_vertex_buffer vertex_buffers
[PIPE_MAX_ATTRIBS
];
457 for (int i
= 0; i
< ve
->num_bindings
; ++i
)
458 vertex_buffers
[i
] = vctx
->vertex_buffer
[ve
->binding_map
[i
]];
460 virgl_encoder_set_vertex_buffers(vctx
, ve
->num_bindings
, vertex_buffers
);
462 virgl_encoder_set_vertex_buffers(vctx
, vctx
->num_vertex_buffers
, vctx
->vertex_buffer
);
464 virgl_attach_res_vertex_buffers(vctx
);
/* Forward the stencil reference values to the encoder. */
static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}
/* Forward the blend color to the encoder. */
static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}
/* Encode the index buffer and attach its backing resource. */
static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}
489 static void virgl_set_constant_buffer(struct pipe_context
*ctx
,
490 enum pipe_shader_type shader
, uint index
,
491 const struct pipe_constant_buffer
*buf
)
493 struct virgl_context
*vctx
= virgl_context(ctx
);
496 if (!buf
->user_buffer
){
497 struct virgl_resource
*res
= virgl_resource(buf
->buffer
);
498 virgl_encoder_set_uniform_buffer(vctx
, shader
, index
, buf
->buffer_offset
,
499 buf
->buffer_size
, res
);
500 pipe_resource_reference(&vctx
->ubos
[shader
][index
], buf
->buffer
);
503 pipe_resource_reference(&vctx
->ubos
[shader
][index
], NULL
);
504 virgl_encoder_write_constant_buffer(vctx
, shader
, index
, buf
->buffer_size
/ 4, buf
->user_buffer
);
506 virgl_encoder_write_constant_buffer(vctx
, shader
, index
, 0, NULL
);
507 pipe_resource_reference(&vctx
->ubos
[shader
][index
], NULL
);
511 void virgl_transfer_inline_write(struct pipe_context
*ctx
,
512 struct pipe_resource
*res
,
515 const struct pipe_box
*box
,
518 unsigned layer_stride
)
520 struct virgl_context
*vctx
= virgl_context(ctx
);
521 struct virgl_screen
*vs
= virgl_screen(ctx
->screen
);
522 struct virgl_resource
*grres
= virgl_resource(res
);
524 grres
->clean
[0] = FALSE
;
526 if (virgl_res_needs_flush_wait(vctx
, grres
, usage
)) {
527 ctx
->flush(ctx
, NULL
, 0);
529 vs
->vws
->resource_wait(vs
->vws
, grres
->hw_res
);
532 virgl_encoder_inline_write(vctx
, grres
, level
, usage
,
533 box
, data
, stride
, layer_stride
);
536 static void *virgl_shader_encoder(struct pipe_context
*ctx
,
537 const struct pipe_shader_state
*shader
,
540 struct virgl_context
*vctx
= virgl_context(ctx
);
542 struct tgsi_token
*new_tokens
;
545 new_tokens
= virgl_tgsi_transform(vctx
, shader
->tokens
);
549 handle
= virgl_object_assign_handle();
550 /* encode VS state */
551 ret
= virgl_encode_shader_state(vctx
, handle
, type
,
552 &shader
->stream_output
, 0,
559 return (void *)(unsigned long)handle
;
562 static void *virgl_create_vs_state(struct pipe_context
*ctx
,
563 const struct pipe_shader_state
*shader
)
565 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_VERTEX
);
568 static void *virgl_create_tcs_state(struct pipe_context
*ctx
,
569 const struct pipe_shader_state
*shader
)
571 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_TESS_CTRL
);
574 static void *virgl_create_tes_state(struct pipe_context
*ctx
,
575 const struct pipe_shader_state
*shader
)
577 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_TESS_EVAL
);
580 static void *virgl_create_gs_state(struct pipe_context
*ctx
,
581 const struct pipe_shader_state
*shader
)
583 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_GEOMETRY
);
586 static void *virgl_create_fs_state(struct pipe_context
*ctx
,
587 const struct pipe_shader_state
*shader
)
589 return virgl_shader_encoder(ctx
, shader
, PIPE_SHADER_FRAGMENT
);
593 virgl_delete_fs_state(struct pipe_context
*ctx
,
596 uint32_t handle
= (unsigned long)fs
;
597 struct virgl_context
*vctx
= virgl_context(ctx
);
599 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
603 virgl_delete_gs_state(struct pipe_context
*ctx
,
606 uint32_t handle
= (unsigned long)gs
;
607 struct virgl_context
*vctx
= virgl_context(ctx
);
609 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
613 virgl_delete_vs_state(struct pipe_context
*ctx
,
616 uint32_t handle
= (unsigned long)vs
;
617 struct virgl_context
*vctx
= virgl_context(ctx
);
619 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
623 virgl_delete_tcs_state(struct pipe_context
*ctx
,
626 uint32_t handle
= (unsigned long)tcs
;
627 struct virgl_context
*vctx
= virgl_context(ctx
);
629 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
633 virgl_delete_tes_state(struct pipe_context
*ctx
,
636 uint32_t handle
= (unsigned long)tes
;
637 struct virgl_context
*vctx
= virgl_context(ctx
);
639 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
642 static void virgl_bind_vs_state(struct pipe_context
*ctx
,
645 uint32_t handle
= (unsigned long)vss
;
646 struct virgl_context
*vctx
= virgl_context(ctx
);
648 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_VERTEX
);
651 static void virgl_bind_tcs_state(struct pipe_context
*ctx
,
654 uint32_t handle
= (unsigned long)vss
;
655 struct virgl_context
*vctx
= virgl_context(ctx
);
657 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_TESS_CTRL
);
660 static void virgl_bind_tes_state(struct pipe_context
*ctx
,
663 uint32_t handle
= (unsigned long)vss
;
664 struct virgl_context
*vctx
= virgl_context(ctx
);
666 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_TESS_EVAL
);
669 static void virgl_bind_gs_state(struct pipe_context
*ctx
,
672 uint32_t handle
= (unsigned long)vss
;
673 struct virgl_context
*vctx
= virgl_context(ctx
);
675 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_GEOMETRY
);
679 static void virgl_bind_fs_state(struct pipe_context
*ctx
,
682 uint32_t handle
= (unsigned long)vss
;
683 struct virgl_context
*vctx
= virgl_context(ctx
);
685 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_FRAGMENT
);
/* Forward a clear of the bound render targets to the encoder. */
static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}
698 static void virgl_draw_vbo(struct pipe_context
*ctx
,
699 const struct pipe_draw_info
*dinfo
)
701 struct virgl_context
*vctx
= virgl_context(ctx
);
702 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
703 struct virgl_indexbuf ib
= {};
704 struct pipe_draw_info info
= *dinfo
;
706 if (!dinfo
->count_from_stream_output
&& !dinfo
->indirect
&&
707 !dinfo
->primitive_restart
&&
708 !u_trim_pipe_prim(dinfo
->mode
, (unsigned*)&dinfo
->count
))
711 if (!(rs
->caps
.caps
.v1
.prim_mask
& (1 << dinfo
->mode
))) {
712 util_primconvert_save_rasterizer_state(vctx
->primconvert
, &vctx
->rs_state
.rs
);
713 util_primconvert_draw_vbo(vctx
->primconvert
, dinfo
);
716 if (info
.index_size
) {
717 pipe_resource_reference(&ib
.buffer
, info
.has_user_indices
? NULL
: info
.index
.resource
);
718 ib
.user_buffer
= info
.has_user_indices
? info
.index
.user
: NULL
;
719 ib
.index_size
= dinfo
->index_size
;
720 ib
.offset
= info
.start
* ib
.index_size
;
722 if (ib
.user_buffer
) {
723 u_upload_data(vctx
->uploader
, 0, info
.count
* ib
.index_size
, 256,
724 ib
.user_buffer
, &ib
.offset
, &ib
.buffer
);
725 ib
.user_buffer
= NULL
;
729 u_upload_unmap(vctx
->uploader
);
732 virgl_hw_set_vertex_buffers(vctx
);
734 virgl_hw_set_index_buffer(vctx
, &ib
);
736 virgl_encoder_draw_vbo(vctx
, &info
);
738 pipe_resource_reference(&ib
.buffer
, NULL
);
742 static void virgl_flush_eq(struct virgl_context
*ctx
, void *closure
,
743 struct pipe_fence_handle
**fence
)
745 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
746 int out_fence_fd
= -1;
748 /* send the buffer to the remote side for decoding */
749 ctx
->num_transfers
= ctx
->num_draws
= 0;
751 rs
->vws
->submit_cmd(rs
->vws
, ctx
->cbuf
, ctx
->cbuf
->in_fence_fd
,
752 ctx
->cbuf
->needs_out_fence_fd
? &out_fence_fd
: NULL
);
755 *fence
= rs
->vws
->cs_create_fence(rs
->vws
, out_fence_fd
);
757 virgl_encoder_set_sub_ctx(ctx
, ctx
->hw_sub_ctx_id
);
759 /* add back current framebuffer resources to reference list? */
760 virgl_reemit_res(ctx
);
763 static void virgl_flush_from_st(struct pipe_context
*ctx
,
764 struct pipe_fence_handle
**fence
,
765 enum pipe_flush_flags flags
)
767 struct virgl_context
*vctx
= virgl_context(ctx
);
768 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
770 if (flags
& PIPE_FLUSH_FENCE_FD
)
771 vctx
->cbuf
->needs_out_fence_fd
= true;
773 virgl_flush_eq(vctx
, vctx
, fence
);
775 if (vctx
->cbuf
->in_fence_fd
!= -1) {
776 close(vctx
->cbuf
->in_fence_fd
);
777 vctx
->cbuf
->in_fence_fd
= -1;
779 vctx
->cbuf
->needs_out_fence_fd
= false;
782 static struct pipe_sampler_view
*virgl_create_sampler_view(struct pipe_context
*ctx
,
783 struct pipe_resource
*texture
,
784 const struct pipe_sampler_view
*state
)
786 struct virgl_context
*vctx
= virgl_context(ctx
);
787 struct virgl_sampler_view
*grview
;
789 struct virgl_resource
*res
;
794 grview
= CALLOC_STRUCT(virgl_sampler_view
);
798 res
= virgl_resource(texture
);
799 handle
= virgl_object_assign_handle();
800 virgl_encode_sampler_view(vctx
, handle
, res
, state
);
802 grview
->base
= *state
;
803 grview
->base
.reference
.count
= 1;
805 grview
->base
.texture
= NULL
;
806 grview
->base
.context
= ctx
;
807 pipe_resource_reference(&grview
->base
.texture
, texture
);
808 grview
->handle
= handle
;
809 return &grview
->base
;
812 static void virgl_set_sampler_views(struct pipe_context
*ctx
,
813 enum pipe_shader_type shader_type
,
816 struct pipe_sampler_view
**views
)
818 struct virgl_context
*vctx
= virgl_context(ctx
);
820 uint32_t disable_mask
= ~((1ull << num_views
) - 1);
821 struct virgl_textures_info
*tinfo
= &vctx
->samplers
[shader_type
];
822 uint32_t new_mask
= 0;
823 uint32_t remaining_mask
;
825 remaining_mask
= tinfo
->enabled_mask
& disable_mask
;
827 while (remaining_mask
) {
828 i
= u_bit_scan(&remaining_mask
);
829 assert(tinfo
->views
[i
]);
831 pipe_sampler_view_reference((struct pipe_sampler_view
**)&tinfo
->views
[i
], NULL
);
834 for (i
= 0; i
< num_views
; i
++) {
835 struct virgl_sampler_view
*grview
= virgl_sampler_view(views
[i
]);
837 if (views
[i
] == (struct pipe_sampler_view
*)tinfo
->views
[i
])
842 pipe_sampler_view_reference((struct pipe_sampler_view
**)&tinfo
->views
[i
], views
[i
]);
844 pipe_sampler_view_reference((struct pipe_sampler_view
**)&tinfo
->views
[i
], NULL
);
845 disable_mask
|= 1 << i
;
849 tinfo
->enabled_mask
&= ~disable_mask
;
850 tinfo
->enabled_mask
|= new_mask
;
851 virgl_encode_set_sampler_views(vctx
, shader_type
, start_slot
, num_views
, tinfo
->views
);
852 virgl_attach_res_sampler_views(vctx
, shader_type
);
856 virgl_texture_barrier(struct pipe_context
*ctx
, unsigned flags
)
858 struct virgl_context
*vctx
= virgl_context(ctx
);
859 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
861 if (!(rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_TEXTURE_BARRIER
))
863 virgl_encode_texture_barrier(vctx
, flags
);
866 static void virgl_destroy_sampler_view(struct pipe_context
*ctx
,
867 struct pipe_sampler_view
*view
)
869 struct virgl_context
*vctx
= virgl_context(ctx
);
870 struct virgl_sampler_view
*grview
= virgl_sampler_view(view
);
872 virgl_encode_delete_object(vctx
, grview
->handle
, VIRGL_OBJECT_SAMPLER_VIEW
);
873 pipe_resource_reference(&view
->texture
, NULL
);
877 static void *virgl_create_sampler_state(struct pipe_context
*ctx
,
878 const struct pipe_sampler_state
*state
)
880 struct virgl_context
*vctx
= virgl_context(ctx
);
883 handle
= virgl_object_assign_handle();
885 virgl_encode_sampler_state(vctx
, handle
, state
);
886 return (void *)(unsigned long)handle
;
889 static void virgl_delete_sampler_state(struct pipe_context
*ctx
,
892 struct virgl_context
*vctx
= virgl_context(ctx
);
893 uint32_t handle
= (unsigned long)ss
;
895 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SAMPLER_STATE
);
898 static void virgl_bind_sampler_states(struct pipe_context
*ctx
,
899 enum pipe_shader_type shader
,
901 unsigned num_samplers
,
904 struct virgl_context
*vctx
= virgl_context(ctx
);
905 uint32_t handles
[32];
907 for (i
= 0; i
< num_samplers
; i
++) {
908 handles
[i
] = (unsigned long)(samplers
[i
]);
910 virgl_encode_bind_sampler_states(vctx
, shader
, start_slot
, num_samplers
, handles
);
/* Forward the polygon stipple pattern to the encoder. */
static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}
/* Forward scissor state to the encoder. */
static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}
/* Forward the MSAA sample mask to the encoder. */
static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}
936 static void virgl_set_min_samples(struct pipe_context
*ctx
,
937 unsigned min_samples
)
939 struct virgl_context
*vctx
= virgl_context(ctx
);
940 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
942 if (!(rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_SET_MIN_SAMPLES
))
944 virgl_encoder_set_min_samples(vctx
, min_samples
);
/* Forward user clip plane state to the encoder. */
static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}
954 static void virgl_set_tess_state(struct pipe_context
*ctx
,
955 const float default_outer_level
[4],
956 const float default_inner_level
[2])
958 struct virgl_context
*vctx
= virgl_context(ctx
);
959 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
961 if (!rs
->caps
.caps
.v1
.bset
.has_tessellation_shaders
)
963 virgl_encode_set_tess_state(vctx
, default_outer_level
, default_inner_level
);
966 static void virgl_resource_copy_region(struct pipe_context
*ctx
,
967 struct pipe_resource
*dst
,
969 unsigned dstx
, unsigned dsty
, unsigned dstz
,
970 struct pipe_resource
*src
,
972 const struct pipe_box
*src_box
)
974 struct virgl_context
*vctx
= virgl_context(ctx
);
975 struct virgl_resource
*dres
= virgl_resource(dst
);
976 struct virgl_resource
*sres
= virgl_resource(src
);
978 dres
->clean
[0] = FALSE
;
979 virgl_encode_resource_copy_region(vctx
, dres
,
980 dst_level
, dstx
, dsty
, dstz
,
/* No-op: virgl has nothing to do for pipe->flush_resource. */
static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}
991 static void virgl_blit(struct pipe_context
*ctx
,
992 const struct pipe_blit_info
*blit
)
994 struct virgl_context
*vctx
= virgl_context(ctx
);
995 struct virgl_resource
*dres
= virgl_resource(blit
->dst
.resource
);
996 struct virgl_resource
*sres
= virgl_resource(blit
->src
.resource
);
998 assert(ctx
->screen
->get_param(ctx
->screen
,
999 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL
) ||
1000 (util_format_is_srgb(blit
->dst
.resource
->format
) ==
1001 util_format_is_srgb(blit
->dst
.format
)));
1003 dres
->clean
[0] = FALSE
;
1004 virgl_encode_blit(vctx
, dres
, sres
,
1008 static void virgl_set_hw_atomic_buffers(struct pipe_context
*ctx
,
1009 unsigned start_slot
,
1011 const struct pipe_shader_buffer
*buffers
)
1013 struct virgl_context
*vctx
= virgl_context(ctx
);
1015 for (unsigned i
= 0; i
< count
; i
++) {
1016 unsigned idx
= start_slot
+ i
;
1019 if (buffers
[i
].buffer
) {
1020 pipe_resource_reference(&vctx
->atomic_buffers
[idx
],
1025 pipe_resource_reference(&vctx
->atomic_buffers
[idx
], NULL
);
1027 virgl_encode_set_hw_atomic_buffers(vctx
, start_slot
, count
, buffers
);
1030 static void virgl_set_shader_buffers(struct pipe_context
*ctx
,
1031 enum pipe_shader_type shader
,
1032 unsigned start_slot
, unsigned count
,
1033 const struct pipe_shader_buffer
*buffers
)
1035 struct virgl_context
*vctx
= virgl_context(ctx
);
1036 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1038 for (unsigned i
= 0; i
< count
; i
++) {
1039 unsigned idx
= start_slot
+ i
;
1042 if (buffers
[i
].buffer
) {
1043 pipe_resource_reference(&vctx
->ssbos
[shader
][idx
], buffers
[i
].buffer
);
1047 pipe_resource_reference(&vctx
->ssbos
[shader
][idx
], NULL
);
1050 uint32_t max_shader_buffer
= (shader
== PIPE_SHADER_FRAGMENT
|| shader
== PIPE_SHADER_COMPUTE
) ?
1051 rs
->caps
.caps
.v2
.max_shader_buffer_frag_compute
:
1052 rs
->caps
.caps
.v2
.max_shader_buffer_other_stages
;
1053 if (!max_shader_buffer
)
1055 virgl_encode_set_shader_buffers(vctx
, shader
, start_slot
, count
, buffers
);
1058 static void virgl_create_fence_fd(struct pipe_context
*ctx
,
1059 struct pipe_fence_handle
**fence
,
1061 enum pipe_fd_type type
)
1063 assert(type
== PIPE_FD_TYPE_NATIVE_SYNC
);
1064 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1066 if (rs
->vws
->cs_create_fence
)
1067 *fence
= rs
->vws
->cs_create_fence(rs
->vws
, fd
);
1070 static void virgl_fence_server_sync(struct pipe_context
*ctx
,
1071 struct pipe_fence_handle
*fence
)
1073 struct virgl_context
*vctx
= virgl_context(ctx
);
1074 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1076 if (rs
->vws
->fence_server_sync
)
1077 rs
->vws
->fence_server_sync(rs
->vws
, vctx
->cbuf
, fence
);
1080 static void virgl_set_shader_images(struct pipe_context
*ctx
,
1081 enum pipe_shader_type shader
,
1082 unsigned start_slot
, unsigned count
,
1083 const struct pipe_image_view
*images
)
1085 struct virgl_context
*vctx
= virgl_context(ctx
);
1086 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1088 for (unsigned i
= 0; i
< count
; i
++) {
1089 unsigned idx
= start_slot
+ i
;
1092 if (images
[i
].resource
) {
1093 pipe_resource_reference(&vctx
->images
[shader
][idx
], images
[i
].resource
);
1097 pipe_resource_reference(&vctx
->images
[shader
][idx
], NULL
);
1100 uint32_t max_shader_images
= (shader
== PIPE_SHADER_FRAGMENT
|| shader
== PIPE_SHADER_COMPUTE
) ?
1101 rs
->caps
.caps
.v2
.max_shader_image_frag_compute
:
1102 rs
->caps
.caps
.v2
.max_shader_image_other_stages
;
1103 if (!max_shader_images
)
1105 virgl_encode_set_shader_images(vctx
, shader
, start_slot
, count
, images
);
1108 static void virgl_memory_barrier(struct pipe_context
*ctx
,
1111 struct virgl_context
*vctx
= virgl_context(ctx
);
1112 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1114 if (!(rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_MEMORY_BARRIER
))
1116 virgl_encode_memory_barrier(vctx
, flags
);
1119 static void *virgl_create_compute_state(struct pipe_context
*ctx
,
1120 const struct pipe_compute_state
*state
)
1122 struct virgl_context
*vctx
= virgl_context(ctx
);
1124 const struct tgsi_token
*new_tokens
= state
->prog
;
1125 struct pipe_stream_output_info so_info
= {};
1128 handle
= virgl_object_assign_handle();
1129 ret
= virgl_encode_shader_state(vctx
, handle
, PIPE_SHADER_COMPUTE
,
1131 state
->req_local_mem
,
1137 return (void *)(unsigned long)handle
;
1140 static void virgl_bind_compute_state(struct pipe_context
*ctx
, void *state
)
1142 uint32_t handle
= (unsigned long)state
;
1143 struct virgl_context
*vctx
= virgl_context(ctx
);
1145 virgl_encode_bind_shader(vctx
, handle
, PIPE_SHADER_COMPUTE
);
1148 static void virgl_delete_compute_state(struct pipe_context
*ctx
, void *state
)
1150 uint32_t handle
= (unsigned long)state
;
1151 struct virgl_context
*vctx
= virgl_context(ctx
);
1153 virgl_encode_delete_object(vctx
, handle
, VIRGL_OBJECT_SHADER
);
/* pipe_context::launch_grid — forward a compute dispatch to the host. */
static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   virgl_encode_launch_grid(virgl_context(ctx), info);
}
1164 virgl_context_destroy( struct pipe_context
*ctx
)
1166 struct virgl_context
*vctx
= virgl_context(ctx
);
1167 struct virgl_screen
*rs
= virgl_screen(ctx
->screen
);
1169 vctx
->framebuffer
.zsbuf
= NULL
;
1170 vctx
->framebuffer
.nr_cbufs
= 0;
1171 virgl_encoder_destroy_sub_ctx(vctx
, vctx
->hw_sub_ctx_id
);
1172 virgl_flush_eq(vctx
, vctx
, NULL
);
1174 rs
->vws
->cmd_buf_destroy(vctx
->cbuf
);
1176 u_upload_destroy(vctx
->uploader
);
1177 util_primconvert_destroy(vctx
->primconvert
);
1179 slab_destroy_child(&vctx
->transfer_pool
);
1183 static void virgl_get_sample_position(struct pipe_context
*ctx
,
1184 unsigned sample_count
,
1188 struct virgl_context
*vctx
= virgl_context(ctx
);
1189 struct virgl_screen
*vs
= virgl_screen(vctx
->base
.screen
);
1191 if (sample_count
> vs
->caps
.caps
.v1
.max_samples
) {
1192 debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
1193 sample_count
, vs
->caps
.caps
.v1
.max_samples
);
1197 /* The following is basically copied from dri/i965gen6_get_sample_position
1198 * The only addition is that we hold the msaa positions for all sample
1199 * counts in a flat array. */
1201 if (sample_count
== 1) {
1202 out_value
[0] = out_value
[1] = 0.5f
;
1204 } else if (sample_count
== 2) {
1205 bits
= vs
->caps
.caps
.v2
.sample_locations
[0] >> (8 * index
);
1206 } else if (sample_count
<= 4) {
1207 bits
= vs
->caps
.caps
.v2
.sample_locations
[1] >> (8 * index
);
1208 } else if (sample_count
<= 8) {
1209 bits
= vs
->caps
.caps
.v2
.sample_locations
[2 + (index
>> 2)] >> (8 * (index
& 3));
1210 } else if (sample_count
<= 16) {
1211 bits
= vs
->caps
.caps
.v2
.sample_locations
[4 + (index
>> 2)] >> (8 * (index
& 3));
1213 out_value
[0] = ((bits
>> 4) & 0xf) / 16.0f
;
1214 out_value
[1] = (bits
& 0xf) / 16.0f
;
1216 if (virgl_debug
& VIRGL_DEBUG_VERBOSE
)
1217 debug_printf("VIRGL: sample postion [%2d/%2d] = (%f, %f)\n",
1218 index
, sample_count
, out_value
[0], out_value
[1]);
1221 struct pipe_context
*virgl_context_create(struct pipe_screen
*pscreen
,
1225 struct virgl_context
*vctx
;
1226 struct virgl_screen
*rs
= virgl_screen(pscreen
);
1227 vctx
= CALLOC_STRUCT(virgl_context
);
1228 const char *host_debug_flagstring
;
1230 vctx
->cbuf
= rs
->vws
->cmd_buf_create(rs
->vws
);
1236 vctx
->base
.destroy
= virgl_context_destroy
;
1237 vctx
->base
.create_surface
= virgl_create_surface
;
1238 vctx
->base
.surface_destroy
= virgl_surface_destroy
;
1239 vctx
->base
.set_framebuffer_state
= virgl_set_framebuffer_state
;
1240 vctx
->base
.create_blend_state
= virgl_create_blend_state
;
1241 vctx
->base
.bind_blend_state
= virgl_bind_blend_state
;
1242 vctx
->base
.delete_blend_state
= virgl_delete_blend_state
;
1243 vctx
->base
.create_depth_stencil_alpha_state
= virgl_create_depth_stencil_alpha_state
;
1244 vctx
->base
.bind_depth_stencil_alpha_state
= virgl_bind_depth_stencil_alpha_state
;
1245 vctx
->base
.delete_depth_stencil_alpha_state
= virgl_delete_depth_stencil_alpha_state
;
1246 vctx
->base
.create_rasterizer_state
= virgl_create_rasterizer_state
;
1247 vctx
->base
.bind_rasterizer_state
= virgl_bind_rasterizer_state
;
1248 vctx
->base
.delete_rasterizer_state
= virgl_delete_rasterizer_state
;
1250 vctx
->base
.set_viewport_states
= virgl_set_viewport_states
;
1251 vctx
->base
.create_vertex_elements_state
= virgl_create_vertex_elements_state
;
1252 vctx
->base
.bind_vertex_elements_state
= virgl_bind_vertex_elements_state
;
1253 vctx
->base
.delete_vertex_elements_state
= virgl_delete_vertex_elements_state
;
1254 vctx
->base
.set_vertex_buffers
= virgl_set_vertex_buffers
;
1255 vctx
->base
.set_constant_buffer
= virgl_set_constant_buffer
;
1257 vctx
->base
.set_tess_state
= virgl_set_tess_state
;
1258 vctx
->base
.create_vs_state
= virgl_create_vs_state
;
1259 vctx
->base
.create_tcs_state
= virgl_create_tcs_state
;
1260 vctx
->base
.create_tes_state
= virgl_create_tes_state
;
1261 vctx
->base
.create_gs_state
= virgl_create_gs_state
;
1262 vctx
->base
.create_fs_state
= virgl_create_fs_state
;
1264 vctx
->base
.bind_vs_state
= virgl_bind_vs_state
;
1265 vctx
->base
.bind_tcs_state
= virgl_bind_tcs_state
;
1266 vctx
->base
.bind_tes_state
= virgl_bind_tes_state
;
1267 vctx
->base
.bind_gs_state
= virgl_bind_gs_state
;
1268 vctx
->base
.bind_fs_state
= virgl_bind_fs_state
;
1270 vctx
->base
.delete_vs_state
= virgl_delete_vs_state
;
1271 vctx
->base
.delete_tcs_state
= virgl_delete_tcs_state
;
1272 vctx
->base
.delete_tes_state
= virgl_delete_tes_state
;
1273 vctx
->base
.delete_gs_state
= virgl_delete_gs_state
;
1274 vctx
->base
.delete_fs_state
= virgl_delete_fs_state
;
1276 vctx
->base
.create_compute_state
= virgl_create_compute_state
;
1277 vctx
->base
.bind_compute_state
= virgl_bind_compute_state
;
1278 vctx
->base
.delete_compute_state
= virgl_delete_compute_state
;
1279 vctx
->base
.launch_grid
= virgl_launch_grid
;
1281 vctx
->base
.clear
= virgl_clear
;
1282 vctx
->base
.draw_vbo
= virgl_draw_vbo
;
1283 vctx
->base
.flush
= virgl_flush_from_st
;
1284 vctx
->base
.screen
= pscreen
;
1285 vctx
->base
.create_sampler_view
= virgl_create_sampler_view
;
1286 vctx
->base
.sampler_view_destroy
= virgl_destroy_sampler_view
;
1287 vctx
->base
.set_sampler_views
= virgl_set_sampler_views
;
1288 vctx
->base
.texture_barrier
= virgl_texture_barrier
;
1290 vctx
->base
.create_sampler_state
= virgl_create_sampler_state
;
1291 vctx
->base
.delete_sampler_state
= virgl_delete_sampler_state
;
1292 vctx
->base
.bind_sampler_states
= virgl_bind_sampler_states
;
1294 vctx
->base
.set_polygon_stipple
= virgl_set_polygon_stipple
;
1295 vctx
->base
.set_scissor_states
= virgl_set_scissor_states
;
1296 vctx
->base
.set_sample_mask
= virgl_set_sample_mask
;
1297 vctx
->base
.set_min_samples
= virgl_set_min_samples
;
1298 vctx
->base
.set_stencil_ref
= virgl_set_stencil_ref
;
1299 vctx
->base
.set_clip_state
= virgl_set_clip_state
;
1301 vctx
->base
.set_blend_color
= virgl_set_blend_color
;
1303 vctx
->base
.get_sample_position
= virgl_get_sample_position
;
1305 vctx
->base
.resource_copy_region
= virgl_resource_copy_region
;
1306 vctx
->base
.flush_resource
= virgl_flush_resource
;
1307 vctx
->base
.blit
= virgl_blit
;
1308 vctx
->base
.create_fence_fd
= virgl_create_fence_fd
;
1309 vctx
->base
.fence_server_sync
= virgl_fence_server_sync
;
1311 vctx
->base
.set_shader_buffers
= virgl_set_shader_buffers
;
1312 vctx
->base
.set_hw_atomic_buffers
= virgl_set_hw_atomic_buffers
;
1313 vctx
->base
.set_shader_images
= virgl_set_shader_images
;
1314 vctx
->base
.memory_barrier
= virgl_memory_barrier
;
1316 virgl_init_context_resource_functions(&vctx
->base
);
1317 virgl_init_query_functions(vctx
);
1318 virgl_init_so_functions(vctx
);
1320 slab_create_child(&vctx
->transfer_pool
, &rs
->transfer_pool
);
1322 vctx
->primconvert
= util_primconvert_create(&vctx
->base
, rs
->caps
.caps
.v1
.prim_mask
);
1323 vctx
->uploader
= u_upload_create(&vctx
->base
, 1024 * 1024,
1324 PIPE_BIND_INDEX_BUFFER
, PIPE_USAGE_STREAM
, 0);
1325 if (!vctx
->uploader
)
1327 vctx
->base
.stream_uploader
= vctx
->uploader
;
1328 vctx
->base
.const_uploader
= vctx
->uploader
;
1330 vctx
->hw_sub_ctx_id
= rs
->sub_ctx_id
++;
1331 virgl_encoder_create_sub_ctx(vctx
, vctx
->hw_sub_ctx_id
);
1333 virgl_encoder_set_sub_ctx(vctx
, vctx
->hw_sub_ctx_id
);
1335 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_GUEST_MAY_INIT_LOG
) {
1336 host_debug_flagstring
= getenv("VIRGL_HOST_DEBUG");
1337 if (host_debug_flagstring
)
1338 virgl_encode_host_debug_flagstring(vctx
, host_debug_flagstring
);