/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "pipebuffer/pb_buffer.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

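/* Object handles are small guest-assigned IDs that name host-side objects
 * in the command stream.  A monotonically increasing counter is enough;
 * note that the increment below is not atomic, so concurrent contexts are
 * presumably serialized by a lock higher up the stack.
 */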
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}

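/* The virgl_attach_res_* helpers below re-emit the hardware resource of
 * every currently bound object into the active command buffer's reference
 * list (via vws->emit_res), so those resources stay alive for the lifetime
 * of the submission.
 */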
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res)
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   struct virgl_resource *res;
   uint32_t remaining_mask = tinfo->enabled_mask;
   unsigned i;
   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      res = virgl_resource(tinfo->views[i]->base.texture);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
      res = virgl_resource(vctx->ubos[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
      res = virgl_resource(vctx->ssbos[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
      res = virgl_resource(vctx->images[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
      res = virgl_resource(vctx->atomic_buffers[i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

/*
 * After a flush the host context still has these resources bound, but the
 * new command buffer starts with an empty reference list, so re-emit a
 * reference to everything that is currently bound.
 */
static void virgl_reemit_res(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   res->clean[0] = FALSE;
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;
   if (resource->target != PIPE_BUFFER) {
      surf->base.width = u_minify(resource->width0, templ->u.tex.level);
      surf->base.height = u_minify(resource->height0, templ->u.tex.level);
      surf->base.u.tex.level = templ->u.tex.level;
      surf->base.u.tex.first_layer = templ->u.tex.first_layer;
      surf->base.u.tex.last_layer = templ->u.tex.last_layer;
   } else {
      surf->base.width = templ->u.buf.last_element - templ->u.buf.first_element + 1;
      surf->base.height = resource->height0;
      surf->base.u.buf.first_element = templ->u.buf.first_element;
      surf->base.u.buf.last_element = templ->u.buf.last_element;
   }
   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

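/* Worked example of the binding duplication done below (illustrative values
 * only): given elements A(buffer 0), B(buffer 1, divisor 2), C(buffer 0),
 * the state becomes three 1:1 bindings with binding_map = {0, 1, 0}, and
 * virgl_hw_set_vertex_buffers() later replays vertex_buffer[binding_map[i]]
 * into slot i for each binding.
 */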
static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   if (!state)
      return NULL;

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers);

   vctx->vertex_array_dirty = TRUE;
}

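/* Vertex buffers are recorded by virgl_set_vertex_buffers() but only
 * encoded here, at draw time, once the bound vertex-elements state (and
 * any binding remapping it carries) is known.
 */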
static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

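/* Constant buffers come in two flavours: resource-backed buffers are
 * encoded as a reference to the hardware resource, while user buffers are
 * copied inline into the command stream (the size is passed in dwords,
 * hence the division by 4 below).
 */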
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (buf) {
      if (!buf->user_buffer) {
         struct virgl_resource *res = virgl_resource(buf->buffer);
         virgl_encoder_set_uniform_buffer(vctx, shader, index, buf->buffer_offset,
                                          buf->buffer_size, res);
         pipe_resource_reference(&vctx->ubos[shader][index], buf->buffer);
         return;
      }
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
      virgl_encoder_write_constant_buffer(vctx, shader, index, buf->buffer_size / 4, buf->user_buffer);
   } else {
      virgl_encoder_write_constant_buffer(vctx, shader, index, 0, NULL);
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
   }
}

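/* Inline writes embed the data directly in the command stream.  If the
 * resource may still be referenced by an already-queued command buffer,
 * flush and wait for it first so the write cannot land out of order.
 */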
void virgl_transfer_inline_write(struct pipe_context *ctx,
                                 struct pipe_resource *res,
                                 unsigned level,
                                 unsigned usage,
                                 const struct pipe_box *box,
                                 const void *data,
                                 unsigned stride,
                                 unsigned layer_stride)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(ctx->screen);
   struct virgl_resource *grres = virgl_resource(res);

   grres->clean[0] = FALSE;

   if (virgl_res_needs_flush_wait(vctx, grres, usage)) {
      ctx->flush(ctx, NULL, 0);

      vs->vws->resource_wait(vs->vws, grres->hw_res);
   }

   virgl_encoder_inline_write(vctx, grres, level, usage,
                              box, data, stride, layer_stride);
}

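/* Common path for all graphics shader stages: run the TGSI tokens through
 * the virgl-specific transform, then encode the resulting shader under a
 * fresh handle.  The handle itself is returned as the CSO pointer.
 */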
static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   struct tgsi_token *new_tokens;
   int ret;

   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode shader state for the requested stage */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE(new_tokens);
      return NULL;
   }

   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

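/* Draw entry point: degenerate draws are trimmed away, topologies the host
 * does not advertise in prim_mask are lowered with u_primconvert, and user
 * index buffers are uploaded through the u_upload_mgr before the draw
 * itself is encoded.
 */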
static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = info.start * ib.index_size;

      if (ib.user_buffer) {
         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 256,
                       ib.user_buffer, &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
   }

   u_upload_unmap(vctx->uploader);

   vctx->num_draws++;
   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   pipe_resource_reference(&ib.buffer, NULL);
}

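/* Flush the queued commands to the host.  After submission the encoder
 * starts a fresh command buffer, so the current sub-context has to be
 * re-selected and all bound resources re-referenced (virgl_reemit_res).
 */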
static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   int out_fence_fd = -1;

   /* send the buffer to the remote side for decoding */
   ctx->num_transfers = ctx->num_draws = 0;

   rs->vws->submit_cmd(rs->vws, ctx->cbuf, ctx->cbuf->in_fence_fd,
                       ctx->cbuf->needs_out_fence_fd ? &out_fence_fd : NULL);

   if (fence)
      *fence = rs->vws->cs_create_fence(rs->vws, out_fence_fd);

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   /* add the currently bound resources back to the new buffer's reference list */
   virgl_reemit_res(ctx);
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (flags & PIPE_FLUSH_FENCE_FD)
      vctx->cbuf->needs_out_fence_fd = true;

   virgl_flush_eq(vctx, vctx, fence);

   if (vctx->cbuf->in_fence_fd != -1) {
      close(vctx->cbuf->in_fence_fd);
      vctx->cbuf->in_fence_fd = -1;
   }
   vctx->cbuf->needs_out_fence_fd = false;
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

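/* Sampler-view binding keeps an enabled_mask so only live views are
 * re-referenced after a flush; views being unbound are released before the
 * new set is encoded.
 */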
static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   int i;
   uint32_t disable_mask = ~((1ull << num_views) - 1);
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   uint32_t new_mask = 0;
   uint32_t remaining_mask;

   remaining_mask = tinfo->enabled_mask & disable_mask;

   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
   }

   for (i = 0; i < num_views; i++) {
      struct virgl_sampler_view *grview = virgl_sampler_view(views[i]);

      if (views[i] == (struct pipe_sampler_view *)tinfo->views[i])
         continue;

      if (grview) {
         new_mask |= 1 << i;
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], views[i]);
      } else {
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
         disable_mask |= 1 << i;
      }
   }

   tinfo->enabled_mask &= ~disable_mask;
   tinfo->enabled_mask |= new_mask;
   virgl_encode_set_sampler_views(vctx, shader_type, start_slot, num_views, tinfo->views);
   virgl_attach_res_sampler_views(vctx, shader_type);
}

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   dres->clean[0] = FALSE;
   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(blit->dst.resource->format) ==
           util_format_is_srgb(blit->dst.format)));

   dres->clean[0] = FALSE;
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
                                        unsigned start_slot,
                                        unsigned count,
                                        const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (buffers) {
         if (buffers[i].buffer) {
            pipe_resource_reference(&vctx->atomic_buffers[idx],
                                    buffers[i].buffer);
            continue;
         }
      }
      pipe_resource_reference(&vctx->atomic_buffers[idx], NULL);
   }
   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}

static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (buffers) {
         if (buffers[i].buffer) {
            pipe_resource_reference(&vctx->ssbos[shader][idx], buffers[i].buffer);
            continue;
         }
      }
      pipe_resource_reference(&vctx->ssbos[shader][idx], NULL);
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (images) {
         if (images[i].resource) {
            pipe_resource_reference(&vctx->images[shader][idx], images[i].resource);
            continue;
         }
      }
      pipe_resource_reference(&vctx->images[shader][idx], NULL);
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *new_tokens = state->prog;
   struct pipe_stream_output_info so_info = {};
   int ret;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      return NULL;
   }

   return (void *)(unsigned long)handle;
}

static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encode_launch_grid(vctx, info);
}

static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   util_primconvert_destroy(vctx->primconvert);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

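/* Sample positions are packed one byte per sample: the x offset lives in
 * the high nibble and y in the low nibble, both in 1/16ths of a pixel.
 * E.g. a byte of 0x84 decodes to (8/16, 4/16) = (0.5, 0.25).
 */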
static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from dri/i965's gen6_get_sample_position().
    * The only addition is that we hold the msaa positions for all sample
    * counts in a flat array. */
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
1217 debug_printf("VIRGL: sample postion [%2d/%2d] = (%f, %f)\n",
1218 index, sample_count, out_value[0], out_value[1]);
}

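/* Context creation: allocate the command buffer, fill in the pipe_context
 * vtable, set up the transfer slab, primconvert fallback and upload
 * manager, then create and select a host sub-context that keeps this
 * context's state separate on the renderer side.
 */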
struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   const char *host_debug_flagstring;

   vctx = CALLOC_STRUCT(virgl_context);
   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   return &vctx->base;
fail:
   /* tear down whatever was set up before the failure to avoid leaks */
   if (vctx->primconvert)
      util_primconvert_destroy(vctx->primconvert);
   slab_destroy_child(&vctx->transfer_pool);
   rs->vws->cmd_buf_destroy(vctx->cbuf);
   FREE(vctx);
   return NULL;
}