virgl: modify how we handle GL_MAP_FLUSH_EXPLICIT_BIT
[mesa.git] src/gallium/drivers/virgl/virgl_context.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "pipebuffer/pb_buffer.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

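/* Object handles are process-wide, monotonically increasing IDs used to
 * name objects (shaders, surfaces, state objects, ...) in the command
 * stream sent to the host renderer.
 */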
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}

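/* The virgl_attach_res_*() helpers below add each currently-bound
 * resource to the command buffer's reference list, so the host keeps
 * those resources alive while the commands that use them execute.
 */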
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res)
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   struct virgl_resource *res;
   uint32_t remaining_mask = tinfo->enabled_mask;
   unsigned i;
   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      res = virgl_resource(tinfo->views[i]->base.texture);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
      res = virgl_resource(vctx->ubos[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
      res = virgl_resource(vctx->ssbos[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
      res = virgl_resource(vctx->images[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
      res = virgl_resource(vctx->atomic_buffers[i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

/*
 * after flushing, the hw context still has a bunch of
 * resources bound, so we need to rebind those here.
 */
static void virgl_reemit_res(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   res->clean = FALSE;
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;
   if (resource->target != PIPE_BUFFER) {
      surf->base.width = u_minify(resource->width0, templ->u.tex.level);
      surf->base.height = u_minify(resource->height0, templ->u.tex.level);
      surf->base.u.tex.level = templ->u.tex.level;
      surf->base.u.tex.first_layer = templ->u.tex.first_layer;
      surf->base.u.tex.last_layer = templ->u.tex.last_layer;
   } else {
      surf->base.width = templ->u.buf.last_element - templ->u.buf.first_element + 1;
      surf->base.height = resource->height0;
      surf->base.u.buf.first_element = templ->u.buf.first_element;
      surf->base.u.buf.last_element = templ->u.buf.last_element;
   }
   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_rasterizer_state(vctx, handle, rs_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)rs_state;

   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)rs_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   if (!state)
      return NULL;

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers);

   vctx->vertex_array_dirty = TRUE;
}

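/* Lazily emit the guest's vertex buffer bindings.  If the bound vertex
 * elements required duplicated bindings (see
 * virgl_create_vertex_elements_state()), remap the buffers through
 * binding_map before encoding them.
 */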
static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

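/* Constant buffers backed by a pipe_resource are referenced by handle;
 * user (client-memory) constant buffers are instead copied inline into
 * the command stream.
 */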
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (buf) {
      if (!buf->user_buffer) {
         struct virgl_resource *res = virgl_resource(buf->buffer);
         virgl_encoder_set_uniform_buffer(vctx, shader, index, buf->buffer_offset,
                                          buf->buffer_size, res);
         pipe_resource_reference(&vctx->ubos[shader][index], buf->buffer);
         return;
      }
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
      virgl_encoder_write_constant_buffer(vctx, shader, index, buf->buffer_size / 4, buf->user_buffer);
   } else {
      virgl_encoder_write_constant_buffer(vctx, shader, index, 0, NULL);
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
   }
}

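/* Upload data by encoding it inline in the command stream.  If the host
 * may still be using the resource, flush the queued commands and wait
 * for the resource to go idle first so the write cannot race host-side
 * reads.
 */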
void virgl_transfer_inline_write(struct pipe_context *ctx,
                                 struct pipe_resource *res,
                                 unsigned level,
                                 unsigned usage,
                                 const struct pipe_box *box,
                                 const void *data,
                                 unsigned stride,
                                 unsigned layer_stride)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(ctx->screen);
   struct virgl_resource *grres = virgl_resource(res);
   struct virgl_buffer *vbuf = virgl_buffer(res);

   grres->clean = FALSE;

   if (virgl_res_needs_flush_wait(vctx, &vbuf->base, usage)) {
      ctx->flush(ctx, NULL, 0);

      vs->vws->resource_wait(vs->vws, vbuf->base.hw_res);
   }

   virgl_encoder_inline_write(vctx, grres, level, usage,
                              box, data, stride, layer_stride);
}

static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   struct tgsi_token *new_tokens;
   int ret;

   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode the shader state for whichever stage 'type' names */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE(new_tokens);
      return NULL;
   }

   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!info.count_from_stream_output && !info.indirect &&
       !info.primitive_restart &&
       !u_trim_pipe_prim(info.mode, &info.count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << info.mode))) {
      util_primconvert_draw_vbo(vctx->primconvert, &info);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = info.index_size;
      ib.offset = info.start * ib.index_size;

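      /* Client-memory indices have no GPU-visible backing store: copy
       * them into the upload buffer and point the index buffer at that.
       */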
      if (ib.user_buffer) {
         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 256,
                       ib.user_buffer, &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
   }

   u_upload_unmap(vctx->uploader);

   vctx->num_draws++;
   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   pipe_resource_reference(&ib.buffer, NULL);
}

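/* Submit the accumulated command buffer to the host for decoding, then
 * re-select our sub-context and re-reference everything still bound:
 * the reference list belongs to the submitted buffer, so the new one
 * starts empty.
 */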
static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   int out_fence_fd = -1;

   /* send the buffer to the remote side for decoding */
   ctx->num_transfers = ctx->num_draws = 0;

   rs->vws->submit_cmd(rs->vws, ctx->cbuf, ctx->cbuf->in_fence_fd,
                       ctx->cbuf->needs_out_fence_fd ? &out_fence_fd : NULL);

   if (fence)
      *fence = rs->vws->cs_create_fence(rs->vws, out_fence_fd);

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   /* add back current framebuffer resources to reference list? */
   virgl_reemit_res(ctx);
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (flags & PIPE_FLUSH_FENCE_FD)
      vctx->cbuf->needs_out_fence_fd = true;

   virgl_flush_eq(vctx, vctx, fence);

   if (vctx->cbuf->in_fence_fd != -1) {
      close(vctx->cbuf->in_fence_fd);
      vctx->cbuf->in_fence_fd = -1;
   }
   vctx->cbuf->needs_out_fence_fd = false;
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   int i;
   uint32_t disable_mask = ~((1ull << num_views) - 1);
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   uint32_t new_mask = 0;
   uint32_t remaining_mask;

   remaining_mask = tinfo->enabled_mask & disable_mask;

   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
   }

   for (i = 0; i < num_views; i++) {
      struct virgl_sampler_view *grview = virgl_sampler_view(views[i]);

      if (views[i] == (struct pipe_sampler_view *)tinfo->views[i])
         continue;

      if (grview) {
         new_mask |= 1 << i;
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], views[i]);
      } else {
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
         disable_mask |= 1 << i;
      }
   }

   tinfo->enabled_mask &= ~disable_mask;
   tinfo->enabled_mask |= new_mask;
   virgl_encode_set_sampler_views(vctx, shader_type, start_slot, num_views, tinfo->views);
   virgl_attach_res_sampler_views(vctx, shader_type);
}

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   dres->clean = FALSE;
   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   dres->clean = FALSE;
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
                                        unsigned start_slot,
                                        unsigned count,
                                        const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (buffers) {
         if (buffers[i].buffer) {
            pipe_resource_reference(&vctx->atomic_buffers[idx],
                                    buffers[i].buffer);
            continue;
         }
      }
      pipe_resource_reference(&vctx->atomic_buffers[idx], NULL);
   }
   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}

static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (buffers) {
         if (buffers[i].buffer) {
            pipe_resource_reference(&vctx->ssbos[shader][idx], buffers[i].buffer);
            continue;
         }
      }
      pipe_resource_reference(&vctx->ssbos[shader][idx], NULL);
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (images) {
         if (images[i].resource) {
            pipe_resource_reference(&vctx->images[shader][idx], images[i].resource);
            continue;
         }
      }
      pipe_resource_reference(&vctx->images[shader][idx], NULL);
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *new_tokens = state->prog;
   struct pipe_stream_output_info so_info = {};
   int ret;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      return NULL;
   }

   return (void *)(unsigned long)handle;
}

static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encode_launch_grid(vctx, info);
}

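/* Tear down in submission order: encode destruction of the host
 * sub-context, flush whatever is still queued, then free the CPU-side
 * objects that back this context.
 */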
static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   util_primconvert_destroy(vctx->primconvert);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from dri/i965's gen6_get_sample_position.
    * The only addition is that we hold the msaa positions for all sample
    * counts in a flat array. */
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
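   /* Each byte holds one sample position, with x packed in the high
    * nibble and y in the low nibble, both in 1/16th units of the pixel.
    */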
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   const char *host_debug_flagstring;

   vctx = CALLOC_STRUCT(virgl_context);
   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

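   /* Each guest pipe_context gets its own sub-context on the host, so
    * state encoded here cannot clobber other contexts sharing the same
    * host renderer context.
    */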
   vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   return &vctx->base;
fail:
   return NULL;
}