virgl: work around bad assumptions in virglrenderer
src/gallium/drivers/virgl/virgl_context.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "pipebuffer/pb_buffer.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

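/* Vertex-element state is a thin wrapper around a host-side handle.  When
 * any element uses an instance divisor, the elements are rewritten so that
 * element i sources binding i (see virgl_create_vertex_elements_state());
 * binding_map then records which application-visible vertex buffer each
 * duplicated binding must be fed from at draw time.
 */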
struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

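/* Object handles name host-side objects in virglrenderer.  They are plain
 * monotonically increasing integers and are never recycled here; handle 0
 * is used to mean "no object" (see virgl_bind_vertex_elements_state()).
 */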
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}

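/* Upload the dirty range of a buffer to the host.  Only the range recorded
 * in valid_buffer_range is sent; the range is cleared afterwards so later
 * writes can re-dirty the buffer.
 */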
static void virgl_buffer_flush(struct virgl_context *vctx,
                               struct virgl_buffer *vbuf)
{
   struct virgl_screen *rs = virgl_screen(vctx->base.screen);
   struct pipe_box box;

   assert(vbuf->on_list);

   box.height = 1;
   box.depth = 1;
   box.y = 0;
   box.z = 0;

   box.x = vbuf->valid_buffer_range.start;
   box.width = MIN2(vbuf->valid_buffer_range.end - vbuf->valid_buffer_range.start,
                    vbuf->base.u.b.width0);

   vctx->num_transfers++;
   rs->vws->transfer_put(rs->vws, vbuf->base.hw_res,
                         &box, 0, 0, box.x, 0);

   util_range_set_empty(&vbuf->valid_buffer_range);
}

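/* The virgl_attach_res_* helpers re-reference every resource currently
 * bound to the context in the active command buffer, so the winsys keeps
 * those resources alive (and fenced) until the host has processed the
 * submitted commands.
 */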
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res)
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   struct virgl_resource *res;
   uint32_t remaining_mask = tinfo->enabled_mask;
   unsigned i;
   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      res = virgl_resource(tinfo->views[i]->base.texture);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
      res = virgl_resource(vctx->ubos[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
      res = virgl_resource(vctx->ssbos[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
      res = virgl_resource(vctx->images[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
      res = virgl_resource(vctx->atomic_buffers[i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

/*
 * after flushing, the hw context still has a bunch of
 * resources bound, so we need to rebind those here.
 */
static void virgl_reemit_res(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   res->clean = FALSE;
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;
   if (resource->target != PIPE_BUFFER) {
      surf->base.width = u_minify(resource->width0, templ->u.tex.level);
      surf->base.height = u_minify(resource->height0, templ->u.tex.level);
      surf->base.u.tex.level = templ->u.tex.level;
      surf->base.u.tex.first_layer = templ->u.tex.first_layer;
      surf->base.u.tex.last_layer = templ->u.tex.last_layer;
   } else {
      surf->base.width = templ->u.buf.last_element - templ->u.buf.first_element + 1;
      surf->base.height = resource->height0;
      surf->base.u.buf.first_element = templ->u.buf.first_element;
      surf->base.u.buf.last_element = templ->u.buf.last_element;
   }
   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

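/* CSO state objects (blend, DSA, rasterizer, samplers, shaders) keep no
 * guest-side storage: creation encodes the state into the command stream
 * and the host handle is smuggled back through the opaque void * that
 * gallium passes around.  Bind and delete just encode that handle.
 */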
static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_rasterizer_state(vctx, handle, rs_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)rs_state;

   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)rs_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers);

   vctx->vertex_array_dirty = TRUE;
}

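/* Emit the vertex-buffer bindings lazily, right before a draw.  If the
 * bound vertex-elements state duplicated its bindings (instance-divisor
 * workaround above), the user-visible buffers are remapped through
 * binding_map so each duplicated binding sees the buffer it was cloned
 * from; e.g. two elements both sourcing buffer 0 become bindings {0,1}
 * that both point at vertex buffer 0.
 */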
static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

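/* Constant buffers come in two flavors: resource-backed UBOs, which are
 * referenced by handle and re-attached across flushes, and user buffers,
 * whose contents are copied inline into the command stream (buffer_size
 * is in bytes; the encoder takes a dword count, hence the "/ 4").
 */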
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (buf) {
      if (!buf->user_buffer) {
         struct virgl_resource *res = virgl_resource(buf->buffer);
         virgl_encoder_set_uniform_buffer(vctx, shader, index, buf->buffer_offset,
                                          buf->buffer_size, res);
         pipe_resource_reference(&vctx->ubos[shader][index], buf->buffer);
         return;
      }
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
      virgl_encoder_write_constant_buffer(vctx, shader, index, buf->buffer_size / 4, buf->user_buffer);
   } else {
      virgl_encoder_write_constant_buffer(vctx, shader, index, 0, NULL);
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
   }
}

void virgl_transfer_inline_write(struct pipe_context *ctx,
                                 struct pipe_resource *res,
                                 unsigned level,
                                 unsigned usage,
                                 const struct pipe_box *box,
                                 const void *data,
                                 unsigned stride,
                                 unsigned layer_stride)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(ctx->screen);
   struct virgl_resource *grres = virgl_resource(res);
   struct virgl_buffer *vbuf = virgl_buffer(res);

   grres->clean = FALSE;

   if (virgl_res_needs_flush_wait(vctx, &vbuf->base, usage)) {
      ctx->flush(ctx, NULL, 0);

      vs->vws->resource_wait(vs->vws, vbuf->base.hw_res);
   }

   virgl_encoder_inline_write(vctx, grres, level, usage,
                              box, data, stride, layer_stride);
}

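/* Common path for all shader stages: run the TGSI through virgl's
 * transform pass, encode the resulting tokens together with the
 * stream-output info, and hand back the host handle as the CSO.
 */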
static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   struct tgsi_token *new_tokens;
   int ret;

   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode shader state */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE(new_tokens);   /* don't leak the transformed tokens on error */
      return NULL;
   }

   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

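/* Draw path.  Primitives the host can't rasterize (per prim_mask) are
 * lowered with u_primconvert; user index buffers are staged into a real
 * resource with u_upload, since the host only understands resources.
 * Vertex and index bindings are (re)emitted here before the draw itself.
 */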
static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_draw_vbo(vctx->primconvert, dinfo);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = info.start * ib.index_size;

      if (ib.user_buffer) {
         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 256,
                       ib.user_buffer, &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
   }

   u_upload_unmap(vctx->uploader);

   vctx->num_draws++;
   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   pipe_resource_reference(&ib.buffer, NULL);
}

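/* Submit the accumulated command stream to the host.  After submission
 * the encoder switches back to this context's sub-context and every
 * still-bound resource is re-attached (see virgl_reemit_res()).
 */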
static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   int out_fence_fd = -1;

   /* send the buffer to the remote side for decoding */
   ctx->num_transfers = ctx->num_draws = 0;

   rs->vws->submit_cmd(rs->vws, ctx->cbuf, ctx->cbuf->in_fence_fd,
                       ctx->cbuf->needs_out_fence_fd ? &out_fence_fd : NULL);

   if (fence)
      *fence = rs->vws->cs_create_fence(rs->vws, out_fence_fd);

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   /* add back current framebuffer resources to reference list? */
   virgl_reemit_res(ctx);
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_buffer *buf, *tmp;

   if (flags & PIPE_FLUSH_FENCE_FD)
      vctx->cbuf->needs_out_fence_fd = true;

   LIST_FOR_EACH_ENTRY_SAFE(buf, tmp, &vctx->to_flush_bufs, flush_list) {
      struct pipe_resource *res = &buf->base.u.b;
      virgl_buffer_flush(vctx, buf);
      list_del(&buf->flush_list);
      buf->on_list = FALSE;
      pipe_resource_reference(&res, NULL);
   }
   virgl_flush_eq(vctx, vctx, fence);

   if (vctx->cbuf->in_fence_fd != -1) {
      close(vctx->cbuf->in_fence_fd);
      vctx->cbuf->in_fence_fd = -1;
   }
   vctx->cbuf->needs_out_fence_fd = false;
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

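/* Bound sampler views are tracked with an enabled_mask bitmask: slots
 * above num_views are unbound first, then each incoming slot either takes
 * a reference on the new view or is cleared, and the final mask drives
 * both the encoder and the resource re-attach on flush.
 */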
static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   int i;
   uint32_t disable_mask = ~((1ull << num_views) - 1);
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   uint32_t new_mask = 0;
   uint32_t remaining_mask;

   remaining_mask = tinfo->enabled_mask & disable_mask;

   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
   }

   for (i = 0; i < num_views; i++) {
      struct virgl_sampler_view *grview = virgl_sampler_view(views[i]);

      if (views[i] == (struct pipe_sampler_view *)tinfo->views[i])
         continue;

      if (grview) {
         new_mask |= 1 << i;
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], views[i]);
      } else {
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
         disable_mask |= 1 << i;
      }
   }

   tinfo->enabled_mask &= ~disable_mask;
   tinfo->enabled_mask |= new_mask;
   virgl_encode_set_sampler_views(vctx, shader_type, start_slot, num_views, tinfo->views);
   virgl_attach_res_sampler_views(vctx, shader_type);
}

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   dres->clean = FALSE;
   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   dres->clean = FALSE;
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
                                        unsigned start_slot,
                                        unsigned count,
                                        const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (buffers) {
         if (buffers[i].buffer) {
            pipe_resource_reference(&vctx->atomic_buffers[idx],
                                    buffers[i].buffer);
            continue;
         }
      }
      pipe_resource_reference(&vctx->atomic_buffers[idx], NULL);
   }
   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}

static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (buffers) {
         if (buffers[i].buffer) {
            pipe_resource_reference(&vctx->ssbos[shader][idx], buffers[i].buffer);
            continue;
         }
      }
      pipe_resource_reference(&vctx->ssbos[shader][idx], NULL);
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (images) {
         if (images[i].resource) {
            pipe_resource_reference(&vctx->images[shader][idx], images[i].resource);
            continue;
         }
      }
      pipe_resource_reference(&vctx->images[shader][idx], NULL);
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *new_tokens = state->prog;
   struct pipe_stream_output_info so_info = {};
   int ret;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      return NULL;
   }

   return (void *)(unsigned long)handle;
}

static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encode_launch_grid(vctx, info);
}

static void
virgl_context_destroy( struct pipe_context *ctx )
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   util_primconvert_destroy(vctx->primconvert);

   slab_destroy_child(&vctx->texture_transfer_pool);
   FREE(vctx);
}

static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from dri/i965's gen6_get_sample_position().
    * The only addition is that we hold the msaa positions for all sample
    * counts in a flat array. */
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

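/* Context creation: allocate a command buffer, wire up all the gallium
 * entry points, then create and select a host sub-context that isolates
 * this context's state from other contexts on the same screen.
 */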
struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   vctx = CALLOC_STRUCT(virgl_context);
   const char *host_debug_flagstring;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   list_inithead(&vctx->to_flush_bufs);
   slab_create_child(&vctx->texture_transfer_pool, &rs->texture_transfer_pool);

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   return &vctx->base;
fail:
   return NULL;
}