virgl: add VIRGL_DEBUG_SYNC
src/gallium/drivers/virgl/virgl_context.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "pipebuffer/pb_buffer.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

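/* Guest-side vertex-elements CSO.  When instance divisors force a 1:1
 * element-to-binding layout (see virgl_create_vertex_elements_state),
 * binding_map[] records, for each duplicated binding slot, which original
 * vertex buffer it is fed from; num_bindings is 0 when no remap is needed.
 */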
struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

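/* Object handles are allocated guest-side and simply count upwards, so the
 * first valid handle is 1; handle 0 is reserved to mean "no object" when
 * binding (see e.g. virgl_bind_rasterizer_state).
 */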
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}

bool
virgl_can_rebind_resource(struct virgl_context *vctx,
                          struct pipe_resource *res)
{
   /* We cannot rebind resources that are referenced by host objects, which
    * are
    *
    *  - VIRGL_OBJECT_SURFACE
    *  - VIRGL_OBJECT_SAMPLER_VIEW
    *  - VIRGL_OBJECT_STREAMOUT_TARGET
    *
    * Because surfaces cannot be created from buffers, we require the resource
    * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
    */
   const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
                                      PIPE_BIND_STREAM_OUTPUT);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
}

void
virgl_rebind_resource(struct virgl_context *vctx,
                      struct pipe_resource *res)
{
   /* Queries use internally created buffers and do not go through transfers.
    * Index buffers are not bindable.  They are not tracked.
    */
   MAYBE_UNUSED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
                                               PIPE_BIND_CONSTANT_BUFFER |
                                               PIPE_BIND_SHADER_BUFFER |
                                               PIPE_BIND_SHADER_IMAGE);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   unsigned i;

   assert(virgl_can_rebind_resource(vctx, res) &&
          (bind_history & tracked_bind) == bind_history);

   if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
      for (i = 0; i < vctx->num_vertex_buffers; i++) {
         if (vctx->vertex_buffer[i].buffer.resource == res) {
            vctx->vertex_array_dirty = true;
            break;
         }
      }
   }

   if (bind_history & PIPE_BIND_SHADER_BUFFER) {
      uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
      while (remaining_mask) {
         int i = u_bit_scan(&remaining_mask);
         if (vctx->atomic_buffers[i].buffer == res) {
            const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
            virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
         }
      }
   }

   /* check per-stage shader bindings */
   if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
                       PIPE_BIND_SHADER_BUFFER |
                       PIPE_BIND_SHADER_IMAGE)) {
      enum pipe_shader_type shader_type;
      for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
         const struct virgl_shader_binding_state *binding =
            &vctx->shader_bindings[shader_type];

         if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
            uint32_t remaining_mask = binding->ubo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ubos[i].buffer == res) {
                  const struct pipe_constant_buffer *ubo = &binding->ubos[i];
                  virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
                                                   ubo->buffer_offset,
                                                   ubo->buffer_size,
                                                   virgl_resource(res));
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_BUFFER) {
            uint32_t remaining_mask = binding->ssbo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ssbos[i].buffer == res) {
                  const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
                  virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
                                                  ssbo);
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_IMAGE) {
            uint32_t remaining_mask = binding->image_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->images[i].resource == res) {
                  const struct pipe_image_view *image = &binding->images[i];
                  virgl_encode_set_shader_images(vctx, shader_type, i, 1,
                                                 image);
               }
            }
         }
      }
   }
}

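/* The virgl_attach_res_*() helpers below re-emit the hw_res of every object
 * bound to the context into the current command buffer.  This keeps those
 * resources referenced host-side for the lifetime of the buffer; after a
 * flush they must be emitted again (see virgl_reemit_draw_resources).
 */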
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
         virgl_resource_dirty(res, surf->u.tex.level);
      }
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
            virgl_resource_dirty(res, surf->u.tex.level);
         }
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->view_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      assert(binding->views[i] && binding->views[i]->texture);
      res = virgl_resource(binding->views[i]->texture);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ubo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ubos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ssbo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ssbos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->image_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->images[i].resource);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(vctx->atomic_buffers[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

/*
 * after flushing, the hw context still has a bunch of
 * resources bound, so we need to rebind those here.
 */
static void virgl_reemit_draw_resources(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static void virgl_reemit_compute_resources(struct virgl_context *vctx)
{
   virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);

   virgl_attach_res_atomic_buffers(vctx);
}

static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   /* no support for buffer surfaces */
   if (resource->target == PIPE_BUFFER)
      return NULL;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;

   surf->base.width = u_minify(resource->width0, templ->u.tex.level);
   surf->base.height = u_minify(resource->height0, templ->u.tex.level);
   surf->base.u.tex.level = templ->u.tex.level;
   surf->base.u.tex.first_layer = templ->u.tex.first_layer;
   surf->base.u.tex.last_layer = templ->u.tex.last_layer;

   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   if (!state)
      return NULL;

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; i++) {
         struct virgl_resource *res =
            virgl_resource(buffers[i].buffer.resource);
         if (res && !buffers[i].is_user_buffer)
            res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
      }
   }

   vctx->vertex_array_dirty = TRUE;
}

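/* Vertex buffers are emitted lazily: the state setters above only raise
 * vertex_array_dirty, and the actual set-vertex-buffers command is encoded
 * here, right before a draw.  When the bound vertex-elements CSO carries a
 * binding remap (num_bindings != 0), the buffers are reshuffled through
 * binding_map[] first so each duplicated binding sees the right buffer.
 */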
static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);

      vctx->vertex_array_dirty = FALSE;
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

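/* Constant buffers backed by a pipe_resource are bound as host-side UBOs
 * and tracked in shader_bindings for later rebinds; user-pointer constants
 * are instead written inline into the command stream
 * (virgl_encoder_write_constant_buffer), so no guest resource is retained
 * for them.
 */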
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   if (buf && buf->buffer) {
      struct virgl_resource *res = virgl_resource(buf->buffer);
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;

      virgl_encoder_set_uniform_buffer(vctx, shader, index,
                                       buf->buffer_offset,
                                       buf->buffer_size, res);

      pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
      binding->ubos[index] = *buf;
      binding->ubo_enabled_mask |= 1 << index;
   } else {
      static const struct pipe_constant_buffer dummy_ubo;
      if (!buf)
         buf = &dummy_ubo;
      virgl_encoder_write_constant_buffer(vctx, shader, index,
                                          buf->buffer_size / 4,
                                          buf->user_buffer);

      pipe_resource_reference(&binding->ubos[index].buffer, NULL);
      binding->ubo_enabled_mask &= ~(1 << index);
   }
}

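/* Common path for all graphics shader stages: run the TGSI through the
 * virgl transform pass, encode the result as a host shader object, and
 * return the bare handle cast to a pointer as the CSO.
 */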
static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   struct tgsi_token *new_tokens;
   int ret;

   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE(new_tokens);
      return NULL;
   }

   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

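/* Draw path: primitives the host cannot render (per prim_mask) are lowered
 * with u_primconvert; user index buffers are uploaded into a real buffer
 * first so the host has a resource to read from.  The first draw after a
 * flush also re-attaches every bound resource to the new command buffer.
 */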
static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = info.start * ib.index_size;

      if (ib.user_buffer) {
         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 4,
                       ib.user_buffer, &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
   }

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   pipe_resource_reference(&ib.buffer, NULL);
}

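/* When VIRGL_DEBUG_SYNC is set, follow every command buffer submission with
 * a blocking fence wait, making submissions effectively synchronous.  Handy
 * for narrowing down which submission triggers a host-side problem, at an
 * obvious performance cost.
 */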
static void virgl_submit_cmd(struct virgl_winsys *vws,
                             struct virgl_cmd_buf *cbuf,
                             struct pipe_fence_handle **fence)
{
   if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
      struct pipe_fence_handle *sync_fence = NULL;

      vws->submit_cmd(vws, cbuf, &sync_fence);

      vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
      vws->fence_reference(vws, &sync_fence, NULL);
   } else {
      vws->submit_cmd(vws, cbuf, fence);
   }
}

static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* skip empty cbuf */
   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
       ctx->queue.num_dwords == 0 &&
       !fence)
      return;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);

   virgl_submit_cmd(rs->vws, ctx->cbuf, fence);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;

   /* We have flushed the command queue, including any pending copy transfers
    * involving staging resources.
    */
   ctx->queued_staging_res_size = 0;
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_flush_eq(vctx, vctx, fence);
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   binding->view_enabled_mask &= ~u_bit_consecutive(start_slot, num_views);
   for (unsigned i = 0; i < num_views; i++) {
      unsigned idx = start_slot + i;
      if (views && views[i]) {
         struct virgl_resource *res = virgl_resource(views[i]->texture);
         res->bind_history |= PIPE_BIND_SAMPLER_VIEW;

         pipe_sampler_view_reference(&binding->views[idx], views[i]);
         binding->view_enabled_mask |= 1 << idx;
      } else {
         pipe_sampler_view_reference(&binding->views[idx], NULL);
      }
   }

   virgl_encode_set_sampler_views(vctx, shader_type,
         start_slot, num_views, (struct virgl_sampler_view **)binding->views);
   virgl_attach_res_sampler_views(vctx, shader_type);
}

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   if (dres->u.b.target == PIPE_BUFFER)
      util_range_add(&dres->valid_buffer_range, dstx, dstx + src_box->width);
   virgl_resource_dirty(dres, dst_level);

   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(blit->dst.resource->format) ==
           util_format_is_srgb(blit->dst.format)));

   virgl_resource_dirty(dres, blit->dst.level);
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
                                        unsigned start_slot,
                                        unsigned count,
                                        const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
                                 buffers[i].buffer);
         vctx->atomic_buffers[idx] = buffers[i];
         vctx->atomic_buffer_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
      }
   }

   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}

static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers,
                                     unsigned writable_bitmask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
         binding->ssbos[idx] = buffers[i];
         binding->ssbo_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
      }
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

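/* Native fence-fd support: wrap an incoming sync file in a winsys fence
 * object, and make subsequent submissions wait on a fence server-side.
 * Both are no-ops when the winsys does not provide the hooks.
 */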
static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         res->bind_history |= PIPE_BIND_SHADER_IMAGE;

         pipe_resource_reference(&binding->images[idx].resource,
                                 images[i].resource);
         binding->images[idx] = images[i];
         binding->image_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->images[idx].resource, NULL);
      }
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *new_tokens = state->prog;
   struct pipe_stream_output_info so_info = {};
   int ret;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      return NULL;
   }

   return (void *)(unsigned long)handle;
}

static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_compute)
      virgl_reemit_compute_resources(vctx);
   vctx->num_compute++;

   virgl_encode_launch_grid(vctx, info);
}

static void
virgl_release_shader_binding(struct virgl_context *vctx,
                             enum pipe_shader_type shader_type)
{
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   while (binding->view_enabled_mask) {
      int i = u_bit_scan(&binding->view_enabled_mask);
      pipe_sampler_view_reference(
            (struct pipe_sampler_view **)&binding->views[i], NULL);
   }

   while (binding->ubo_enabled_mask) {
      int i = u_bit_scan(&binding->ubo_enabled_mask);
      pipe_resource_reference(&binding->ubos[i].buffer, NULL);
   }

   while (binding->ssbo_enabled_mask) {
      int i = u_bit_scan(&binding->ssbo_enabled_mask);
      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
   }

   while (binding->image_enabled_mask) {
      int i = u_bit_scan(&binding->image_enabled_mask);
      pipe_resource_reference(&binding->images[i].resource, NULL);
   }
}

static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   enum pipe_shader_type shader_type;

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
      virgl_release_shader_binding(vctx, shader_type);

   while (vctx->atomic_buffer_enabled_mask) {
      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
   }

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   if (vctx->transfer_uploader)
      u_upload_destroy(vctx->transfer_uploader);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from dri/i965's
    * gen6_get_sample_position.  The only addition is that we hold the msaa
    * positions for all sample counts in a flat array. */
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
{
   if (rs->tweak_gles_emulate_bgra)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);

   if (rs->tweak_gles_apply_bgra_dest_swizzle)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);

   if (rs->tweak_gles_tf3_value > 0)
      virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
                         rs->tweak_gles_tf3_value);
}

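/* Context creation: all the state setters above only encode commands into
 * vctx->cbuf; the host replays them in a dedicated sub-context
 * (hw_sub_ctx_id), so state from different guest contexts stays isolated
 * host-side.
 */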
struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   const char *host_debug_flagstring;

   vctx = CALLOC_STRUCT(virgl_context);
   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
   virgl_transfer_queue_init(&vctx->queue, vctx);
   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                              (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));

   /* Reserve some space for transfers. */
   if (vctx->encoded_transfers)
      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;
   /* Use a custom/staging buffer for the transfer uploader, since we are
    * using it only for copies to other resources.
    */
   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
       vctx->encoded_transfers) {
      vctx->transfer_uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                                PIPE_BIND_CUSTOM,
                                                PIPE_USAGE_STAGING,
                                                VIRGL_RESOURCE_FLAG_STAGING);
      if (!vctx->transfer_uploader)
         goto fail;
   }

   vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
      virgl_send_tweaks(vctx, rs);

   return &vctx->base;
fail:
   virgl_context_destroy(&vctx->base);
   return NULL;
}