/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "pipebuffer/pb_buffer.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"

struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

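/* Host object handles come from a simple monotonically increasing counter.
 * Note this is a plain (non-atomic) increment, so concurrent contexts are
 * presumably serialized further up the stack.
 */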
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}

bool
virgl_can_rebind_resource(struct virgl_context *vctx,
                          struct pipe_resource *res)
{
   /* We cannot rebind resources that are referenced by host objects, which
    * are
    *
    *  - VIRGL_OBJECT_SURFACE
    *  - VIRGL_OBJECT_SAMPLER_VIEW
    *  - VIRGL_OBJECT_STREAMOUT_TARGET
    *
    * Because surfaces cannot be created from buffers, we require the resource
    * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
    */
   const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
                                      PIPE_BIND_STREAM_OUTPUT);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
}

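/* Re-emit every tracked binding that references res so the host picks up the
 * resource's new backing storage. This is the companion to
 * virgl_can_rebind_resource() above: only buffers whose entire bind history
 * is rebindable can be renamed this way.
 */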
void
virgl_rebind_resource(struct virgl_context *vctx,
                      struct pipe_resource *res)
{
   /* Queries use internally created buffers and do not go through transfers.
    * Index buffers are not bindable. They are not tracked.
    */
   MAYBE_UNUSED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
                                               PIPE_BIND_CONSTANT_BUFFER |
                                               PIPE_BIND_SHADER_BUFFER |
                                               PIPE_BIND_SHADER_IMAGE);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   unsigned i;

   assert(virgl_can_rebind_resource(vctx, res) &&
          (bind_history & tracked_bind) == bind_history);

   if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
      for (i = 0; i < vctx->num_vertex_buffers; i++) {
         if (vctx->vertex_buffer[i].buffer.resource == res) {
            vctx->vertex_array_dirty = true;
            break;
         }
      }
   }

   if (bind_history & PIPE_BIND_SHADER_BUFFER) {
      uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
      while (remaining_mask) {
         int i = u_bit_scan(&remaining_mask);
         if (vctx->atomic_buffers[i].buffer == res) {
            const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
            virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
         }
      }
   }

   /* check per-stage shader bindings */
   if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
                       PIPE_BIND_SHADER_BUFFER |
                       PIPE_BIND_SHADER_IMAGE)) {
      enum pipe_shader_type shader_type;
      for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
         const struct virgl_shader_binding_state *binding =
            &vctx->shader_bindings[shader_type];

         if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
            uint32_t remaining_mask = binding->ubo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ubos[i].buffer == res) {
                  const struct pipe_constant_buffer *ubo = &binding->ubos[i];
                  virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
                                                   ubo->buffer_offset,
                                                   ubo->buffer_size,
                                                   virgl_resource(res));
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_BUFFER) {
            uint32_t remaining_mask = binding->ssbo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ssbos[i].buffer == res) {
                  const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
                  virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
                                                  ssbo);
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_IMAGE) {
            uint32_t remaining_mask = binding->image_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->images[i].resource == res) {
                  const struct pipe_image_view *image = &binding->images[i];
                  virgl_encode_set_shader_images(vctx, shader_type, i, 1,
                                                 image);
               }
            }
         }
      }
   }
}

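/* The virgl_attach_res_* helpers below re-emit the hardware resources backing
 * the currently bound state into the active command buffer. This keeps the
 * winsys aware of every resource a submission depends on; it has to be
 * repeated after each flush because a fresh command buffer starts with an
 * empty resource list.
 */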
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
         virgl_resource_dirty(res, surf->u.tex.level);
      }
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
            virgl_resource_dirty(res, surf->u.tex.level);
         }
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->view_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      assert(binding->views[i] && binding->views[i]->texture);
      res = virgl_resource(binding->views[i]->texture);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ubo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ubos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ssbo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ssbos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->image_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->images[i].resource);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(vctx->atomic_buffers[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

/*
 * after flushing, the hw context still has a bunch of
 * resources bound, so we need to rebind those here.
 */
static void virgl_reemit_draw_resources(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static void virgl_reemit_compute_resources(struct virgl_context *vctx)
{
   virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);

   virgl_attach_res_atomic_buffers(vctx);
}

static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   /* no support for buffer surfaces */
   if (resource->target == PIPE_BUFFER)
      return NULL;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;

   surf->base.width = u_minify(resource->width0, templ->u.tex.level);
   surf->base.height = u_minify(resource->height0, templ->u.tex.level);
   surf->base.u.tex.level = templ->u.tex.level;
   surf->base.u.tex.first_layer = templ->u.tex.first_layer;
   surf->base.u.tex.last_layer = templ->u.tex.last_layer;

   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

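/* Most CSOs live entirely on the host: the guest-side "object" returned to
 * the state tracker is just the 32-bit host handle smuggled through the
 * void pointer, so create/bind/delete reduce to encoding the matching
 * commands.
 */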
static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   if (!state)
      return NULL;

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; i++) {
         struct virgl_resource *res =
            virgl_resource(buffers[i].buffer.resource);
         if (res && !buffers[i].is_user_buffer)
            res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
      }
   }

   vctx->vertex_array_dirty = TRUE;
}

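/* Vertex buffer state is emitted lazily at draw time. When the bound vertex
 * elements forced a 1:1 element/binding layout (see
 * virgl_create_vertex_elements_state above), the buffers are first remapped
 * through binding_map so each duplicated binding points at the right buffer.
 */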
static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);

      vctx->vertex_array_dirty = FALSE;
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

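/* Constant buffers take one of two paths: a resource-backed UBO is bound by
 * handle and tracked so it can be rebound later, while a user (CPU) buffer
 * is copied inline into the command stream and needs no guest-side tracking.
 */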
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   if (buf && buf->buffer) {
      struct virgl_resource *res = virgl_resource(buf->buffer);
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;

      virgl_encoder_set_uniform_buffer(vctx, shader, index,
                                       buf->buffer_offset,
                                       buf->buffer_size, res);

      pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
      binding->ubos[index] = *buf;
      binding->ubo_enabled_mask |= 1 << index;
   } else {
      static const struct pipe_constant_buffer dummy_ubo;
      if (!buf)
         buf = &dummy_ubo;
      virgl_encoder_write_constant_buffer(vctx, shader, index,
                                          buf->buffer_size / 4,
                                          buf->user_buffer);

      pipe_resource_reference(&binding->ubos[index].buffer, NULL);
      binding->ubo_enabled_mask &= ~(1 << index);
   }
}

static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   struct tgsi_token *new_tokens;
   int ret;

   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode shader state for the requested stage */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE(new_tokens);
      return NULL;
   }

   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = info.start * ib.index_size;

      if (ib.user_buffer) {
         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 4,
                       ib.user_buffer, &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
   }

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   pipe_resource_reference(&ib.buffer, NULL);
}

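/* With VIRGL_DEBUG_SYNC set, every submission is made synchronous by waiting
 * on a private fence before returning. This serializes the guest against the
 * host renderer, which helps attribute host-side errors or hangs to the
 * command buffer that caused them.
 */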
static void virgl_submit_cmd(struct virgl_winsys *vws,
                             struct virgl_cmd_buf *cbuf,
                             struct pipe_fence_handle **fence)
{
   if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
      struct pipe_fence_handle *sync_fence = NULL;

      vws->submit_cmd(vws, cbuf, &sync_fence);

      vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
      vws->fence_reference(vws, &sync_fence, NULL);
   } else {
      vws->submit_cmd(vws, cbuf, fence);
   }
}

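/* Flush path: drain any queued transfers into the command buffer, submit it,
 * then re-prime the fresh buffer by reserving space for encoded transfers and
 * re-selecting our hardware sub-context. Bound-resource attachments are not
 * re-emitted here; that happens lazily on the next draw/clear/dispatch via
 * the num_draws/num_compute counters.
 */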
static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* skip empty cbuf */
   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
       ctx->queue.num_dwords == 0 &&
       !fence)
      return;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);

   virgl_submit_cmd(rs->vws, ctx->cbuf, fence);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;

   /* We have flushed the command queue, including any pending copy transfers
    * involving staging resources.
    */
   ctx->queued_staging_res_size = 0;
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_flush_eq(vctx, vctx, fence);
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   binding->view_enabled_mask &= ~u_bit_consecutive(start_slot, num_views);
   for (unsigned i = 0; i < num_views; i++) {
      unsigned idx = start_slot + i;
      if (views && views[i]) {
         struct virgl_resource *res = virgl_resource(views[i]->texture);
         res->bind_history |= PIPE_BIND_SAMPLER_VIEW;

         pipe_sampler_view_reference(&binding->views[idx], views[i]);
         binding->view_enabled_mask |= 1 << idx;
      } else {
         pipe_sampler_view_reference(&binding->views[idx], NULL);
      }
   }

   virgl_encode_set_sampler_views(vctx, shader_type,
         start_slot, num_views, (struct virgl_sampler_view **)binding->views);
   virgl_attach_res_sampler_views(vctx, shader_type);
}

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

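/* Copies and blits execute entirely on the host; the guest only encodes the
 * command and updates its bookkeeping (the valid range for buffers and the
 * dirty level for textures) so later transfers and reads stay coherent.
 */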
static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   if (dres->u.b.target == PIPE_BUFFER)
      util_range_add(&dres->valid_buffer_range, dstx, dstx + src_box->width);
   virgl_resource_dirty(dres, dst_level);

   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(blit->dst.resource->format) ==
           util_format_is_srgb(blit->dst.format)));

   virgl_resource_dirty(dres, blit->dst.level);
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
                                        unsigned start_slot,
                                        unsigned count,
                                        const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
                                 buffers[i].buffer);
         vctx->atomic_buffers[idx] = buffers[i];
         vctx->atomic_buffer_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
      }
   }

   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}

static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers,
                                     unsigned writable_bitmask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
         binding->ssbos[idx] = buffers[i];
         binding->ssbo_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
      }
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         res->bind_history |= PIPE_BIND_SHADER_IMAGE;

         pipe_resource_reference(&binding->images[idx].resource,
                                 images[i].resource);
         binding->images[idx] = images[i];
         binding->image_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->images[idx].resource, NULL);
      }
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *new_tokens = state->prog;
   struct pipe_stream_output_info so_info = {};
   int ret;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      return NULL;
   }

   return (void *)(unsigned long)handle;
}

static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_compute)
      virgl_reemit_compute_resources(vctx);
   vctx->num_compute++;

   virgl_encode_launch_grid(vctx, info);
}

static void
virgl_release_shader_binding(struct virgl_context *vctx,
                             enum pipe_shader_type shader_type)
{
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   while (binding->view_enabled_mask) {
      int i = u_bit_scan(&binding->view_enabled_mask);
      pipe_sampler_view_reference(
            (struct pipe_sampler_view **)&binding->views[i], NULL);
   }

   while (binding->ubo_enabled_mask) {
      int i = u_bit_scan(&binding->ubo_enabled_mask);
      pipe_resource_reference(&binding->ubos[i].buffer, NULL);
   }

   while (binding->ssbo_enabled_mask) {
      int i = u_bit_scan(&binding->ssbo_enabled_mask);
      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
   }

   while (binding->image_enabled_mask) {
      int i = u_bit_scan(&binding->image_enabled_mask);
      pipe_resource_reference(&binding->images[i].resource, NULL);
   }
}

static void
virgl_context_destroy( struct pipe_context *ctx )
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   enum pipe_shader_type shader_type;

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
      virgl_release_shader_binding(vctx, shader_type);

   while (vctx->atomic_buffer_enabled_mask) {
      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
   }

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   if (vctx->supports_staging)
      virgl_staging_destroy(&vctx->staging);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from dri/i965's
    * gen6_get_sample_position. The only addition is that we hold the msaa
    * positions for all sample counts in a flat array. */
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

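/* Tweaks toggle host-side workarounds (e.g. BGRA emulation on GLES hosts);
 * they are only sent when the host advertises VIRGL_CAP_APP_TWEAK_SUPPORT,
 * as checked in virgl_context_create below.
 */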
static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
{
   if (rs->tweak_gles_emulate_bgra)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);

   if (rs->tweak_gles_apply_bgra_dest_swizzle)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);

   if (rs->tweak_gles_tf3_value > 0)
      virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
                         rs->tweak_gles_tf3_value);
}

struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   const char *host_debug_flagstring;

   vctx = CALLOC_STRUCT(virgl_context);
   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
   virgl_transfer_queue_init(&vctx->queue, vctx);
   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                              (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));

   /* Reserve some space for transfers. */
   if (vctx->encoded_transfers)
      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   /* We use a special staging buffer as the source of copy transfers. */
   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
       vctx->encoded_transfers) {
      virgl_staging_init(&vctx->staging, &vctx->base, 1024 * 1024);
      vctx->supports_staging = true;
   }

   vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
      virgl_send_tweaks(vctx, rs);

   return &vctx->base;
fail:
   virgl_context_destroy(&vctx->base);
   return NULL;
}