virgl: Support VIRGL_BIND_SHARED
[mesa.git] / src/gallium/drivers/virgl/virgl_context.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
23
24 #include <libsync.h>
25 #include "pipe/p_shader_tokens.h"
26
27 #include "pipe/p_context.h"
28 #include "pipe/p_defines.h"
29 #include "pipe/p_screen.h"
30 #include "pipe/p_state.h"
31 #include "util/u_inlines.h"
32 #include "util/u_memory.h"
33 #include "util/u_format.h"
34 #include "util/u_prim.h"
35 #include "util/u_transfer.h"
36 #include "util/u_helpers.h"
37 #include "util/slab.h"
38 #include "util/u_upload_mgr.h"
39 #include "util/u_blitter.h"
40 #include "tgsi/tgsi_text.h"
41 #include "indices/u_primconvert.h"
42
43 #include "pipebuffer/pb_buffer.h"
44
45 #include "virgl_encode.h"
46 #include "virgl_context.h"
47 #include "virgl_protocol.h"
48 #include "virgl_resource.h"
49 #include "virgl_screen.h"
50
51 struct virgl_vertex_elements_state {
52 uint32_t handle;
53 uint8_t binding_map[PIPE_MAX_ATTRIBS];
54 uint8_t num_bindings;
55 };
56
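/* Handles name objects on the host side.  They are allocated from a single
 * process-wide counter; handle 0 is reserved to mean "no object" (it is
 * what the bind functions encode to unbind).
 */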
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}

bool
virgl_can_rebind_resource(struct virgl_context *vctx,
                          struct pipe_resource *res)
{
   /* We cannot rebind resources that are referenced by host objects, which
    * are
    *
    *  - VIRGL_OBJECT_SURFACE
    *  - VIRGL_OBJECT_SAMPLER_VIEW
    *  - VIRGL_OBJECT_STREAMOUT_TARGET
    *
    * Because surfaces cannot be created from buffers, we require the resource
    * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
    */
   const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
                                      PIPE_BIND_STREAM_OUTPUT);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
}

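/* Re-emit every tracked binding that still references the resource.  This
 * is used after the resource's storage has been replaced, so the host
 * re-resolves each binding to the new backing storage.
 */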
void
virgl_rebind_resource(struct virgl_context *vctx,
                      struct pipe_resource *res)
{
   /* Queries use internally created buffers and do not go through transfers.
    * Index buffers are not bindable.  They are not tracked.
    */
   MAYBE_UNUSED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
                                               PIPE_BIND_CONSTANT_BUFFER |
                                               PIPE_BIND_SHADER_BUFFER |
                                               PIPE_BIND_SHADER_IMAGE);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   unsigned i;

   assert(virgl_can_rebind_resource(vctx, res) &&
          (bind_history & tracked_bind) == bind_history);

   if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
      for (i = 0; i < vctx->num_vertex_buffers; i++) {
         if (vctx->vertex_buffer[i].buffer.resource == res) {
            vctx->vertex_array_dirty = true;
            break;
         }
      }
   }

   if (bind_history & PIPE_BIND_SHADER_BUFFER) {
      uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
      while (remaining_mask) {
         int i = u_bit_scan(&remaining_mask);
         if (vctx->atomic_buffers[i].buffer == res) {
            const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
            virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
         }
      }
   }

   /* check per-stage shader bindings */
   if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
                       PIPE_BIND_SHADER_BUFFER |
                       PIPE_BIND_SHADER_IMAGE)) {
      enum pipe_shader_type shader_type;
      for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
         const struct virgl_shader_binding_state *binding =
            &vctx->shader_bindings[shader_type];

         if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
            uint32_t remaining_mask = binding->ubo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ubos[i].buffer == res) {
                  const struct pipe_constant_buffer *ubo = &binding->ubos[i];
                  virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
                                                   ubo->buffer_offset,
                                                   ubo->buffer_size,
                                                   virgl_resource(res));
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_BUFFER) {
            uint32_t remaining_mask = binding->ssbo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ssbos[i].buffer == res) {
                  const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
                  virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
                                                  ssbo);
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_IMAGE) {
            uint32_t remaining_mask = binding->image_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->images[i].resource == res) {
                  const struct pipe_image_view *image = &binding->images[i];
                  virgl_encode_set_shader_images(vctx, shader_type, i, 1,
                                                 image);
               }
            }
         }
      }
   }
}

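/* The virgl_attach_res_* helpers below add each currently bound resource to
 * the command buffer's resource list via vws->emit_res(), so the resource
 * stays alive and tracked for the duration of the submission.
 */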
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
         virgl_resource_dirty(res, surf->u.tex.level);
      }
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
            virgl_resource_dirty(res, surf->u.tex.level);
         }
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->view_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      assert(binding->views[i] && binding->views[i]->texture);
      res = virgl_resource(binding->views[i]->texture);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ubo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ubos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ssbo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ssbos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->image_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->images[i].resource);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(vctx->atomic_buffers[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

/*
 * After flushing, the hw context still has a bunch of resources bound, so we
 * need to rebind those here.
 */
static void virgl_reemit_draw_resources(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* Reattach any flushed resources: framebuffer, sampler views,
    * vertex/index/uniform/stream buffers.
    */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static void virgl_reemit_compute_resources(struct virgl_context *vctx)
{
   virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);

   virgl_attach_res_atomic_buffers(vctx);
}

static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   /* no support for buffer surfaces */
   if (resource->target == PIPE_BUFFER)
      return NULL;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;

   surf->base.width = u_minify(resource->width0, templ->u.tex.level);
   surf->base.height = u_minify(resource->height0, templ->u.tex.level);
   surf->base.u.tex.level = templ->u.tex.level;
   surf->base.u.tex.first_layer = templ->u.tex.first_layer;
   surf->base.u.tex.last_layer = templ->u.tex.last_layer;

   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   if (!state)
      return NULL;

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
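         /* For example, three elements that all read vertex buffer 0 become
          * three one-element bindings 0..2 with binding_map = {0, 0, 0}, and
          * each element j below is rewired to its own binding j.
          */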
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; i++) {
         struct virgl_resource *res =
            virgl_resource(buffers[i].buffer.resource);
         if (res && !buffers[i].is_user_buffer)
            res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
      }
   }

   vctx->vertex_array_dirty = TRUE;
}

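/* Flush the guest-side vertex state to the host, remapping the vertex
 * buffers through binding_map when the bound vertex-elements state had to
 * duplicate bindings (see virgl_create_vertex_elements_state).
 */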
static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);

      vctx->vertex_array_dirty = FALSE;
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   if (buf && buf->buffer) {
      struct virgl_resource *res = virgl_resource(buf->buffer);
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;

      virgl_encoder_set_uniform_buffer(vctx, shader, index,
                                       buf->buffer_offset,
                                       buf->buffer_size, res);

      pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
      binding->ubos[index] = *buf;
      binding->ubo_enabled_mask |= 1 << index;
   } else {
      static const struct pipe_constant_buffer dummy_ubo;
      if (!buf)
         buf = &dummy_ubo;
      virgl_encoder_write_constant_buffer(vctx, shader, index,
                                          buf->buffer_size / 4,
                                          buf->user_buffer);

      pipe_resource_reference(&binding->ubos[index].buffer, NULL);
      binding->ubo_enabled_mask &= ~(1 << index);
   }
}

static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   struct tgsi_token *new_tokens;
   int ret;

   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode shader state; the same encoder handles all stages */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE(new_tokens);
      return NULL;
   }

   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = info.start * ib.index_size;

      if (ib.user_buffer) {
         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 4,
                       ib.user_buffer, &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
   }

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   pipe_resource_reference(&ib.buffer, NULL);
}

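/* Flush the accumulated command stream to the host.  cbuf_initial_cdw is
 * the dword count of a freshly reset command buffer (the reserved transfer
 * space plus the set_sub_ctx command), so cdw == cbuf_initial_cdw means no
 * new commands have been encoded since the last flush.
 */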
static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* skip empty cbuf */
   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
       ctx->queue.num_dwords == 0 &&
       !fence)
      return;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);
   rs->vws->submit_cmd(rs->vws, ctx->cbuf, fence);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;

   /* We have flushed the command queue, including any pending copy transfers
    * involving staging resources.
    */
   ctx->queued_staging_res_size = 0;
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_flush_eq(vctx, vctx, fence);
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   binding->view_enabled_mask &= ~u_bit_consecutive(start_slot, num_views);
   for (unsigned i = 0; i < num_views; i++) {
      unsigned idx = start_slot + i;
      if (views && views[i]) {
         struct virgl_resource *res = virgl_resource(views[i]->texture);
         res->bind_history |= PIPE_BIND_SAMPLER_VIEW;

         pipe_sampler_view_reference(&binding->views[idx], views[i]);
         binding->view_enabled_mask |= 1 << idx;
      } else {
         pipe_sampler_view_reference(&binding->views[idx], NULL);
      }
   }

   virgl_encode_set_sampler_views(vctx, shader_type, start_slot, num_views,
                                  (struct virgl_sampler_view **)binding->views);
   virgl_attach_res_sampler_views(vctx, shader_type);
}

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   if (dres->u.b.target == PIPE_BUFFER)
      util_range_add(&dres->valid_buffer_range, dstx, dstx + src_box->width);
   virgl_resource_dirty(dres, dst_level);

   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(blit->dst.resource->format) ==
           util_format_is_srgb(blit->dst.format)));

   virgl_resource_dirty(dres, blit->dst.level);
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
                                        unsigned start_slot,
                                        unsigned count,
                                        const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
                                 buffers[i].buffer);
         vctx->atomic_buffers[idx] = buffers[i];
         vctx->atomic_buffer_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
      }
   }

   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}

static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers,
                                     unsigned writable_bitmask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
         binding->ssbos[idx] = buffers[i];
         binding->ssbo_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
      }
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         res->bind_history |= PIPE_BIND_SHADER_IMAGE;

         pipe_resource_reference(&binding->images[idx].resource,
                                 images[i].resource);
         binding->images[idx] = images[i];
         binding->image_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->images[idx].resource, NULL);
      }
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *new_tokens = state->prog;
   struct pipe_stream_output_info so_info = {};
   int ret;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      return NULL;
   }

   return (void *)(unsigned long)handle;
}

static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_compute)
      virgl_reemit_compute_resources(vctx);
   vctx->num_compute++;

   virgl_encode_launch_grid(vctx, info);
}

static void
virgl_release_shader_binding(struct virgl_context *vctx,
                             enum pipe_shader_type shader_type)
{
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   while (binding->view_enabled_mask) {
      int i = u_bit_scan(&binding->view_enabled_mask);
      pipe_sampler_view_reference(
            (struct pipe_sampler_view **)&binding->views[i], NULL);
   }

   while (binding->ubo_enabled_mask) {
      int i = u_bit_scan(&binding->ubo_enabled_mask);
      pipe_resource_reference(&binding->ubos[i].buffer, NULL);
   }

   while (binding->ssbo_enabled_mask) {
      int i = u_bit_scan(&binding->ssbo_enabled_mask);
      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
   }

   while (binding->image_enabled_mask) {
      int i = u_bit_scan(&binding->image_enabled_mask);
      pipe_resource_reference(&binding->images[i].resource, NULL);
   }
}

static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   enum pipe_shader_type shader_type;

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
      virgl_release_shader_binding(vctx, shader_type);

   while (vctx->atomic_buffer_enabled_mask) {
      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
   }

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   if (vctx->transfer_uploader)
      u_upload_destroy(vctx->transfer_uploader);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from the i965 driver's
    * gen6_get_sample_position(); the only addition is that we hold the
    * MSAA positions for all sample counts in one flat array.
    */
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
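   /* Each byte packs one sample position: x in the high nibble, y in the
    * low nibble, both in units of 1/16th of a pixel.
    */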
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

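/* The driver's pipe_context entry point, wired up by the virgl screen as
 * pscreen->context_create.  A minimal usage sketch, assuming a previously
 * created virgl screen:
 *
 *    struct pipe_context *pctx = pscreen->context_create(pscreen, NULL, 0);
 */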
struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   const char *host_debug_flagstring;

   vctx = CALLOC_STRUCT(virgl_context);
   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
   virgl_transfer_queue_init(&vctx->queue, vctx);
   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                              (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));

   /* Reserve some space for transfers. */
   if (vctx->encoded_transfers)
      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   /* Use a custom/staging buffer for the transfer uploader, since we are
    * using it only for copies to other resources.
    */
   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
       vctx->encoded_transfers) {
      vctx->transfer_uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                                PIPE_BIND_CUSTOM,
                                                PIPE_USAGE_STAGING,
                                                VIRGL_RESOURCE_FLAG_STAGING);
      if (!vctx->transfer_uploader)
         goto fail;
   }

   vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   return &vctx->base;
fail:
   virgl_context_destroy(&vctx->base);
   return NULL;
}