virgl: Work around possible memory exhaustion
[mesa.git] / src/gallium/drivers/virgl/virgl_context.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "pipebuffer/pb_buffer.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

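/* Object handles are small integer IDs shared with the host renderer. A
 * process-wide monotonically increasing counter is sufficient here: handles
 * are never recycled, and a 32-bit counter is not expected to wrap in
 * practice.
 */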
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}

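/* The virgl_attach_res_* helpers below add the hw resource of each bound
 * object to the current command buffer's resource list, so the host keeps
 * those resources attached to this submission. The FALSE argument appears
 * to mean "track the resource only, don't write its handle into the command
 * stream" (see the winsys emit_res implementation).
 */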
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
         virgl_resource_dirty(res, surf->u.tex.level);
      }
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
            virgl_resource_dirty(res, surf->u.tex.level);
         }
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->view_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      assert(binding->views[i] && binding->views[i]->texture);
      res = virgl_resource(binding->views[i]->texture);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ubo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ubos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ssbo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ssbos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->image_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->images[i].resource);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(vctx->atomic_buffers[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

/*
 * After flushing, the hw context still has a bunch of resources bound,
 * so we need to rebind those here.
 */
static void virgl_reemit_draw_resources(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static void virgl_reemit_compute_resources(struct virgl_context *vctx)
{
   virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);

   virgl_attach_res_atomic_buffers(vctx);
}

static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   /* no support for buffer surfaces */
   if (resource->target == PIPE_BUFFER)
      return NULL;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;

   surf->base.width = u_minify(resource->width0, templ->u.tex.level);
   surf->base.height = u_minify(resource->height0, templ->u.tex.level);
   surf->base.u.tex.level = templ->u.tex.level;
   surf->base.u.tex.first_layer = templ->u.tex.first_layer;
   surf->base.u.tex.last_layer = templ->u.tex.last_layer;

   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   if (!state)
      return NULL;

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers);

   vctx->vertex_array_dirty = TRUE;
}

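/* Flush pending vertex-buffer state to the host. When the vertex-elements
 * workaround above duplicated bindings, remap the user's buffers through
 * binding_map so each element references its own binding.
 */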
static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);

      vctx->vertex_array_dirty = FALSE;
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

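/* Resource-backed constant buffers are bound by reference; user (CPU-side)
 * constant buffers are instead written inline into the command stream.
 */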
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   if (buf && buf->buffer) {
      struct virgl_resource *res = virgl_resource(buf->buffer);
      virgl_encoder_set_uniform_buffer(vctx, shader, index,
                                       buf->buffer_offset,
                                       buf->buffer_size, res);

      pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
      binding->ubos[index] = *buf;
      binding->ubo_enabled_mask |= 1 << index;
   } else {
      static const struct pipe_constant_buffer dummy_ubo;
      if (!buf)
         buf = &dummy_ubo;
      virgl_encoder_write_constant_buffer(vctx, shader, index,
                                          buf->buffer_size / 4,
                                          buf->user_buffer);

      pipe_resource_reference(&binding->ubos[index].buffer, NULL);
      binding->ubo_enabled_mask &= ~(1 << index);
   }
}

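/* Common path for every shader stage: run the virgl TGSI transform on the
 * incoming tokens, encode the shader for the host, and return the object
 * handle cast to a pointer.
 */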
static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   struct tgsi_token *new_tokens;
   int ret;

   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode shader state */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE(new_tokens);
      return NULL;
   }

   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

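/* Draw entry point. Primitive types the host cannot handle (per the
 * advertised prim_mask) are lowered with u_primconvert, and user index
 * buffers are first uploaded into a real resource, since the encoder can
 * only reference host-visible buffers.
 */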
static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = info.start * ib.index_size;

      if (ib.user_buffer) {
         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 4,
                       ib.user_buffer, &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
   }

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   pipe_resource_reference(&ib.buffer, NULL);
}

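/* Submit the accumulated command stream to the host. Called both from the
 * state tracker's flush hook and internally; a flush with nothing encoded
 * and no fence request is skipped entirely.
 */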
static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* skip empty cbuf */
   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
       ctx->queue.num_dwords == 0 &&
       !fence)
      return;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);
   rs->vws->submit_cmd(rs->vws, ctx->cbuf, fence);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;

   /* We have flushed the command queue, including any pending copy transfers
    * involving staging resources.
    */
   ctx->queued_staging_res_size = 0;
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_flush_eq(vctx, vctx, fence);
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   binding->view_enabled_mask &= ~u_bit_consecutive(start_slot, num_views);
   for (unsigned i = 0; i < num_views; i++) {
      unsigned idx = start_slot + i;
      if (views && views[i]) {
         pipe_sampler_view_reference(&binding->views[idx], views[i]);
         binding->view_enabled_mask |= 1 << idx;
      } else {
         pipe_sampler_view_reference(&binding->views[idx], NULL);
      }
   }

   virgl_encode_set_sampler_views(vctx, shader_type,
         start_slot, num_views, (struct virgl_sampler_view **)binding->views);
   virgl_attach_res_sampler_views(vctx, shader_type);
}

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   if (dres->u.b.target == PIPE_BUFFER)
      util_range_add(&dres->valid_buffer_range, dstx, dstx + src_box->width);
   virgl_resource_dirty(dres, dst_level);

   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(blit->dst.resource->format) ==
           util_format_is_srgb(blit->dst.format)));

   virgl_resource_dirty(dres, blit->dst.level);
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
                                        unsigned start_slot,
                                        unsigned count,
                                        const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
                                 buffers[i].buffer);
         vctx->atomic_buffers[idx] = buffers[i];
         vctx->atomic_buffer_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
      }
   }

   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}

static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers,
                                     unsigned writable_bitmask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
         binding->ssbos[idx] = buffers[i];
         binding->ssbo_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
      }
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (images && images[i].resource) {
         pipe_resource_reference(&binding->images[idx].resource,
                                 images[i].resource);
         binding->images[idx] = images[i];
         binding->image_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->images[idx].resource, NULL);
      }
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *new_tokens = state->prog;
   struct pipe_stream_output_info so_info = {};
   int ret;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      return NULL;
   }

   return (void *)(unsigned long)handle;
}

static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_compute)
      virgl_reemit_compute_resources(vctx);
   vctx->num_compute++;

   virgl_encode_launch_grid(vctx, info);
}

static void
virgl_release_shader_binding(struct virgl_context *vctx,
                             enum pipe_shader_type shader_type)
{
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   while (binding->view_enabled_mask) {
      int i = u_bit_scan(&binding->view_enabled_mask);
      pipe_sampler_view_reference(
            (struct pipe_sampler_view **)&binding->views[i], NULL);
   }

   while (binding->ubo_enabled_mask) {
      int i = u_bit_scan(&binding->ubo_enabled_mask);
      pipe_resource_reference(&binding->ubos[i].buffer, NULL);
   }

   while (binding->ssbo_enabled_mask) {
      int i = u_bit_scan(&binding->ssbo_enabled_mask);
      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
   }

   while (binding->image_enabled_mask) {
      int i = u_bit_scan(&binding->image_enabled_mask);
      pipe_resource_reference(&binding->images[i].resource, NULL);
   }
}

static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   enum pipe_shader_type shader_type;

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
      virgl_release_shader_binding(vctx, shader_type);

   while (vctx->atomic_buffer_enabled_mask) {
      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
   }

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   if (vctx->transfer_uploader)
      u_upload_destroy(vctx->transfer_uploader);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from dri/i965's gen6_get_sample_position.
    * The only addition is that we hold the msaa positions for all sample
    * counts in a flat array. */
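   /* Each byte packs one sample position as 4.4 fixed point: x in the high
    * nibble, y in the low nibble, both in 1/16ths of a pixel (see the
    * decoding below). */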
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   const char *host_debug_flagstring;

   vctx = CALLOC_STRUCT(virgl_context);
   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
   virgl_transfer_queue_init(&vctx->queue, rs, &vctx->transfer_pool);
   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                              (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));

   /* Reserve some space for transfers. */
   if (vctx->encoded_transfers)
      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   /* Use a custom/staging buffer for the transfer uploader, since we are
    * using it only for copies to other resources.
    */
   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
       vctx->encoded_transfers) {
      vctx->transfer_uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                                PIPE_BIND_CUSTOM,
                                                PIPE_USAGE_STAGING,
                                                VIRGL_RESOURCE_FLAG_STAGING);
      if (!vctx->transfer_uploader)
         goto fail;
   }

   vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   return &vctx->base;
fail:
   virgl_context_destroy(&vctx->base);
   return NULL;
}