virgl: use transfer queue
[mesa.git] src/gallium/drivers/virgl/virgl_context.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "pipebuffer/pb_buffer.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}
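
/* Handle allocation is a bare monotonic counter with no locking; handles
 * are therefore only unique within a single process, start at 1, and 0 is
 * left free to mean "no object" in bind calls (see
 * virgl_bind_rasterizer_state below).
 */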

static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
         virgl_resource_dirty(res, surf->u.tex.level);
      }
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
            virgl_resource_dirty(res, surf->u.tex.level);
         }
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   struct virgl_resource *res;
   uint32_t remaining_mask = tinfo->enabled_mask;
   unsigned i;
   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      res = virgl_resource(tinfo->views[i]->base.texture);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
      res = virgl_resource(vctx->ubos[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
      res = virgl_resource(vctx->ssbos[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
      res = virgl_resource(vctx->images[shader_type][i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;
   for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
      res = virgl_resource(vctx->atomic_buffers[i]);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
      }
   }
}

/*
 * after flushing, the hw context still has a bunch of
 * resources bound, so we need to rebind those here.
 */
static void virgl_reemit_res(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;
   if (resource->target != PIPE_BUFFER) {
      surf->base.width = u_minify(resource->width0, templ->u.tex.level);
      surf->base.height = u_minify(resource->height0, templ->u.tex.level);
      surf->base.u.tex.level = templ->u.tex.level;
      surf->base.u.tex.first_layer = templ->u.tex.first_layer;
      surf->base.u.tex.last_layer = templ->u.tex.last_layer;
   } else {
      surf->base.width = templ->u.buf.last_element - templ->u.buf.first_element + 1;
      surf->base.height = resource->height0;
      surf->base.u.buf.first_element = templ->u.buf.first_element;
      surf->base.u.buf.last_element = templ->u.buf.last_element;
   }
   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}
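
/* Most CSOs in this driver live host-side: create encodes the object and
 * only the 32-bit handle comes back to gallium, smuggled through the
 * void * CSO pointer, which is why the bind/delete callbacks below just
 * cast it back via (unsigned long).
 */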

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   if (!state)
      return NULL;

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}
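
/* Worked example of the remap above: for elements E0{divisor 0, buffer 0}
 * and E1{divisor 2, buffer 0}, element i is rewritten to read binding i
 * and binding_map becomes {0, 0}, so virgl_hw_set_vertex_buffers() will
 * emit vertex_buffer[0] twice, once per duplicated binding.
 */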

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers);

   vctx->vertex_array_dirty = TRUE;
}

static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);
   }
}
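
/* Vertex buffers are encoded lazily: set_vertex_buffers and
 * bind_vertex_elements_state only raise vertex_array_dirty, and the
 * SET_VERTEX_BUFFERS command is emitted from virgl_draw_vbo() once the
 * bound element state (and thus any binding_map remap) is known.  Note
 * the dirty flag is not cleared here, so the buffers are re-encoded on
 * every draw.
 */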

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (buf) {
      if (!buf->user_buffer) {
         struct virgl_resource *res = virgl_resource(buf->buffer);
         virgl_encoder_set_uniform_buffer(vctx, shader, index, buf->buffer_offset,
                                          buf->buffer_size, res);
         pipe_resource_reference(&vctx->ubos[shader][index], buf->buffer);
         return;
      }
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
      virgl_encoder_write_constant_buffer(vctx, shader, index, buf->buffer_size / 4, buf->user_buffer);
   } else {
      virgl_encoder_write_constant_buffer(vctx, shader, index, 0, NULL);
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
   }
}
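
/* Two paths for constants: buffer-backed UBOs are bound by resource via
 * SET_UNIFORM_BUFFER and tracked in vctx->ubos so they can be re-attached
 * after a flush, while user constant buffers are copied inline into the
 * command stream (the encoder takes a dword count, hence buffer_size / 4).
 */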

void virgl_transfer_inline_write(struct pipe_context *ctx,
                                 struct pipe_resource *res,
                                 unsigned level,
                                 unsigned usage,
                                 const struct pipe_box *box,
                                 const void *data,
                                 unsigned stride,
                                 unsigned layer_stride)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(ctx->screen);
   struct virgl_resource *grres = virgl_resource(res);
   struct virgl_transfer trans = { 0 };

   trans.base.resource = res;
   trans.base.level = level;
   trans.base.usage = usage;
   trans.base.box = *box;
   trans.base.stride = stride;
   trans.base.layer_stride = layer_stride;
   trans.offset = box->x;

   virgl_resource_dirty(grres, 0);

   if (virgl_res_needs_flush_wait(vctx, &trans)) {
      ctx->flush(ctx, NULL, 0);

      vs->vws->resource_wait(vs->vws, grres->hw_res);
   }

   virgl_encoder_inline_write(vctx, grres, level, usage,
                              box, data, stride, layer_stride);
}
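
/* The flush+wait above is what keeps the inline write ordered: if the
 * destination may still be referenced by queued commands or be busy on
 * the host, the pending command buffer is submitted and the resource
 * waited on before the new bytes are encoded, so the write cannot clobber
 * data the host is still reading.
 */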

static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   struct tgsi_token *new_tokens;
   int ret;

   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode shader state */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE(new_tokens);
      return NULL;
   }

   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = info.start * ib.index_size;

      if (ib.user_buffer) {
         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 4,
                       ib.user_buffer, &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
   }

   vctx->num_draws++;
   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   pipe_resource_reference(&ib.buffer, NULL);
}
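
/* Draws the host cannot rasterize directly (prim_mask miss) are lowered in
 * the guest with u_primconvert; user index buffers never travel inline,
 * they are staged through the u_upload_mgr stream and the draw then
 * references the staging buffer at the offset u_upload_data writes back.
 */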

static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   int out_fence_fd = -1;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_transfers = ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);
   rs->vws->submit_cmd(rs->vws, ctx->cbuf, ctx->cbuf->in_fence_fd,
                       ctx->cbuf->needs_out_fence_fd ? &out_fence_fd : NULL);

   if (fence)
      *fence = rs->vws->cs_create_fence(rs->vws, out_fence_fd);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   /* add back current framebuffer resources to reference list? */
   virgl_reemit_res(ctx);
}
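
/* With encoded transfers, every command buffer starts with
 * VIRGL_MAX_TBUF_DWORDS of reserved head space (cdw is bumped right after
 * submission above); virgl_transfer_queue_clear() fills that window with
 * the queued transfers just before submit_cmd, so queued uploads reach the
 * host ahead of the drawing commands recorded after them.
 */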

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (flags & PIPE_FLUSH_FENCE_FD)
      vctx->cbuf->needs_out_fence_fd = true;

   virgl_flush_eq(vctx, vctx, fence);

   if (vctx->cbuf->in_fence_fd != -1) {
      close(vctx->cbuf->in_fence_fd);
      vctx->cbuf->in_fence_fd = -1;
   }
   vctx->cbuf->needs_out_fence_fd = false;
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   int i;
   uint32_t disable_mask = ~((1ull << num_views) - 1);
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   uint32_t new_mask = 0;
   uint32_t remaining_mask;

   remaining_mask = tinfo->enabled_mask & disable_mask;

   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
   }

   for (i = 0; i < num_views; i++) {
      struct virgl_sampler_view *grview = virgl_sampler_view(views[i]);

      if (views[i] == (struct pipe_sampler_view *)tinfo->views[i])
         continue;

      if (grview) {
         new_mask |= 1 << i;
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], views[i]);
      } else {
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
         disable_mask |= 1 << i;
      }
   }

   tinfo->enabled_mask &= ~disable_mask;
   tinfo->enabled_mask |= new_mask;
   virgl_encode_set_sampler_views(vctx, shader_type, start_slot, num_views, tinfo->views);
   virgl_attach_res_sampler_views(vctx, shader_type);
}
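
/* Mask bookkeeping, by example: with slots {0,1} enabled and num_views == 1,
 * disable_mask starts as ~0x1, so the stale view in slot 1 is released up
 * front; each slot that receives a non-NULL view sets its bit in new_mask,
 * and enabled_mask ends up covering exactly the live slots.
 */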

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   virgl_resource_dirty(dres, dst_level);
   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(blit->dst.resource->format) ==
           util_format_is_srgb(blit->dst.format)));

   virgl_resource_dirty(dres, blit->dst.level);
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
                                        unsigned start_slot,
                                        unsigned count,
                                        const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (buffers) {
         if (buffers[i].buffer) {
            pipe_resource_reference(&vctx->atomic_buffers[idx],
                                    buffers[i].buffer);
            continue;
         }
      }
      pipe_resource_reference(&vctx->atomic_buffers[idx], NULL);
   }
   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}

static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (buffers) {
         if (buffers[i].buffer) {
            pipe_resource_reference(&vctx->ssbos[shader][idx], buffers[i].buffer);
            continue;
         }
      }
      pipe_resource_reference(&vctx->ssbos[shader][idx], NULL);
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (images) {
         if (images[i].resource) {
            pipe_resource_reference(&vctx->images[shader][idx], images[i].resource);
            continue;
         }
      }
      pipe_resource_reference(&vctx->images[shader][idx], NULL);
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *new_tokens = state->prog;
   struct pipe_stream_output_info so_info = {};
   int ret;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      return NULL;
   }

   return (void *)(unsigned long)handle;
}

static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encode_launch_grid(vctx, info);
   vctx->num_compute++;
}

static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from dri/i965's
    * gen6_get_sample_position(). The only addition is that we hold the
    * msaa positions for all sample counts in a flat array. */
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;
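
   /* Each position is one byte of 4.4 fixed point: x in the high nibble,
    * y in the low one; e.g. bits == 0x84 decodes to (8/16, 4/16) =
    * (0.5, 0.25).
    */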

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   const char *host_debug_flagstring;

   vctx = CALLOC_STRUCT(virgl_context);
   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
   virgl_transfer_queue_init(&vctx->queue, rs, &vctx->transfer_pool);
   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                              (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));

   /* Reserve some space for transfers. */
   if (vctx->encoded_transfers)
      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   return &vctx->base;
fail:
   return NULL;
}