Revert "virgl: remove unused stride-arguments"
[mesa.git] / src / gallium / drivers / virgl / virgl_context.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "pipe/p_shader_tokens.h"
25
26 #include "pipe/p_context.h"
27 #include "pipe/p_defines.h"
28 #include "pipe/p_screen.h"
29 #include "pipe/p_state.h"
30 #include "util/u_inlines.h"
31 #include "util/u_memory.h"
32 #include "util/u_format.h"
33 #include "util/u_prim.h"
34 #include "util/u_transfer.h"
35 #include "util/u_helpers.h"
36 #include "util/slab.h"
37 #include "util/u_upload_mgr.h"
38 #include "util/u_blitter.h"
39 #include "tgsi/tgsi_text.h"
40 #include "indices/u_primconvert.h"
41
42 #include "pipebuffer/pb_buffer.h"
43
44 #include "virgl_encode.h"
45 #include "virgl_context.h"
46 #include "virgl_protocol.h"
47 #include "virgl_resource.h"
48 #include "virgl_screen.h"
49
50 static uint32_t next_handle;
51 uint32_t virgl_object_assign_handle(void)
52 {
53 return ++next_handle;
54 }
55
/* Upload the accumulated dirty range of a buffer to its host resource and
 * clear the tracked range.  Must only be called for buffers queued on the
 * context's flush list. */
static void virgl_buffer_flush(struct virgl_context *vctx,
                               struct virgl_buffer *vbuf)
{
   struct virgl_screen *rs = virgl_screen(vctx->base.screen);
   struct pipe_box box;

   assert(vbuf->on_list);

   /* Buffers are one-dimensional: only x/width carry information. */
   box.height = 1;
   box.depth = 1;
   box.y = 0;
   box.z = 0;

   box.x = vbuf->valid_buffer_range.start;
   /* Clamp the dirty span so it never exceeds the buffer's size. */
   box.width = MIN2(vbuf->valid_buffer_range.end - vbuf->valid_buffer_range.start, vbuf->base.u.b.width0);

   vctx->num_transfers++;
   /* level and strides are passed as 0 for linear buffer data. */
   rs->vws->transfer_put(rs->vws, vbuf->base.hw_res,
                         &box, 0, 0, box.x, 0);

   util_range_set_empty(&vbuf->valid_buffer_range);
}
78
79 static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
80 {
81 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
82 struct pipe_surface *surf;
83 struct virgl_resource *res;
84 unsigned i;
85
86 surf = vctx->framebuffer.zsbuf;
87 if (surf) {
88 res = virgl_resource(surf->texture);
89 if (res)
90 vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
91 }
92 for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
93 surf = vctx->framebuffer.cbufs[i];
94 if (surf) {
95 res = virgl_resource(surf->texture);
96 if (res)
97 vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
98 }
99 }
100 }
101
/* Re-reference the textures of every enabled sampler view of one shader
 * stage in the current command buffer. */
static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   struct virgl_resource *res;
   uint32_t remaining_mask = tinfo->enabled_mask;
   unsigned i;
   /* Each set bit in enabled_mask corresponds to a bound view slot. */
   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      res = virgl_resource(tinfo->views[i]->base.texture);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}
119
120 static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
121 {
122 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
123 struct virgl_resource *res;
124 unsigned i;
125
126 for (i = 0; i < vctx->num_vertex_buffers; i++) {
127 res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
128 if (res)
129 vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
130 }
131 }
132
133 static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
134 struct virgl_indexbuf *ib)
135 {
136 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
137 struct virgl_resource *res;
138
139 res = virgl_resource(ib->buffer);
140 if (res)
141 vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
142 }
143
144 static void virgl_attach_res_so_targets(struct virgl_context *vctx)
145 {
146 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
147 struct virgl_resource *res;
148 unsigned i;
149
150 for (i = 0; i < vctx->num_so_targets; i++) {
151 res = virgl_resource(vctx->so_targets[i].base.buffer);
152 if (res)
153 vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
154 }
155 }
156
157 static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
158 enum pipe_shader_type shader_type)
159 {
160 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
161 struct virgl_resource *res;
162 unsigned i;
163 for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
164 res = virgl_resource(vctx->ubos[shader_type][i]);
165 if (res) {
166 vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
167 }
168 }
169 }
170
171 static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
172 enum pipe_shader_type shader_type)
173 {
174 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
175 struct virgl_resource *res;
176 unsigned i;
177 for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
178 res = virgl_resource(vctx->ssbos[shader_type][i]);
179 if (res) {
180 vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
181 }
182 }
183 }
184
185 /*
186 * after flushing, the hw context still has a bunch of
187 * resources bound, so we need to rebind those here.
188 */
189 static void virgl_reemit_res(struct virgl_context *vctx)
190 {
191 enum pipe_shader_type shader_type;
192
193 /* reattach any flushed resources */
194 /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
195 virgl_attach_res_framebuffer(vctx);
196
197 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
198 virgl_attach_res_sampler_views(vctx, shader_type);
199 virgl_attach_res_uniform_buffers(vctx, shader_type);
200 virgl_attach_res_shader_buffers(vctx, shader_type);
201 }
202 virgl_attach_res_vertex_buffers(vctx);
203 virgl_attach_res_so_targets(vctx);
204 }
205
/* Create a host-side surface object for a texture mip level / layer range
 * (or a buffer element range) and return its CPU wrapper. */
static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   /* The resource may now be written through this surface. */
   res->clean = FALSE;
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;
   if (resource->target != PIPE_BUFFER) {
      /* Texture surface: dimensions come from the selected mip level. */
      surf->base.width = u_minify(resource->width0, templ->u.tex.level);
      surf->base.height = u_minify(resource->height0, templ->u.tex.level);
      surf->base.u.tex.level = templ->u.tex.level;
      surf->base.u.tex.first_layer = templ->u.tex.first_layer;
      surf->base.u.tex.last_layer = templ->u.tex.last_layer;
   } else {
      /* Buffer surface: width is the inclusive element-range length. */
      surf->base.width = templ->u.buf.last_element - templ->u.buf.first_element + 1;
      surf->base.height = resource->height0;
      surf->base.u.buf.first_element = templ->u.buf.first_element;
      surf->base.u.buf.last_element = templ->u.buf.last_element;
   }
   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}
241
242 static void virgl_surface_destroy(struct pipe_context *ctx,
243 struct pipe_surface *psurf)
244 {
245 struct virgl_context *vctx = virgl_context(ctx);
246 struct virgl_surface *surf = virgl_surface(psurf);
247
248 pipe_resource_reference(&surf->base.texture, NULL);
249 virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
250 FREE(surf);
251 }
252
253 static void *virgl_create_blend_state(struct pipe_context *ctx,
254 const struct pipe_blend_state *blend_state)
255 {
256 struct virgl_context *vctx = virgl_context(ctx);
257 uint32_t handle;
258 handle = virgl_object_assign_handle();
259
260 virgl_encode_blend_state(vctx, handle, blend_state);
261 return (void *)(unsigned long)handle;
262
263 }
264
265 static void virgl_bind_blend_state(struct pipe_context *ctx,
266 void *blend_state)
267 {
268 struct virgl_context *vctx = virgl_context(ctx);
269 uint32_t handle = (unsigned long)blend_state;
270 virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
271 }
272
273 static void virgl_delete_blend_state(struct pipe_context *ctx,
274 void *blend_state)
275 {
276 struct virgl_context *vctx = virgl_context(ctx);
277 uint32_t handle = (unsigned long)blend_state;
278 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
279 }
280
281 static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
282 const struct pipe_depth_stencil_alpha_state *blend_state)
283 {
284 struct virgl_context *vctx = virgl_context(ctx);
285 uint32_t handle;
286 handle = virgl_object_assign_handle();
287
288 virgl_encode_dsa_state(vctx, handle, blend_state);
289 return (void *)(unsigned long)handle;
290 }
291
292 static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
293 void *blend_state)
294 {
295 struct virgl_context *vctx = virgl_context(ctx);
296 uint32_t handle = (unsigned long)blend_state;
297 virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
298 }
299
300 static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
301 void *dsa_state)
302 {
303 struct virgl_context *vctx = virgl_context(ctx);
304 uint32_t handle = (unsigned long)dsa_state;
305 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
306 }
307
308 static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
309 const struct pipe_rasterizer_state *rs_state)
310 {
311 struct virgl_context *vctx = virgl_context(ctx);
312 uint32_t handle;
313 handle = virgl_object_assign_handle();
314
315 virgl_encode_rasterizer_state(vctx, handle, rs_state);
316 return (void *)(unsigned long)handle;
317 }
318
319 static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
320 void *rs_state)
321 {
322 struct virgl_context *vctx = virgl_context(ctx);
323 uint32_t handle = (unsigned long)rs_state;
324
325 virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
326 }
327
328 static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
329 void *rs_state)
330 {
331 struct virgl_context *vctx = virgl_context(ctx);
332 uint32_t handle = (unsigned long)rs_state;
333 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
334 }
335
336 static void virgl_set_framebuffer_state(struct pipe_context *ctx,
337 const struct pipe_framebuffer_state *state)
338 {
339 struct virgl_context *vctx = virgl_context(ctx);
340
341 vctx->framebuffer = *state;
342 virgl_encoder_set_framebuffer_state(vctx, state);
343 virgl_attach_res_framebuffer(vctx);
344 }
345
/* Forward viewport state straight to the host; nothing is cached locally. */
static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   virgl_encoder_set_viewport_states(virgl_context(ctx), start_slot,
                                     num_viewports, state);
}
354
355 static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
356 unsigned num_elements,
357 const struct pipe_vertex_element *elements)
358 {
359 struct virgl_context *vctx = virgl_context(ctx);
360 uint32_t handle = virgl_object_assign_handle();
361 virgl_encoder_create_vertex_elements(vctx, handle,
362 num_elements, elements);
363 return (void*)(unsigned long)handle;
364
365 }
366
367 static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
368 void *ve)
369 {
370 struct virgl_context *vctx = virgl_context(ctx);
371 uint32_t handle = (unsigned long)ve;
372
373 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
374 }
375
376 static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
377 void *ve)
378 {
379 struct virgl_context *vctx = virgl_context(ctx);
380 uint32_t handle = (unsigned long)ve;
381 virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
382 }
383
384 static void virgl_set_vertex_buffers(struct pipe_context *ctx,
385 unsigned start_slot,
386 unsigned num_buffers,
387 const struct pipe_vertex_buffer *buffers)
388 {
389 struct virgl_context *vctx = virgl_context(ctx);
390
391 util_set_vertex_buffers_count(vctx->vertex_buffer,
392 &vctx->num_vertex_buffers,
393 buffers, start_slot, num_buffers);
394
395 vctx->vertex_array_dirty = TRUE;
396 }
397
/* Emit the bound vertex buffers (if marked dirty) and re-reference their
 * backing resources.
 * NOTE(review): vertex_array_dirty is never cleared here, so the state is
 * re-encoded on every draw — confirm whether that is intentional. */
static void virgl_hw_set_vertex_buffers(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (vctx->vertex_array_dirty) {
      virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);
      virgl_attach_res_vertex_buffers(vctx);
   }
}
407
/* Forward the stencil reference values straight to the host. */
static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   virgl_encoder_set_stencil_ref(virgl_context(ctx), ref);
}
414
/* Forward the blend color straight to the host. */
static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   virgl_encoder_set_blend_color(virgl_context(ctx), color);
}
421
/* Emit the index-buffer state and keep its backing resource referenced. */
static void virgl_hw_set_index_buffer(struct pipe_context *ctx,
                                      struct virgl_indexbuf *ib)
{
   struct virgl_context *virgl_ctx = virgl_context(ctx);

   virgl_encoder_set_index_buffer(virgl_ctx, ib);
   virgl_attach_res_index_buffer(virgl_ctx, ib);
}
429
/* Bind a constant buffer for a shader stage.  Resource-backed buffers are
 * referenced locally and bound host-side; user-pointer buffers have their
 * data written inline into the command stream instead. */
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (buf) {
      if (!buf->user_buffer){
         struct virgl_resource *res = virgl_resource(buf->buffer);
         virgl_encoder_set_uniform_buffer(vctx, shader, index, buf->buffer_offset,
                                          buf->buffer_size, res);
         pipe_resource_reference(&vctx->ubos[shader][index], buf->buffer);
         return;
      }
      /* User pointer: no resource to track; size is sent in dwords. */
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
      virgl_encoder_write_constant_buffer(vctx, shader, index, buf->buffer_size / 4, buf->user_buffer);
   } else {
      /* NULL buffer: unbind the slot. */
      virgl_encoder_write_constant_buffer(vctx, shader, index, 0, NULL);
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
   }
}
451
/* Write data for a resource region directly into the command stream.
 * If the target may still be in use by the host, flush pending commands
 * and wait for the resource to become idle first. */
void virgl_transfer_inline_write(struct pipe_context *ctx,
                                 struct pipe_resource *res,
                                 unsigned level,
                                 unsigned usage,
                                 const struct pipe_box *box,
                                 const void *data,
                                 unsigned stride,
                                 unsigned layer_stride)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(ctx->screen);
   struct virgl_resource *grres = virgl_resource(res);
   struct virgl_buffer *vbuf = virgl_buffer(res);

   /* The resource contents are about to change. */
   grres->clean = FALSE;

   if (virgl_res_needs_flush_wait(vctx, &vbuf->base, usage)) {
      ctx->flush(ctx, NULL, 0);

      vs->vws->resource_wait(vs->vws, vbuf->base.hw_res);
   }

   virgl_encoder_inline_write(vctx, grres, level, usage,
                              box, data, stride, layer_stride);
}
477
478 static void *virgl_shader_encoder(struct pipe_context *ctx,
479 const struct pipe_shader_state *shader,
480 unsigned type)
481 {
482 struct virgl_context *vctx = virgl_context(ctx);
483 uint32_t handle;
484 struct tgsi_token *new_tokens;
485 int ret;
486
487 new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
488 if (!new_tokens)
489 return NULL;
490
491 handle = virgl_object_assign_handle();
492 /* encode VS state */
493 ret = virgl_encode_shader_state(vctx, handle, type,
494 &shader->stream_output,
495 new_tokens);
496 if (ret) {
497 return NULL;
498 }
499
500 FREE(new_tokens);
501 return (void *)(unsigned long)handle;
502
503 }
/* Create a vertex-shader CSO; returns an opaque host handle or NULL. */
static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}
509
/* Create a tessellation-control-shader CSO; opaque host handle or NULL. */
static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}
515
/* Create a tessellation-evaluation-shader CSO; opaque host handle or NULL. */
static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}
521
/* Create a geometry-shader CSO; returns an opaque host handle or NULL. */
static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}
527
/* Create a fragment-shader CSO; returns an opaque host handle or NULL. */
static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}
533
534 static void
535 virgl_delete_fs_state(struct pipe_context *ctx,
536 void *fs)
537 {
538 uint32_t handle = (unsigned long)fs;
539 struct virgl_context *vctx = virgl_context(ctx);
540
541 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
542 }
543
544 static void
545 virgl_delete_gs_state(struct pipe_context *ctx,
546 void *gs)
547 {
548 uint32_t handle = (unsigned long)gs;
549 struct virgl_context *vctx = virgl_context(ctx);
550
551 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
552 }
553
554 static void
555 virgl_delete_vs_state(struct pipe_context *ctx,
556 void *vs)
557 {
558 uint32_t handle = (unsigned long)vs;
559 struct virgl_context *vctx = virgl_context(ctx);
560
561 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
562 }
563
564 static void
565 virgl_delete_tcs_state(struct pipe_context *ctx,
566 void *tcs)
567 {
568 uint32_t handle = (unsigned long)tcs;
569 struct virgl_context *vctx = virgl_context(ctx);
570
571 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
572 }
573
574 static void
575 virgl_delete_tes_state(struct pipe_context *ctx,
576 void *tes)
577 {
578 uint32_t handle = (unsigned long)tes;
579 struct virgl_context *vctx = virgl_context(ctx);
580
581 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
582 }
583
584 static void virgl_bind_vs_state(struct pipe_context *ctx,
585 void *vss)
586 {
587 uint32_t handle = (unsigned long)vss;
588 struct virgl_context *vctx = virgl_context(ctx);
589
590 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
591 }
592
593 static void virgl_bind_tcs_state(struct pipe_context *ctx,
594 void *vss)
595 {
596 uint32_t handle = (unsigned long)vss;
597 struct virgl_context *vctx = virgl_context(ctx);
598
599 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
600 }
601
602 static void virgl_bind_tes_state(struct pipe_context *ctx,
603 void *vss)
604 {
605 uint32_t handle = (unsigned long)vss;
606 struct virgl_context *vctx = virgl_context(ctx);
607
608 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
609 }
610
611 static void virgl_bind_gs_state(struct pipe_context *ctx,
612 void *vss)
613 {
614 uint32_t handle = (unsigned long)vss;
615 struct virgl_context *vctx = virgl_context(ctx);
616
617 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
618 }
619
620
621 static void virgl_bind_fs_state(struct pipe_context *ctx,
622 void *vss)
623 {
624 uint32_t handle = (unsigned long)vss;
625 struct virgl_context *vctx = virgl_context(ctx);
626
627 virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
628 }
629
/* Clear the currently bound framebuffer host-side. */
static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   virgl_encode_clear(virgl_context(ctx), buffers, color, depth, stencil);
}
639
/* Draw entry point: drops degenerate draws, falls back to primconvert for
 * primitive types the host does not support, uploads user index data,
 * emits lazy vertex/index buffer state and finally encodes the draw. */
static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   /* Skip draws whose trimmed vertex count is zero, unless the count comes
    * from stream output / indirect / primitive restart. */
   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
      return;

   /* prim_mask advertises the primitive types the host can draw. */
   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_draw_vbo(vctx->primconvert, dinfo);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = info.start * ib.index_size;

      if (ib.user_buffer) {
         /* Copy user index data into an upload buffer the host can read. */
         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 256,
                       ib.user_buffer, &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
   }

   u_upload_unmap(vctx->uploader);

   vctx->num_draws++;
   virgl_hw_set_vertex_buffers(ctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(ctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   /* Drop the local index-buffer reference (upload or resource). */
   pipe_resource_reference(&ib.buffer, NULL);

}
682
/* Submit the current command buffer to the host, then re-establish the
 * state the fresh command buffer needs (sub-context + resource refs). */
static void virgl_flush_eq(struct virgl_context *ctx, void *closure)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* send the buffer to the remote side for decoding */
   ctx->num_transfers = ctx->num_draws = 0;
   rs->vws->submit_cmd(rs->vws, ctx->cbuf);

   /* The new command buffer must start inside our hardware sub-context. */
   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   /* add back current framebuffer resources to reference list? */
   virgl_reemit_res(ctx);
}
696
/* Gallium flush hook: optionally create a fence, flush every queued dirty
 * buffer, then submit the command buffer via virgl_flush_eq(). */
static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_buffer *buf, *tmp;

   if (fence)
      *fence = rs->vws->cs_create_fence(rs->vws);

   LIST_FOR_EACH_ENTRY_SAFE(buf, tmp, &vctx->to_flush_bufs, flush_list) {
      struct pipe_resource *res = &buf->base.u.b;
      virgl_buffer_flush(vctx, buf);
      list_del(&buf->flush_list);
      buf->on_list = FALSE;
      /* Drop the reference presumably taken when the buffer was queued
       * for flushing — verify against the enqueue site. */
      pipe_resource_reference(&res, NULL);

   }
   virgl_flush_eq(vctx, vctx);
}
718
/* Create a host-side sampler view for a texture and return a CPU wrapper
 * that holds a reference on the texture. */
static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                      struct pipe_resource *texture,
                                      const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   /* Clear the pointer copied from the template before taking a counted
    * reference on the texture. */
   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}
748
/* Bind num_views sampler views for a shader stage: update the per-slot
 * references and enabled bitmask, then encode the new set and re-reference
 * the textures. */
static void virgl_set_sampler_views(struct pipe_context *ctx,
                                   enum pipe_shader_type shader_type,
                                   unsigned start_slot,
                                   unsigned num_views,
                                   struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   int i;
   /* Bits for every slot at or above num_views: those become disabled. */
   uint32_t disable_mask = ~((1ull << num_views) - 1);
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   uint32_t new_mask = 0;
   uint32_t remaining_mask;

   remaining_mask = tinfo->enabled_mask & disable_mask;

   /* Release views in slots that fall outside the new range. */
   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
   }

   for (i = 0; i < num_views; i++) {
      struct virgl_sampler_view *grview = virgl_sampler_view(views[i]);

      /* Unchanged slot: keep the existing reference. */
      if (views[i] == (struct pipe_sampler_view *)tinfo->views[i])
         continue;

      if (grview) {
         new_mask |= 1 << i;
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], views[i]);
      } else {
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
         disable_mask |= 1 << i;
      }
   }

   /* NOTE(review): the masks index slots from 0, ignoring start_slot —
    * confirm callers always pass start_slot == 0. */
   tinfo->enabled_mask &= ~disable_mask;
   tinfo->enabled_mask |= new_mask;
   virgl_encode_set_sampler_views(vctx, shader_type, start_slot, num_views, tinfo->views);
   virgl_attach_res_sampler_views(vctx, shader_type);
}
791
792 static void virgl_destroy_sampler_view(struct pipe_context *ctx,
793 struct pipe_sampler_view *view)
794 {
795 struct virgl_context *vctx = virgl_context(ctx);
796 struct virgl_sampler_view *grview = virgl_sampler_view(view);
797
798 virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
799 pipe_resource_reference(&view->texture, NULL);
800 FREE(view);
801 }
802
803 static void *virgl_create_sampler_state(struct pipe_context *ctx,
804 const struct pipe_sampler_state *state)
805 {
806 struct virgl_context *vctx = virgl_context(ctx);
807 uint32_t handle;
808
809 handle = virgl_object_assign_handle();
810
811 virgl_encode_sampler_state(vctx, handle, state);
812 return (void *)(unsigned long)handle;
813 }
814
815 static void virgl_delete_sampler_state(struct pipe_context *ctx,
816 void *ss)
817 {
818 struct virgl_context *vctx = virgl_context(ctx);
819 uint32_t handle = (unsigned long)ss;
820
821 virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
822 }
823
824 static void virgl_bind_sampler_states(struct pipe_context *ctx,
825 enum pipe_shader_type shader,
826 unsigned start_slot,
827 unsigned num_samplers,
828 void **samplers)
829 {
830 struct virgl_context *vctx = virgl_context(ctx);
831 uint32_t handles[32];
832 int i;
833 for (i = 0; i < num_samplers; i++) {
834 handles[i] = (unsigned long)(samplers[i]);
835 }
836 virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
837 }
838
/* Forward the polygon stipple pattern straight to the host. */
static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   virgl_encoder_set_polygon_stipple(virgl_context(ctx), ps);
}
845
/* Forward scissor rectangles straight to the host. */
static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   virgl_encoder_set_scissor_state(virgl_context(ctx), start_slot,
                                   num_scissor, ss);
}
854
/* Forward the MSAA sample mask straight to the host. */
static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   virgl_encoder_set_sample_mask(virgl_context(ctx), sample_mask);
}
861
862 static void virgl_set_min_samples(struct pipe_context *ctx,
863 unsigned min_samples)
864 {
865 struct virgl_context *vctx = virgl_context(ctx);
866 struct virgl_screen *rs = virgl_screen(ctx->screen);
867
868 if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
869 return;
870 virgl_encoder_set_min_samples(vctx, min_samples);
871 }
872
/* Forward user clip-plane state straight to the host. */
static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   virgl_encoder_set_clip_state(virgl_context(ctx), clip);
}
879
880 static void virgl_set_tess_state(struct pipe_context *ctx,
881 const float default_outer_level[4],
882 const float default_inner_level[2])
883 {
884 struct virgl_context *vctx = virgl_context(ctx);
885 struct virgl_screen *rs = virgl_screen(ctx->screen);
886
887 if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
888 return;
889 virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
890 }
891
892 static void virgl_resource_copy_region(struct pipe_context *ctx,
893 struct pipe_resource *dst,
894 unsigned dst_level,
895 unsigned dstx, unsigned dsty, unsigned dstz,
896 struct pipe_resource *src,
897 unsigned src_level,
898 const struct pipe_box *src_box)
899 {
900 struct virgl_context *vctx = virgl_context(ctx);
901 struct virgl_resource *dres = virgl_resource(dst);
902 struct virgl_resource *sres = virgl_resource(src);
903
904 dres->clean = FALSE;
905 virgl_encode_resource_copy_region(vctx, dres,
906 dst_level, dstx, dsty, dstz,
907 sres, src_level,
908 src_box);
909 }
910
/* Intentionally empty: presumably nothing is needed here because resource
 * contents live host-side — confirm against the winsys contract. */
static void
virgl_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}
916
917 static void virgl_blit(struct pipe_context *ctx,
918 const struct pipe_blit_info *blit)
919 {
920 struct virgl_context *vctx = virgl_context(ctx);
921 struct virgl_resource *dres = virgl_resource(blit->dst.resource);
922 struct virgl_resource *sres = virgl_resource(blit->src.resource);
923
924 dres->clean = FALSE;
925 virgl_encode_blit(vctx, dres, sres,
926 blit);
927 }
928
/* Bind SSBO slots for a shader stage: update the local reference table,
 * then encode the binding if the host supports SSBOs for this stage. */
static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;

      if (buffers) {
         if (buffers[i].buffer) {
            pipe_resource_reference(&vctx->ssbos[shader][idx], buffers[i].buffer);
            continue;
         }
      }
      /* NULL array or NULL buffer: unbind the slot. */
      pipe_resource_reference(&vctx->ssbos[shader][idx], NULL);
   }

   /* A zero limit means the host has no SSBO support for this stage. */
   uint32_t max_shader_buffer = shader == PIPE_SHADER_FRAGMENT ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}
956
/* Tear down the context: detach framebuffer state, destroy the hardware
 * sub-context, submit the final command buffer, then free CPU-side
 * objects. */
static void
virgl_context_destroy( struct pipe_context *ctx )
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   /* Drop framebuffer bindings so nothing gets re-referenced by the
    * flush below. */
   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   /* Flush once more so the destroy command actually reaches the host. */
   virgl_flush_eq(vctx, vctx);

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   util_primconvert_destroy(vctx->primconvert);

   slab_destroy_child(&vctx->texture_transfer_pool);
   FREE(vctx);
}
976
977 static void virgl_get_sample_position(struct pipe_context *ctx,
978 unsigned sample_count,
979 unsigned index,
980 float *out_value)
981 {
982 struct virgl_context *vctx = virgl_context(ctx);
983 struct virgl_screen *vs = virgl_screen(vctx->base.screen);
984
985 if (sample_count > vs->caps.caps.v1.max_samples) {
986 debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
987 sample_count, vs->caps.caps.v1.max_samples);
988 return;
989 }
990
991 /* The following is basically copied from dri/i965gen6_get_sample_position
992 * The only addition is that we hold the msaa positions for all sample
993 * counts in a flat array. */
994 uint32_t bits = 0;
995 if (sample_count == 1) {
996 out_value[0] = out_value[1] = 0.5f;
997 return;
998 } else if (sample_count == 2) {
999 bits = vs->caps.caps.v2.msaa_sample_positions[0] >> (8 * index);
1000 } else if (sample_count <= 4) {
1001 bits = vs->caps.caps.v2.msaa_sample_positions[1] >> (8 * index);
1002 } else if (sample_count <= 8) {
1003 bits = vs->caps.caps.v2.msaa_sample_positions[2 + (index >> 2)] >> (8 * (index & 3));
1004 } else if (sample_count <= 16) {
1005 bits = vs->caps.caps.v2.msaa_sample_positions[4 + (index >> 2)] >> (8 * (index & 3));
1006 }
1007 out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
1008 out_value[1] = (bits & 0xf) / 16.0f;
1009 debug_printf("VIRGL: sample postion [%2d/%2d] = (%f, %f)\n",
1010 index, sample_count, out_value[0], out_value[1]);
1011 }
1012
1013 struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
1014 void *priv,
1015 unsigned flags)
1016 {
1017 struct virgl_context *vctx;
1018 struct virgl_screen *rs = virgl_screen(pscreen);
1019 vctx = CALLOC_STRUCT(virgl_context);
1020
1021 vctx->cbuf = rs->vws->cmd_buf_create(rs->vws);
1022 if (!vctx->cbuf) {
1023 FREE(vctx);
1024 return NULL;
1025 }
1026
1027 vctx->base.destroy = virgl_context_destroy;
1028 vctx->base.create_surface = virgl_create_surface;
1029 vctx->base.surface_destroy = virgl_surface_destroy;
1030 vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
1031 vctx->base.create_blend_state = virgl_create_blend_state;
1032 vctx->base.bind_blend_state = virgl_bind_blend_state;
1033 vctx->base.delete_blend_state = virgl_delete_blend_state;
1034 vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
1035 vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
1036 vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
1037 vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
1038 vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
1039 vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;
1040
1041 vctx->base.set_viewport_states = virgl_set_viewport_states;
1042 vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
1043 vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
1044 vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
1045 vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
1046 vctx->base.set_constant_buffer = virgl_set_constant_buffer;
1047
1048 vctx->base.set_tess_state = virgl_set_tess_state;
1049 vctx->base.create_vs_state = virgl_create_vs_state;
1050 vctx->base.create_tcs_state = virgl_create_tcs_state;
1051 vctx->base.create_tes_state = virgl_create_tes_state;
1052 vctx->base.create_gs_state = virgl_create_gs_state;
1053 vctx->base.create_fs_state = virgl_create_fs_state;
1054
1055 vctx->base.bind_vs_state = virgl_bind_vs_state;
1056 vctx->base.bind_tcs_state = virgl_bind_tcs_state;
1057 vctx->base.bind_tes_state = virgl_bind_tes_state;
1058 vctx->base.bind_gs_state = virgl_bind_gs_state;
1059 vctx->base.bind_fs_state = virgl_bind_fs_state;
1060
1061 vctx->base.delete_vs_state = virgl_delete_vs_state;
1062 vctx->base.delete_tcs_state = virgl_delete_tcs_state;
1063 vctx->base.delete_tes_state = virgl_delete_tes_state;
1064 vctx->base.delete_gs_state = virgl_delete_gs_state;
1065 vctx->base.delete_fs_state = virgl_delete_fs_state;
1066
1067 vctx->base.clear = virgl_clear;
1068 vctx->base.draw_vbo = virgl_draw_vbo;
1069 vctx->base.flush = virgl_flush_from_st;
1070 vctx->base.screen = pscreen;
1071 vctx->base.create_sampler_view = virgl_create_sampler_view;
1072 vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
1073 vctx->base.set_sampler_views = virgl_set_sampler_views;
1074
1075 vctx->base.create_sampler_state = virgl_create_sampler_state;
1076 vctx->base.delete_sampler_state = virgl_delete_sampler_state;
1077 vctx->base.bind_sampler_states = virgl_bind_sampler_states;
1078
1079 vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
1080 vctx->base.set_scissor_states = virgl_set_scissor_states;
1081 vctx->base.set_sample_mask = virgl_set_sample_mask;
1082 vctx->base.set_min_samples = virgl_set_min_samples;
1083 vctx->base.set_stencil_ref = virgl_set_stencil_ref;
1084 vctx->base.set_clip_state = virgl_set_clip_state;
1085
1086 vctx->base.set_blend_color = virgl_set_blend_color;
1087
1088 vctx->base.get_sample_position = virgl_get_sample_position;
1089
1090 vctx->base.resource_copy_region = virgl_resource_copy_region;
1091 vctx->base.flush_resource = virgl_flush_resource;
1092 vctx->base.blit = virgl_blit;
1093
1094 vctx->base.set_shader_buffers = virgl_set_shader_buffers;
1095 virgl_init_context_resource_functions(&vctx->base);
1096 virgl_init_query_functions(vctx);
1097 virgl_init_so_functions(vctx);
1098
1099 list_inithead(&vctx->to_flush_bufs);
1100 slab_create_child(&vctx->texture_transfer_pool, &rs->texture_transfer_pool);
1101
1102 vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
1103 vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
1104 PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
1105 if (!vctx->uploader)
1106 goto fail;
1107 vctx->base.stream_uploader = vctx->uploader;
1108 vctx->base.const_uploader = vctx->uploader;
1109
1110 vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
1111 virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);
1112
1113 virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);
1114 return &vctx->base;
1115 fail:
1116 return NULL;
1117 }