virgl: track valid buffer range for transfer sync
[mesa.git] / src / gallium / drivers / virgl / virgl_encode.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdint.h>
24 #include <assert.h>
25 #include <string.h>
26
27 #include "util/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
33
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
39
40 #define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)
41
42 static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
43 uint32_t dword)
44 {
45 int len = (dword >> 16);
46
47 if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
48 ctx->base.flush(&ctx->base, NULL, 0);
49
50 virgl_encoder_write_dword(ctx->cbuf, dword);
51 return 0;
52 }
53
54 static void virgl_encoder_emit_resource(struct virgl_screen *vs,
55 struct virgl_cmd_buf *buf,
56 struct virgl_resource *res)
57 {
58 struct virgl_winsys *vws = vs->vws;
59 if (res && res->hw_res)
60 vws->emit_res(vws, buf, res->hw_res, TRUE);
61 else {
62 virgl_encoder_write_dword(buf, 0);
63 }
64 }
65
66 static void virgl_encoder_write_res(struct virgl_context *ctx,
67 struct virgl_resource *res)
68 {
69 struct virgl_screen *vs = virgl_screen(ctx->base.screen);
70 virgl_encoder_emit_resource(vs, ctx->cbuf, res);
71 }
72
73 int virgl_encode_bind_object(struct virgl_context *ctx,
74 uint32_t handle, uint32_t object)
75 {
76 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
77 virgl_encoder_write_dword(ctx->cbuf, handle);
78 return 0;
79 }
80
81 int virgl_encode_delete_object(struct virgl_context *ctx,
82 uint32_t handle, uint32_t object)
83 {
84 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
85 virgl_encoder_write_dword(ctx->cbuf, handle);
86 return 0;
87 }
88
89 int virgl_encode_blend_state(struct virgl_context *ctx,
90 uint32_t handle,
91 const struct pipe_blend_state *blend_state)
92 {
93 uint32_t tmp;
94 int i;
95
96 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
97 virgl_encoder_write_dword(ctx->cbuf, handle);
98
99 tmp =
100 VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
101 VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
102 VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
103 VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
104 VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);
105
106 virgl_encoder_write_dword(ctx->cbuf, tmp);
107
108 tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
109 virgl_encoder_write_dword(ctx->cbuf, tmp);
110
111 for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
112 tmp =
113 VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
114 VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
115 VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
116 VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor)|
117 VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
118 VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(blend_state->rt[i].alpha_src_factor) |
119 VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
120 VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
121 virgl_encoder_write_dword(ctx->cbuf, tmp);
122 }
123 return 0;
124 }
125
126 int virgl_encode_dsa_state(struct virgl_context *ctx,
127 uint32_t handle,
128 const struct pipe_depth_stencil_alpha_state *dsa_state)
129 {
130 uint32_t tmp;
131 int i;
132 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
133 virgl_encoder_write_dword(ctx->cbuf, handle);
134
135 tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth.enabled) |
136 VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth.writemask) |
137 VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth.func) |
138 VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha.enabled) |
139 VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha.func);
140 virgl_encoder_write_dword(ctx->cbuf, tmp);
141
142 for (i = 0; i < 2; i++) {
143 tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
144 VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
145 VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
146 VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
147 VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
148 VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
149 VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
150 virgl_encoder_write_dword(ctx->cbuf, tmp);
151 }
152
153 virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha.ref_value));
154 return 0;
155 }
/* Encode CREATE_OBJECT(RASTERIZER): handle followed by eight state
 * dwords S0..S7.  S0 packs all the single-bit / small-field flags; the
 * remaining dwords carry point size, sprite-coord mask, line stipple,
 * line width and the polygon-offset parameters.  Always returns 0. */
int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* The bit layout of S0 is fixed by the virgl protocol; the macro
    * order below is cosmetic, but every field must be included. */
   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
      VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
      VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
      VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
      VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
      VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
      VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
      VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
      VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
      VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
      VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
      VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
      VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
      VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
      VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
      VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
      VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
      VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
      VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
      VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
      VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
      VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
      VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
      VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
      VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
      VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
      VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
      VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
      VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
      VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
      VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}
208
209 static void virgl_emit_shader_header(struct virgl_context *ctx,
210 uint32_t handle, uint32_t len,
211 uint32_t type, uint32_t offlen,
212 uint32_t num_tokens)
213 {
214 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
215 virgl_encoder_write_dword(ctx->cbuf, handle);
216 virgl_encoder_write_dword(ctx->cbuf, type);
217 virgl_encoder_write_dword(ctx->cbuf, offlen);
218 virgl_encoder_write_dword(ctx->cbuf, num_tokens);
219 }
220
221 static void virgl_emit_shader_streamout(struct virgl_context *ctx,
222 const struct pipe_stream_output_info *so_info)
223 {
224 int num_outputs = 0;
225 int i;
226 uint32_t tmp;
227
228 if (so_info)
229 num_outputs = so_info->num_outputs;
230
231 virgl_encoder_write_dword(ctx->cbuf, num_outputs);
232 if (num_outputs) {
233 for (i = 0; i < 4; i++)
234 virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);
235
236 for (i = 0; i < so_info->num_outputs; i++) {
237 tmp =
238 VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
239 VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
240 VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
241 VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
242 VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
243 virgl_encoder_write_dword(ctx->cbuf, tmp);
244 virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
245 }
246 }
247 }
248
249 int virgl_encode_shader_state(struct virgl_context *ctx,
250 uint32_t handle,
251 uint32_t type,
252 const struct pipe_stream_output_info *so_info,
253 uint32_t cs_req_local_mem,
254 const struct tgsi_token *tokens)
255 {
256 char *str, *sptr;
257 uint32_t shader_len, len;
258 bool bret;
259 int num_tokens = tgsi_num_tokens(tokens);
260 int str_total_size = 65536;
261 int retry_size = 1;
262 uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
263 bool first_pass;
264 str = CALLOC(1, str_total_size);
265 if (!str)
266 return -1;
267
268 do {
269 int old_size;
270
271 bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
272 if (bret == false) {
273 if (virgl_debug & VIRGL_DEBUG_VERBOSE)
274 debug_printf("Failed to translate shader in available space - trying again\n");
275 old_size = str_total_size;
276 str_total_size = 65536 * ++retry_size;
277 str = REALLOC(str, old_size, str_total_size);
278 if (!str)
279 return -1;
280 }
281 } while (bret == false && retry_size < 10);
282
283 if (bret == false)
284 return -1;
285
286 if (virgl_debug & VIRGL_DEBUG_TGSI)
287 debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);
288
289 shader_len = strlen(str) + 1;
290
291 left_bytes = shader_len;
292
293 base_hdr_size = 5;
294 strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
295 first_pass = true;
296 sptr = str;
297 while (left_bytes) {
298 uint32_t length, offlen;
299 int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
300 if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_ENCODE_MAX_DWORDS)
301 ctx->base.flush(&ctx->base, NULL, 0);
302
303 thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
304
305 length = MIN2(thispass, left_bytes);
306 len = ((length + 3) / 4) + hdr_len;
307
308 if (first_pass)
309 offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
310 else
311 offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
312
313 virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
314
315 if (type == PIPE_SHADER_COMPUTE)
316 virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
317 else
318 virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
319
320 virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
321
322 sptr += length;
323 first_pass = false;
324 left_bytes -= length;
325 }
326
327 FREE(str);
328 return 0;
329 }
330
331
332 int virgl_encode_clear(struct virgl_context *ctx,
333 unsigned buffers,
334 const union pipe_color_union *color,
335 double depth, unsigned stencil)
336 {
337 int i;
338 uint64_t qword;
339
340 STATIC_ASSERT(sizeof(qword) == sizeof(depth));
341 memcpy(&qword, &depth, sizeof(qword));
342
343 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
344 virgl_encoder_write_dword(ctx->cbuf, buffers);
345 for (i = 0; i < 4; i++)
346 virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
347 virgl_encoder_write_qword(ctx->cbuf, qword);
348 virgl_encoder_write_dword(ctx->cbuf, stencil);
349 return 0;
350 }
351
352 int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
353 const struct pipe_framebuffer_state *state)
354 {
355 struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
356 int i;
357
358 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
359 virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
360 virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
361 for (i = 0; i < state->nr_cbufs; i++) {
362 struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
363 virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
364 }
365
366 struct virgl_screen *rs = virgl_screen(ctx->base.screen);
367 if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
368 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
369 virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
370 virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
371 }
372 return 0;
373 }
374
375 int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
376 int start_slot,
377 int num_viewports,
378 const struct pipe_viewport_state *states)
379 {
380 int i,v;
381 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
382 virgl_encoder_write_dword(ctx->cbuf, start_slot);
383 for (v = 0; v < num_viewports; v++) {
384 for (i = 0; i < 3; i++)
385 virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
386 for (i = 0; i < 3; i++)
387 virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
388 }
389 return 0;
390 }
391
392 int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
393 uint32_t handle,
394 unsigned num_elements,
395 const struct pipe_vertex_element *element)
396 {
397 int i;
398 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
399 virgl_encoder_write_dword(ctx->cbuf, handle);
400 for (i = 0; i < num_elements; i++) {
401 virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
402 virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
403 virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
404 virgl_encoder_write_dword(ctx->cbuf, element[i].src_format);
405 }
406 return 0;
407 }
408
409 int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
410 unsigned num_buffers,
411 const struct pipe_vertex_buffer *buffers)
412 {
413 int i;
414 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
415 for (i = 0; i < num_buffers; i++) {
416 struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
417 virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
418 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
419 virgl_encoder_write_res(ctx, res);
420 }
421 return 0;
422 }
423
424 int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
425 const struct virgl_indexbuf *ib)
426 {
427 int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
428 struct virgl_resource *res = NULL;
429 if (ib)
430 res = virgl_resource(ib->buffer);
431
432 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
433 virgl_encoder_write_res(ctx, res);
434 if (ib) {
435 virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
436 virgl_encoder_write_dword(ctx->cbuf, ib->offset);
437 }
438 return 0;
439 }
440
/* Encode DRAW_VBO.  The payload length depends on the draw: the base
 * size, the tessellation size (adds vertices_per_patch and drawid), or
 * the indirect size (additionally adds the indirect buffer parameters).
 * The dword order below is fixed by the virgl protocol — do not
 * reorder.  Always returns 0. */
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
                           const struct pipe_draw_info *info)
{
   uint32_t length = VIRGL_DRAW_VBO_SIZE;
   /* NOTE: indirect takes precedence over tessellation when both apply. */
   if (info->mode == PIPE_PRIM_PATCHES)
      length = VIRGL_DRAW_VBO_SIZE_TESS;
   if (info->indirect)
      length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
   virgl_encoder_write_dword(ctx->cbuf, info->start);
   virgl_encoder_write_dword(ctx->cbuf, info->count);
   virgl_encoder_write_dword(ctx->cbuf, info->mode);
   virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
   virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
   virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
   virgl_encoder_write_dword(ctx->cbuf, info->restart_index);
   virgl_encoder_write_dword(ctx->cbuf, info->min_index);
   virgl_encoder_write_dword(ctx->cbuf, info->max_index);
   if (info->count_from_stream_output)
      virgl_encoder_write_dword(ctx->cbuf, info->count_from_stream_output->buffer_size);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
      virgl_encoder_write_dword(ctx->cbuf, info->vertices_per_patch); /* vertices per patch */
      virgl_encoder_write_dword(ctx->cbuf, info->drawid); /* drawid */
   }
   if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
      virgl_encoder_write_res(ctx, virgl_resource(info->indirect->buffer));
      virgl_encoder_write_dword(ctx->cbuf, info->indirect->offset);
      virgl_encoder_write_dword(ctx->cbuf, info->indirect->stride); /* indirect stride */
      virgl_encoder_write_dword(ctx->cbuf, info->indirect->draw_count); /* indirect draw count */
      virgl_encoder_write_dword(ctx->cbuf, info->indirect->indirect_draw_count_offset); /* indirect draw count offset */
      if (info->indirect->indirect_draw_count)
         virgl_encoder_write_res(ctx, virgl_resource(info->indirect->indirect_draw_count));
      else
         virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
   }
   return 0;
}
482
483 int virgl_encoder_create_surface(struct virgl_context *ctx,
484 uint32_t handle,
485 struct virgl_resource *res,
486 const struct pipe_surface *templat)
487 {
488 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
489 virgl_encoder_write_dword(ctx->cbuf, handle);
490 virgl_encoder_write_res(ctx, res);
491 virgl_encoder_write_dword(ctx->cbuf, templat->format);
492
493 assert(templat->texture->target != PIPE_BUFFER);
494 virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
495 virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
496
497 return 0;
498 }
499
500 int virgl_encoder_create_so_target(struct virgl_context *ctx,
501 uint32_t handle,
502 struct virgl_resource *res,
503 unsigned buffer_offset,
504 unsigned buffer_size)
505 {
506 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
507 virgl_encoder_write_dword(ctx->cbuf, handle);
508 virgl_encoder_write_res(ctx, res);
509 virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
510 virgl_encoder_write_dword(ctx->cbuf, buffer_size);
511 return 0;
512 }
513
514 static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
515 struct virgl_cmd_buf *buf,
516 struct virgl_transfer *xfer)
517 {
518 struct pipe_transfer *transfer = &xfer->base;
519 struct virgl_resource *res = virgl_resource(transfer->resource);
520
521 virgl_encoder_emit_resource(vs, buf, res);
522 virgl_encoder_write_dword(buf, transfer->level);
523 virgl_encoder_write_dword(buf, transfer->usage);
524 virgl_encoder_write_dword(buf, 0);
525 virgl_encoder_write_dword(buf, 0);
526 virgl_encoder_write_dword(buf, transfer->box.x);
527 virgl_encoder_write_dword(buf, transfer->box.y);
528 virgl_encoder_write_dword(buf, transfer->box.z);
529 virgl_encoder_write_dword(buf, transfer->box.width);
530 virgl_encoder_write_dword(buf, transfer->box.height);
531 virgl_encoder_write_dword(buf, transfer->box.depth);
532 }
533
534 int virgl_encoder_inline_write(struct virgl_context *ctx,
535 struct virgl_resource *res,
536 unsigned level, unsigned usage,
537 const struct pipe_box *box,
538 const void *data, unsigned stride,
539 unsigned layer_stride)
540 {
541 uint32_t size = (stride ? stride : box->width) * box->height;
542 uint32_t length, thispass, left_bytes;
543 struct virgl_transfer transfer;
544 struct virgl_screen *vs = virgl_screen(ctx->base.screen);
545
546 transfer.base.resource = &res->u.b;
547 transfer.base.level = level;
548 transfer.base.usage = usage;
549 transfer.base.box = *box;
550
551 length = 11 + (size + 3) / 4;
552 if ((ctx->cbuf->cdw + length + 1) > VIRGL_ENCODE_MAX_DWORDS) {
553 if (box->height > 1 || box->depth > 1) {
554 debug_printf("inline transfer failed due to multi dimensions and too large\n");
555 assert(0);
556 }
557 }
558
559 left_bytes = size;
560 while (left_bytes) {
561 if (ctx->cbuf->cdw + 12 >= VIRGL_ENCODE_MAX_DWORDS)
562 ctx->base.flush(&ctx->base, NULL, 0);
563
564 thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - 12) * 4;
565
566 length = MIN2(thispass, left_bytes);
567
568 transfer.base.box.width = length;
569 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
570 virgl_encoder_transfer3d_common(vs, ctx->cbuf, &transfer);
571 virgl_encoder_write_block(ctx->cbuf, data, length);
572 left_bytes -= length;
573 transfer.base.box.x += length;
574 data += length;
575 }
576 return 0;
577 }
578
/* Stub: no frontbuffer-flush command is currently encoded; the
 * commented-out protocol below was never enabled.  Always returns 0. */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}
586
587 int virgl_encode_sampler_state(struct virgl_context *ctx,
588 uint32_t handle,
589 const struct pipe_sampler_state *state)
590 {
591 uint32_t tmp;
592 int i;
593 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
594 virgl_encoder_write_dword(ctx->cbuf, handle);
595
596 tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
597 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
598 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
599 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
600 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
601 VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
602 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
603 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
604 VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map);
605
606 virgl_encoder_write_dword(ctx->cbuf, tmp);
607 virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
608 virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
609 virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
610 for (i = 0; i < 4; i++)
611 virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
612 return 0;
613 }
614
615
616 int virgl_encode_sampler_view(struct virgl_context *ctx,
617 uint32_t handle,
618 struct virgl_resource *res,
619 const struct pipe_sampler_view *state)
620 {
621 unsigned elem_size = util_format_get_blocksize(state->format);
622 struct virgl_screen *rs = virgl_screen(ctx->base.screen);
623 uint32_t tmp;
624 uint32_t dword_fmt_target = state->format;
625 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
626 virgl_encoder_write_dword(ctx->cbuf, handle);
627 virgl_encoder_write_res(ctx, res);
628 if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
629 dword_fmt_target |= (state->target << 24);
630 virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
631 if (res->u.b.target == PIPE_BUFFER) {
632 virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
633 virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
634 } else {
635 virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
636 virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
637 }
638 tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
639 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
640 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
641 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
642 virgl_encoder_write_dword(ctx->cbuf, tmp);
643 return 0;
644 }
645
646 int virgl_encode_set_sampler_views(struct virgl_context *ctx,
647 uint32_t shader_type,
648 uint32_t start_slot,
649 uint32_t num_views,
650 struct virgl_sampler_view **views)
651 {
652 int i;
653 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
654 virgl_encoder_write_dword(ctx->cbuf, shader_type);
655 virgl_encoder_write_dword(ctx->cbuf, start_slot);
656 for (i = 0; i < num_views; i++) {
657 uint32_t handle = views[i] ? views[i]->handle : 0;
658 virgl_encoder_write_dword(ctx->cbuf, handle);
659 }
660 return 0;
661 }
662
663 int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
664 uint32_t shader_type,
665 uint32_t start_slot,
666 uint32_t num_handles,
667 uint32_t *handles)
668 {
669 int i;
670 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
671 virgl_encoder_write_dword(ctx->cbuf, shader_type);
672 virgl_encoder_write_dword(ctx->cbuf, start_slot);
673 for (i = 0; i < num_handles; i++)
674 virgl_encoder_write_dword(ctx->cbuf, handles[i]);
675 return 0;
676 }
677
678 int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
679 uint32_t shader,
680 uint32_t index,
681 uint32_t size,
682 const void *data)
683 {
684 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
685 virgl_encoder_write_dword(ctx->cbuf, shader);
686 virgl_encoder_write_dword(ctx->cbuf, index);
687 if (data)
688 virgl_encoder_write_block(ctx->cbuf, data, size * 4);
689 return 0;
690 }
691
692 int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
693 uint32_t shader,
694 uint32_t index,
695 uint32_t offset,
696 uint32_t length,
697 struct virgl_resource *res)
698 {
699 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
700 virgl_encoder_write_dword(ctx->cbuf, shader);
701 virgl_encoder_write_dword(ctx->cbuf, index);
702 virgl_encoder_write_dword(ctx->cbuf, offset);
703 virgl_encoder_write_dword(ctx->cbuf, length);
704 virgl_encoder_write_res(ctx, res);
705 return 0;
706 }
707
708
709 int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
710 const struct pipe_stencil_ref *ref)
711 {
712 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
713 virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0] , (ref->ref_value[1])));
714 return 0;
715 }
716
717 int virgl_encoder_set_blend_color(struct virgl_context *ctx,
718 const struct pipe_blend_color *color)
719 {
720 int i;
721 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
722 for (i = 0; i < 4; i++)
723 virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
724 return 0;
725 }
726
727 int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
728 unsigned start_slot,
729 int num_scissors,
730 const struct pipe_scissor_state *ss)
731 {
732 int i;
733 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
734 virgl_encoder_write_dword(ctx->cbuf, start_slot);
735 for (i = 0; i < num_scissors; i++) {
736 virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
737 virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
738 }
739 return 0;
740 }
741
742 void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
743 const struct pipe_poly_stipple *ps)
744 {
745 int i;
746 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
747 for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
748 virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
749 }
750 }
751
752 void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
753 unsigned sample_mask)
754 {
755 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
756 virgl_encoder_write_dword(ctx->cbuf, sample_mask);
757 }
758
759 void virgl_encoder_set_min_samples(struct virgl_context *ctx,
760 unsigned min_samples)
761 {
762 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
763 virgl_encoder_write_dword(ctx->cbuf, min_samples);
764 }
765
766 void virgl_encoder_set_clip_state(struct virgl_context *ctx,
767 const struct pipe_clip_state *clip)
768 {
769 int i, j;
770 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
771 for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
772 for (j = 0; j < 4; j++) {
773 virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
774 }
775 }
776 }
777
778 int virgl_encode_resource_copy_region(struct virgl_context *ctx,
779 struct virgl_resource *dst_res,
780 unsigned dst_level,
781 unsigned dstx, unsigned dsty, unsigned dstz,
782 struct virgl_resource *src_res,
783 unsigned src_level,
784 const struct pipe_box *src_box)
785 {
786 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
787 virgl_encoder_write_res(ctx, dst_res);
788 virgl_encoder_write_dword(ctx->cbuf, dst_level);
789 virgl_encoder_write_dword(ctx->cbuf, dstx);
790 virgl_encoder_write_dword(ctx->cbuf, dsty);
791 virgl_encoder_write_dword(ctx->cbuf, dstz);
792 virgl_encoder_write_res(ctx, src_res);
793 virgl_encoder_write_dword(ctx->cbuf, src_level);
794 virgl_encoder_write_dword(ctx->cbuf, src_box->x);
795 virgl_encoder_write_dword(ctx->cbuf, src_box->y);
796 virgl_encoder_write_dword(ctx->cbuf, src_box->z);
797 virgl_encoder_write_dword(ctx->cbuf, src_box->width);
798 virgl_encoder_write_dword(ctx->cbuf, src_box->height);
799 virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
800 return 0;
801 }
802
/* Encode BLIT: S0 (mask/filter/flags), the scissor rectangle, then the
 * full destination description (resource, level, format, box) followed
 * by the same for the source.  The dword order is fixed by the virgl
 * protocol.  Always returns 0. */
int virgl_encode_blit(struct virgl_context *ctx,
                      struct virgl_resource *dst_res,
                      struct virgl_resource *src_res,
                      const struct pipe_blit_info *blit)
{
   uint32_t tmp;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
   tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
         VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
         VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
         VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
         VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   /* Scissor is sent unconditionally; the host ignores it unless the
    * scissor-enable bit in S0 is set. */
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));

   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.format);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);

   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.format);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
   return 0;
}
840
841 int virgl_encoder_create_query(struct virgl_context *ctx,
842 uint32_t handle,
843 uint query_type,
844 uint query_index,
845 struct virgl_resource *res,
846 uint32_t offset)
847 {
848 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
849 virgl_encoder_write_dword(ctx->cbuf, handle);
850 virgl_encoder_write_dword(ctx->cbuf, ((query_type & 0xffff) | (query_index << 16)));
851 virgl_encoder_write_dword(ctx->cbuf, offset);
852 virgl_encoder_write_res(ctx, res);
853 return 0;
854 }
855
856 int virgl_encoder_begin_query(struct virgl_context *ctx,
857 uint32_t handle)
858 {
859 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
860 virgl_encoder_write_dword(ctx->cbuf, handle);
861 return 0;
862 }
863
864 int virgl_encoder_end_query(struct virgl_context *ctx,
865 uint32_t handle)
866 {
867 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
868 virgl_encoder_write_dword(ctx->cbuf, handle);
869 return 0;
870 }
871
872 int virgl_encoder_get_query_result(struct virgl_context *ctx,
873 uint32_t handle, boolean wait)
874 {
875 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
876 virgl_encoder_write_dword(ctx->cbuf, handle);
877 virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
878 return 0;
879 }
880
881 int virgl_encoder_render_condition(struct virgl_context *ctx,
882 uint32_t handle, boolean condition,
883 enum pipe_render_cond_flag mode)
884 {
885 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
886 virgl_encoder_write_dword(ctx->cbuf, handle);
887 virgl_encoder_write_dword(ctx->cbuf, condition);
888 virgl_encoder_write_dword(ctx->cbuf, mode);
889 return 0;
890 }
891
892 int virgl_encoder_set_so_targets(struct virgl_context *ctx,
893 unsigned num_targets,
894 struct pipe_stream_output_target **targets,
895 unsigned append_bitmask)
896 {
897 int i;
898
899 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
900 virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
901 for (i = 0; i < num_targets; i++) {
902 struct virgl_so_target *tg = virgl_so_target(targets[i]);
903 virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
904 }
905 return 0;
906 }
907
908
909 int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
910 {
911 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
912 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
913 return 0;
914 }
915
916 int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
917 {
918 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
919 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
920 return 0;
921 }
922
923 int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
924 {
925 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
926 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
927 return 0;
928 }
929
930 int virgl_encode_bind_shader(struct virgl_context *ctx,
931 uint32_t handle, uint32_t type)
932 {
933 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
934 virgl_encoder_write_dword(ctx->cbuf, handle);
935 virgl_encoder_write_dword(ctx->cbuf, type);
936 return 0;
937 }
938
939 int virgl_encode_set_tess_state(struct virgl_context *ctx,
940 const float outer[4],
941 const float inner[2])
942 {
943 int i;
944 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
945 for (i = 0; i < 4; i++)
946 virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
947 for (i = 0; i < 2; i++)
948 virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
949 return 0;
950 }
951
952 int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
953 enum pipe_shader_type shader,
954 unsigned start_slot, unsigned count,
955 const struct pipe_shader_buffer *buffers)
956 {
957 int i;
958 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));
959
960 virgl_encoder_write_dword(ctx->cbuf, shader);
961 virgl_encoder_write_dword(ctx->cbuf, start_slot);
962 for (i = 0; i < count; i++) {
963 if (buffers && buffers[i].buffer) {
964 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
965 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
966 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
967 virgl_encoder_write_res(ctx, res);
968
969 util_range_add(&res->valid_buffer_range, buffers[i].buffer_offset,
970 buffers[i].buffer_offset + buffers[i].buffer_size);
971 virgl_resource_dirty(res, 0);
972 } else {
973 virgl_encoder_write_dword(ctx->cbuf, 0);
974 virgl_encoder_write_dword(ctx->cbuf, 0);
975 virgl_encoder_write_dword(ctx->cbuf, 0);
976 }
977 }
978 return 0;
979 }
980
981 int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
982 unsigned start_slot, unsigned count,
983 const struct pipe_shader_buffer *buffers)
984 {
985 int i;
986 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));
987
988 virgl_encoder_write_dword(ctx->cbuf, start_slot);
989 for (i = 0; i < count; i++) {
990 if (buffers && buffers[i].buffer) {
991 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
992 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
993 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
994 virgl_encoder_write_res(ctx, res);
995
996 util_range_add(&res->valid_buffer_range, buffers[i].buffer_offset,
997 buffers[i].buffer_offset + buffers[i].buffer_size);
998 virgl_resource_dirty(res, 0);
999 } else {
1000 virgl_encoder_write_dword(ctx->cbuf, 0);
1001 virgl_encoder_write_dword(ctx->cbuf, 0);
1002 virgl_encoder_write_dword(ctx->cbuf, 0);
1003 }
1004 }
1005 return 0;
1006 }
1007
1008 int virgl_encode_set_shader_images(struct virgl_context *ctx,
1009 enum pipe_shader_type shader,
1010 unsigned start_slot, unsigned count,
1011 const struct pipe_image_view *images)
1012 {
1013 int i;
1014 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));
1015
1016 virgl_encoder_write_dword(ctx->cbuf, shader);
1017 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1018 for (i = 0; i < count; i++) {
1019 if (images && images[i].resource) {
1020 struct virgl_resource *res = virgl_resource(images[i].resource);
1021 virgl_encoder_write_dword(ctx->cbuf, images[i].format);
1022 virgl_encoder_write_dword(ctx->cbuf, images[i].access);
1023 virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
1024 virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
1025 virgl_encoder_write_res(ctx, res);
1026
1027 if (res->u.b.target == PIPE_BUFFER) {
1028 util_range_add(&res->valid_buffer_range, images[i].u.buf.offset,
1029 images[i].u.buf.offset + images[i].u.buf.size);
1030 }
1031 virgl_resource_dirty(res, images[i].u.tex.level);
1032 } else {
1033 virgl_encoder_write_dword(ctx->cbuf, 0);
1034 virgl_encoder_write_dword(ctx->cbuf, 0);
1035 virgl_encoder_write_dword(ctx->cbuf, 0);
1036 virgl_encoder_write_dword(ctx->cbuf, 0);
1037 virgl_encoder_write_dword(ctx->cbuf, 0);
1038 }
1039 }
1040 return 0;
1041 }
1042
1043 int virgl_encode_memory_barrier(struct virgl_context *ctx,
1044 unsigned flags)
1045 {
1046 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
1047 virgl_encoder_write_dword(ctx->cbuf, flags);
1048 return 0;
1049 }
1050
1051 int virgl_encode_launch_grid(struct virgl_context *ctx,
1052 const struct pipe_grid_info *grid_info)
1053 {
1054 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
1055 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
1056 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
1057 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
1058 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
1059 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
1060 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
1061 if (grid_info->indirect) {
1062 struct virgl_resource *res = virgl_resource(grid_info->indirect);
1063 virgl_encoder_write_res(ctx, res);
1064 } else
1065 virgl_encoder_write_dword(ctx->cbuf, 0);
1066 virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
1067 return 0;
1068 }
1069
1070 int virgl_encode_texture_barrier(struct virgl_context *ctx,
1071 unsigned flags)
1072 {
1073 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
1074 virgl_encoder_write_dword(ctx->cbuf, flags);
1075 return 0;
1076 }
1077
1078 int virgl_encode_host_debug_flagstring(struct virgl_context *ctx,
1079 const char *flagstring)
1080 {
1081 unsigned long slen = strlen(flagstring) + 1;
1082 uint32_t sslen;
1083 uint32_t string_length;
1084
1085 if (!slen)
1086 return 0;
1087
1088 if (slen > 4 * 0xffff) {
1089 debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1090 slen = 4 * 0xffff;
1091 }
1092
1093 sslen = (uint32_t )(slen + 3) / 4;
1094 string_length = (uint32_t)MIN2(sslen * 4, slen);
1095
1096 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen));
1097 virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length);
1098 return 0;
1099 }
1100
1101 int virgl_encode_get_query_result_qbo(struct virgl_context *ctx,
1102 uint32_t handle,
1103 struct virgl_resource *res, boolean wait,
1104 uint32_t result_type,
1105 uint32_t offset,
1106 uint32_t index)
1107 {
1108 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO, 0, VIRGL_QUERY_RESULT_QBO_SIZE));
1109 virgl_encoder_write_dword(ctx->cbuf, handle);
1110 virgl_encoder_write_res(ctx, res);
1111 virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
1112 virgl_encoder_write_dword(ctx->cbuf, result_type);
1113 virgl_encoder_write_dword(ctx->cbuf, offset);
1114 virgl_encoder_write_dword(ctx->cbuf, index);
1115 return 0;
1116 }
1117
1118 void virgl_encode_transfer(struct virgl_screen *vs, struct virgl_cmd_buf *buf,
1119 struct virgl_transfer *trans, uint32_t direction)
1120 {
1121 uint32_t command;
1122 command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
1123 virgl_encoder_write_dword(buf, command);
1124 virgl_encoder_transfer3d_common(vs, buf, trans);
1125 virgl_encoder_write_dword(buf, trans->offset);
1126 virgl_encoder_write_dword(buf, direction);
1127 }
1128
1129 void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)
1130 {
1131 uint32_t command, diff;
1132 diff = VIRGL_MAX_TBUF_DWORDS - buf->cdw;
1133 if (diff) {
1134 command = VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS, 0, diff - 1);
1135 virgl_encoder_write_dword(buf, command);
1136 }
1137 }