403f54e2842c5c6c590ca66fc268dd3b5788a499
[mesa.git] / src / gallium / drivers / virgl / virgl_encode.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdint.h>
24 #include <assert.h>
25 #include <string.h>
26
27 #include "util/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
33
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
39
40 static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
41 uint32_t dword)
42 {
43 int len = (dword >> 16);
44
45 if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
46 ctx->base.flush(&ctx->base, NULL, 0);
47
48 virgl_encoder_write_dword(ctx->cbuf, dword);
49 return 0;
50 }
51
52 static void virgl_encoder_write_res(struct virgl_context *ctx,
53 struct virgl_resource *res)
54 {
55 struct virgl_winsys *vws = virgl_screen(ctx->base.screen)->vws;
56
57 if (res && res->hw_res)
58 vws->emit_res(vws, ctx->cbuf, res->hw_res, TRUE);
59 else {
60 virgl_encoder_write_dword(ctx->cbuf, 0);
61 }
62 }
63
64 static void virgl_dirty_res(struct virgl_resource *res)
65 {
66 if (res)
67 res->clean = FALSE;
68 }
69
70 int virgl_encode_bind_object(struct virgl_context *ctx,
71 uint32_t handle, uint32_t object)
72 {
73 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
74 virgl_encoder_write_dword(ctx->cbuf, handle);
75 return 0;
76 }
77
78 int virgl_encode_delete_object(struct virgl_context *ctx,
79 uint32_t handle, uint32_t object)
80 {
81 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
82 virgl_encoder_write_dword(ctx->cbuf, handle);
83 return 0;
84 }
85
/* Encode a pipe_blend_state as a VIRGL_OBJECT_BLEND create command.
 * Wire layout: handle, S0 (global enables), S1 (logic op), then one S2
 * dword per possible color buffer. Field packing must match
 * virgl_protocol.h exactly.
 */
int virgl_encode_blend_state(struct virgl_context *ctx,
                             uint32_t handle,
                             const struct pipe_blend_state *blend_state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: global blend enables/flags */
   tmp =
      VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
      VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
      VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);

   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S1: logic op function (valid when logicop_enable is set) */
   tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S2[i]: per-render-target blend funcs/factors and color write mask.
    * Always emits all VIRGL_MAX_COLOR_BUFS entries; the host ignores
    * entries past the bound targets.
    */
   for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
      tmp =
         VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor)|
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(blend_state->rt[i].alpha_src_factor) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
         VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }
   return 0;
}
122
/* Encode a pipe_depth_stencil_alpha_state as a VIRGL_OBJECT_DSA object.
 * Wire layout: handle, S0 (depth + alpha test), S1/S2 (front then back
 * stencil), S3 (alpha reference value as raw float bits).
 */
int virgl_encode_dsa_state(struct virgl_context *ctx,
                           uint32_t handle,
                           const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   uint32_t tmp;
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: depth test enable/writemask/func and alpha test enable/func */
   tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth.enabled) |
      VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth.writemask) |
      VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth.func) |
      VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha.enabled) |
      VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha.func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S1 (i==0: front face) and S2 (i==1: back face) stencil state */
   for (i = 0; i < 2; i++) {
      tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
         VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
         VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
         VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }

   /* S3: alpha-test reference value, transmitted as its IEEE bit pattern */
   virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha.ref_value));
   return 0;
}
/* Encode a pipe_rasterizer_state as a VIRGL_OBJECT_RASTERIZER object.
 * S0 packs all the boolean/enum rasterizer flags into one dword; S1-S7
 * carry point size, sprite coord enable mask, line stipple, line width,
 * and the polygon-offset parameters (floats sent as raw bits via fui()).
 */
int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: bit-packed flags; field positions must match virgl_protocol.h.
    * Note the protocol's DEPTH_CLIP bit is fed from depth_clip_near.
    */
   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
      VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
      VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
      VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
      VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
      VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
      VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
      VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
      VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
      VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
      VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
      VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
      VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
      VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
      VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
      VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
      VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
      VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
      VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
      VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
      VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
      VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
      VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
      VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
      VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
      VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
      VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
      VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
      VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
      VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
      VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}
205
206 static void virgl_emit_shader_header(struct virgl_context *ctx,
207 uint32_t handle, uint32_t len,
208 uint32_t type, uint32_t offlen,
209 uint32_t num_tokens)
210 {
211 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
212 virgl_encoder_write_dword(ctx->cbuf, handle);
213 virgl_encoder_write_dword(ctx->cbuf, type);
214 virgl_encoder_write_dword(ctx->cbuf, offlen);
215 virgl_encoder_write_dword(ctx->cbuf, num_tokens);
216 }
217
/* Emit the stream-output section of a shader object.
 * Writes the output count first; when non-zero it is followed by the four
 * per-buffer strides and, per output, a packed descriptor dword plus the
 * output's stream index. Passing so_info == NULL (or zero outputs) emits
 * just a 0 count — used for continuation chunks of a split shader.
 */
static void virgl_emit_shader_streamout(struct virgl_context *ctx,
                                        const struct pipe_stream_output_info *so_info)
{
   int num_outputs = 0;
   int i;
   uint32_t tmp;

   if (so_info)
      num_outputs = so_info->num_outputs;

   virgl_encoder_write_dword(ctx->cbuf, num_outputs);
   if (num_outputs) {
      /* strides for all four stream-output buffer slots */
      for (i = 0; i < 4; i++)
         virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);

      for (i = 0; i < so_info->num_outputs; i++) {
         /* packed register/component/buffer/offset descriptor */
         tmp =
           VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
           VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
           VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
           VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
           VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
         virgl_encoder_write_dword(ctx->cbuf, tmp);
         virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
      }
   }
}
245
246 int virgl_encode_shader_state(struct virgl_context *ctx,
247 uint32_t handle,
248 uint32_t type,
249 const struct pipe_stream_output_info *so_info,
250 uint32_t cs_req_local_mem,
251 const struct tgsi_token *tokens)
252 {
253 char *str, *sptr;
254 uint32_t shader_len, len;
255 bool bret;
256 int num_tokens = tgsi_num_tokens(tokens);
257 int str_total_size = 65536;
258 int retry_size = 1;
259 uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
260 bool first_pass;
261 str = CALLOC(1, str_total_size);
262 if (!str)
263 return -1;
264
265 do {
266 int old_size;
267
268 bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
269 if (bret == false) {
270 if (virgl_debug & VIRGL_DEBUG_VERBOSE)
271 debug_printf("Failed to translate shader in available space - trying again\n");
272 old_size = str_total_size;
273 str_total_size = 65536 * ++retry_size;
274 str = REALLOC(str, old_size, str_total_size);
275 if (!str)
276 return -1;
277 }
278 } while (bret == false && retry_size < 10);
279
280 if (bret == false)
281 return -1;
282
283 if (virgl_debug & VIRGL_DEBUG_TGSI)
284 debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);
285
286 shader_len = strlen(str) + 1;
287
288 left_bytes = shader_len;
289
290 base_hdr_size = 5;
291 strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
292 first_pass = true;
293 sptr = str;
294 while (left_bytes) {
295 uint32_t length, offlen;
296 int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
297 if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_MAX_CMDBUF_DWORDS)
298 ctx->base.flush(&ctx->base, NULL, 0);
299
300 thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
301
302 length = MIN2(thispass, left_bytes);
303 len = ((length + 3) / 4) + hdr_len;
304
305 if (first_pass)
306 offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
307 else
308 offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
309
310 virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
311
312 if (type == PIPE_SHADER_COMPUTE)
313 virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
314 else
315 virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
316
317 virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
318
319 sptr += length;
320 first_pass = false;
321 left_bytes -= length;
322 }
323
324 FREE(str);
325 return 0;
326 }
327
328
329 int virgl_encode_clear(struct virgl_context *ctx,
330 unsigned buffers,
331 const union pipe_color_union *color,
332 double depth, unsigned stencil)
333 {
334 int i;
335 uint64_t qword;
336
337 STATIC_ASSERT(sizeof(qword) == sizeof(depth));
338 memcpy(&qword, &depth, sizeof(qword));
339
340 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
341 virgl_encoder_write_dword(ctx->cbuf, buffers);
342 for (i = 0; i < 4; i++)
343 virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
344 virgl_encoder_write_qword(ctx->cbuf, qword);
345 virgl_encoder_write_dword(ctx->cbuf, stencil);
346 return 0;
347 }
348
/* Encode the framebuffer binding: zsbuf handle plus one handle per color
 * buffer (0 for unbound slots). When the host supports attachment-less
 * framebuffers, a second command also transmits the explicit
 * width/height/layers/samples packed two-per-dword.
 */
int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
   virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
   virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
   for (i = 0; i < state->nr_cbufs; i++) {
      struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
      virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
   }

   /* Only emitted when the host advertises VIRGL_CAP_FB_NO_ATTACH. */
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
      virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
      virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
      virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
   }
   return 0;
}
371
372 int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
373 int start_slot,
374 int num_viewports,
375 const struct pipe_viewport_state *states)
376 {
377 int i,v;
378 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
379 virgl_encoder_write_dword(ctx->cbuf, start_slot);
380 for (v = 0; v < num_viewports; v++) {
381 for (i = 0; i < 3; i++)
382 virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
383 for (i = 0; i < 3; i++)
384 virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
385 }
386 return 0;
387 }
388
389 int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
390 uint32_t handle,
391 unsigned num_elements,
392 const struct pipe_vertex_element *element)
393 {
394 int i;
395 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
396 virgl_encoder_write_dword(ctx->cbuf, handle);
397 for (i = 0; i < num_elements; i++) {
398 virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
399 virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
400 virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
401 virgl_encoder_write_dword(ctx->cbuf, element[i].src_format);
402 }
403 return 0;
404 }
405
406 int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
407 unsigned num_buffers,
408 const struct pipe_vertex_buffer *buffers)
409 {
410 int i;
411 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
412 for (i = 0; i < num_buffers; i++) {
413 struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
414 virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
415 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
416 virgl_encoder_write_res(ctx, res);
417 }
418 return 0;
419 }
420
421 int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
422 const struct virgl_indexbuf *ib)
423 {
424 int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
425 struct virgl_resource *res = NULL;
426 if (ib)
427 res = virgl_resource(ib->buffer);
428
429 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
430 virgl_encoder_write_res(ctx, res);
431 if (ib) {
432 virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
433 virgl_encoder_write_dword(ctx->cbuf, ib->offset);
434 }
435 return 0;
436 }
437
/* Encode a draw call. The command length selects which optional trailing
 * sections the host will parse: the base form, the tessellation form
 * (adds vertices_per_patch and drawid), and the indirect form (adds the
 * indirect buffer reference and its parameters). Field order is fixed by
 * the protocol and must not change.
 */
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
                           const struct pipe_draw_info *info)
{
   uint32_t length = VIRGL_DRAW_VBO_SIZE;
   if (info->mode == PIPE_PRIM_PATCHES)
      length = VIRGL_DRAW_VBO_SIZE_TESS;
   if (info->indirect)
      length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
   virgl_encoder_write_dword(ctx->cbuf, info->start);
   virgl_encoder_write_dword(ctx->cbuf, info->count);
   virgl_encoder_write_dword(ctx->cbuf, info->mode);
   virgl_encoder_write_dword(ctx->cbuf, !!info->index_size); /* indexed draw flag */
   virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
   virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
   virgl_encoder_write_dword(ctx->cbuf, info->restart_index);
   virgl_encoder_write_dword(ctx->cbuf, info->min_index);
   virgl_encoder_write_dword(ctx->cbuf, info->max_index);
   /* vertex count sourced from a stream-output target, 0 when unused */
   if (info->count_from_stream_output)
      virgl_encoder_write_dword(ctx->cbuf, info->count_from_stream_output->buffer_size);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
      virgl_encoder_write_dword(ctx->cbuf, info->vertices_per_patch); /* vertices per patch */
      virgl_encoder_write_dword(ctx->cbuf, info->drawid); /* drawid */
   }
   if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
      virgl_encoder_write_res(ctx, virgl_resource(info->indirect->buffer));
      virgl_encoder_write_dword(ctx->cbuf, info->indirect->offset);
      virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect stride */
      virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count */
      virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count offset */
      virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
   }
   return 0;
}
476
/* Create a surface object for res. The two type-specific dwords encode
 * either the buffer element range (first/last element) or the texture
 * mip level plus first/last layer packed 16/16.
 */
int virgl_encoder_create_surface(struct virgl_context *ctx,
                                 uint32_t handle,
                                 struct virgl_resource *res,
                                 const struct pipe_surface *templat)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, templat->format);
   if (templat->texture->target == PIPE_BUFFER) {
      /* buffer surface: element range */
      virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.first_element);
      virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.last_element);

   } else {
      /* texture surface: mip level and layer range (first | last << 16) */
      virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
      virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
   }
   return 0;
}
496
497 int virgl_encoder_create_so_target(struct virgl_context *ctx,
498 uint32_t handle,
499 struct virgl_resource *res,
500 unsigned buffer_offset,
501 unsigned buffer_size)
502 {
503 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
504 virgl_encoder_write_dword(ctx->cbuf, handle);
505 virgl_encoder_write_res(ctx, res);
506 virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
507 virgl_encoder_write_dword(ctx->cbuf, buffer_size);
508 return 0;
509 }
510
511 static void virgl_encoder_iw_emit_header_1d(struct virgl_context *ctx,
512 struct virgl_resource *res,
513 unsigned level, unsigned usage,
514 const struct pipe_box *box,
515 unsigned stride, unsigned layer_stride)
516 {
517 virgl_encoder_write_res(ctx, res);
518 virgl_encoder_write_dword(ctx->cbuf, level);
519 virgl_encoder_write_dword(ctx->cbuf, usage);
520 virgl_encoder_write_dword(ctx->cbuf, stride);
521 virgl_encoder_write_dword(ctx->cbuf, layer_stride);
522 virgl_encoder_write_dword(ctx->cbuf, box->x);
523 virgl_encoder_write_dword(ctx->cbuf, box->y);
524 virgl_encoder_write_dword(ctx->cbuf, box->z);
525 virgl_encoder_write_dword(ctx->cbuf, box->width);
526 virgl_encoder_write_dword(ctx->cbuf, box->height);
527 virgl_encoder_write_dword(ctx->cbuf, box->depth);
528 }
529
530 int virgl_encoder_inline_write(struct virgl_context *ctx,
531 struct virgl_resource *res,
532 unsigned level, unsigned usage,
533 const struct pipe_box *box,
534 const void *data, unsigned stride,
535 unsigned layer_stride)
536 {
537 uint32_t size = (stride ? stride : box->width) * box->height;
538 uint32_t length, thispass, left_bytes;
539 struct pipe_box mybox = *box;
540
541 length = 11 + (size + 3) / 4;
542 if ((ctx->cbuf->cdw + length + 1) > VIRGL_MAX_CMDBUF_DWORDS) {
543 if (box->height > 1 || box->depth > 1) {
544 debug_printf("inline transfer failed due to multi dimensions and too large\n");
545 assert(0);
546 }
547 }
548
549 left_bytes = size;
550 while (left_bytes) {
551 if (ctx->cbuf->cdw + 12 >= VIRGL_MAX_CMDBUF_DWORDS)
552 ctx->base.flush(&ctx->base, NULL, 0);
553
554 thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - 12) * 4;
555
556 length = MIN2(thispass, left_bytes);
557
558 mybox.width = length;
559 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
560 virgl_encoder_iw_emit_header_1d(ctx, res, level, usage, &mybox, stride, layer_stride);
561 virgl_encoder_write_block(ctx->cbuf, data, length);
562 left_bytes -= length;
563 mybox.x += length;
564 data += length;
565 }
566 return 0;
567 }
568
/* Stub: front-buffer flush is not encoded; kept so callers have a stable
 * entry point. The commented-out lines show the intended command shape.
 */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}
576
/* Encode a pipe_sampler_state as a VIRGL_OBJECT_SAMPLER_STATE object:
 * one packed S0 dword of wrap/filter/compare settings, the three LOD
 * floats as raw bits, then the four border color dwords.
 */
int virgl_encode_sampler_state(struct virgl_context *ctx,
                               uint32_t handle,
                               const struct pipe_sampler_state *state)
{
   uint32_t tmp;
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: wrap modes, min/mip/mag filters, shadow compare, seamless cube */
   tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
      VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
      VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map);

   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
   return 0;
}
604
605
/* Encode a sampler view over res. The format dword additionally carries
 * the view target in bits 24+ when the host supports texture views.
 * Buffer views send an element range (and mark the resource dirty);
 * texture views send packed layer and level ranges.
 */
int virgl_encode_sampler_view(struct virgl_context *ctx,
                              uint32_t handle,
                              struct virgl_resource *res,
                              const struct pipe_sampler_view *state)
{
   unsigned elem_size = util_format_get_blocksize(state->format);
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   uint32_t tmp;
   uint32_t dword_fmt_target = state->format;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   /* with VIRGL_CAP_TEXTURE_VIEW the target rides in the format dword */
   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
      dword_fmt_target |= (state->target << 24);
   virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
   if (res->u.b.target == PIPE_BUFFER) {
      /* buffer view: first/last element, converted from byte offsets */
      virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
      virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
      virgl_dirty_res(res);
   } else {
      /* texture view: layer range (16/16) and level range (8/8) */
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
   }
   /* component swizzle for all four channels */
   tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   return 0;
}
636
637 int virgl_encode_set_sampler_views(struct virgl_context *ctx,
638 uint32_t shader_type,
639 uint32_t start_slot,
640 uint32_t num_views,
641 struct virgl_sampler_view **views)
642 {
643 int i;
644 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
645 virgl_encoder_write_dword(ctx->cbuf, shader_type);
646 virgl_encoder_write_dword(ctx->cbuf, start_slot);
647 for (i = 0; i < num_views; i++) {
648 uint32_t handle = views[i] ? views[i]->handle : 0;
649 virgl_encoder_write_dword(ctx->cbuf, handle);
650 }
651 return 0;
652 }
653
654 int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
655 uint32_t shader_type,
656 uint32_t start_slot,
657 uint32_t num_handles,
658 uint32_t *handles)
659 {
660 int i;
661 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
662 virgl_encoder_write_dword(ctx->cbuf, shader_type);
663 virgl_encoder_write_dword(ctx->cbuf, start_slot);
664 for (i = 0; i < num_handles; i++)
665 virgl_encoder_write_dword(ctx->cbuf, handles[i]);
666 return 0;
667 }
668
669 int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
670 uint32_t shader,
671 uint32_t index,
672 uint32_t size,
673 const void *data)
674 {
675 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
676 virgl_encoder_write_dword(ctx->cbuf, shader);
677 virgl_encoder_write_dword(ctx->cbuf, index);
678 if (data)
679 virgl_encoder_write_block(ctx->cbuf, data, size * 4);
680 return 0;
681 }
682
683 int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
684 uint32_t shader,
685 uint32_t index,
686 uint32_t offset,
687 uint32_t length,
688 struct virgl_resource *res)
689 {
690 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
691 virgl_encoder_write_dword(ctx->cbuf, shader);
692 virgl_encoder_write_dword(ctx->cbuf, index);
693 virgl_encoder_write_dword(ctx->cbuf, offset);
694 virgl_encoder_write_dword(ctx->cbuf, length);
695 virgl_encoder_write_res(ctx, res);
696 return 0;
697 }
698
699
700 int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
701 const struct pipe_stencil_ref *ref)
702 {
703 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
704 virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0] , (ref->ref_value[1])));
705 return 0;
706 }
707
708 int virgl_encoder_set_blend_color(struct virgl_context *ctx,
709 const struct pipe_blend_color *color)
710 {
711 int i;
712 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
713 for (i = 0; i < 4; i++)
714 virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
715 return 0;
716 }
717
718 int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
719 unsigned start_slot,
720 int num_scissors,
721 const struct pipe_scissor_state *ss)
722 {
723 int i;
724 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
725 virgl_encoder_write_dword(ctx->cbuf, start_slot);
726 for (i = 0; i < num_scissors; i++) {
727 virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
728 virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
729 }
730 return 0;
731 }
732
733 void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
734 const struct pipe_poly_stipple *ps)
735 {
736 int i;
737 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
738 for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
739 virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
740 }
741 }
742
743 void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
744 unsigned sample_mask)
745 {
746 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
747 virgl_encoder_write_dword(ctx->cbuf, sample_mask);
748 }
749
750 void virgl_encoder_set_min_samples(struct virgl_context *ctx,
751 unsigned min_samples)
752 {
753 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
754 virgl_encoder_write_dword(ctx->cbuf, min_samples);
755 }
756
757 void virgl_encoder_set_clip_state(struct virgl_context *ctx,
758 const struct pipe_clip_state *clip)
759 {
760 int i, j;
761 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
762 for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
763 for (j = 0; j < 4; j++) {
764 virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
765 }
766 }
767 }
768
769 int virgl_encode_resource_copy_region(struct virgl_context *ctx,
770 struct virgl_resource *dst_res,
771 unsigned dst_level,
772 unsigned dstx, unsigned dsty, unsigned dstz,
773 struct virgl_resource *src_res,
774 unsigned src_level,
775 const struct pipe_box *src_box)
776 {
777 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
778 virgl_encoder_write_res(ctx, dst_res);
779 virgl_encoder_write_dword(ctx->cbuf, dst_level);
780 virgl_encoder_write_dword(ctx->cbuf, dstx);
781 virgl_encoder_write_dword(ctx->cbuf, dsty);
782 virgl_encoder_write_dword(ctx->cbuf, dstz);
783 virgl_encoder_write_res(ctx, src_res);
784 virgl_encoder_write_dword(ctx->cbuf, src_level);
785 virgl_encoder_write_dword(ctx->cbuf, src_box->x);
786 virgl_encoder_write_dword(ctx->cbuf, src_box->y);
787 virgl_encoder_write_dword(ctx->cbuf, src_box->z);
788 virgl_encoder_write_dword(ctx->cbuf, src_box->width);
789 virgl_encoder_write_dword(ctx->cbuf, src_box->height);
790 virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
791 return 0;
792 }
793
/* Encode a blit: S0 packs the mask/filter/flags, followed by the scissor
 * rectangle (min/max packed 16/16), then the destination and source
 * views, each as resource reference, level, format, and box.
 */
int virgl_encode_blit(struct virgl_context *ctx,
                      struct virgl_resource *dst_res,
                      struct virgl_resource *src_res,
                      const struct pipe_blit_info *blit)
{
   uint32_t tmp;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
   /* S0: write mask, filter, scissor/render-condition/alpha-blend flags */
   tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
      VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
      VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
      VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
      VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));

   /* destination view */
   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.format);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);

   /* source view */
   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.format);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
   return 0;
}
831
832 int virgl_encoder_create_query(struct virgl_context *ctx,
833 uint32_t handle,
834 uint query_type,
835 uint query_index,
836 struct virgl_resource *res,
837 uint32_t offset)
838 {
839 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
840 virgl_encoder_write_dword(ctx->cbuf, handle);
841 virgl_encoder_write_dword(ctx->cbuf, ((query_type & 0xffff) | (query_index << 16)));
842 virgl_encoder_write_dword(ctx->cbuf, offset);
843 virgl_encoder_write_res(ctx, res);
844 return 0;
845 }
846
847 int virgl_encoder_begin_query(struct virgl_context *ctx,
848 uint32_t handle)
849 {
850 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
851 virgl_encoder_write_dword(ctx->cbuf, handle);
852 return 0;
853 }
854
855 int virgl_encoder_end_query(struct virgl_context *ctx,
856 uint32_t handle)
857 {
858 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
859 virgl_encoder_write_dword(ctx->cbuf, handle);
860 return 0;
861 }
862
863 int virgl_encoder_get_query_result(struct virgl_context *ctx,
864 uint32_t handle, boolean wait)
865 {
866 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
867 virgl_encoder_write_dword(ctx->cbuf, handle);
868 virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
869 return 0;
870 }
871
872 int virgl_encoder_render_condition(struct virgl_context *ctx,
873 uint32_t handle, boolean condition,
874 enum pipe_render_cond_flag mode)
875 {
876 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
877 virgl_encoder_write_dword(ctx->cbuf, handle);
878 virgl_encoder_write_dword(ctx->cbuf, condition);
879 virgl_encoder_write_dword(ctx->cbuf, mode);
880 return 0;
881 }
882
883 int virgl_encoder_set_so_targets(struct virgl_context *ctx,
884 unsigned num_targets,
885 struct pipe_stream_output_target **targets,
886 unsigned append_bitmask)
887 {
888 int i;
889
890 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
891 virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
892 for (i = 0; i < num_targets; i++) {
893 struct virgl_so_target *tg = virgl_so_target(targets[i]);
894 virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
895 }
896 return 0;
897 }
898
899
900 int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
901 {
902 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
903 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
904 return 0;
905 }
906
907 int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
908 {
909 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
910 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
911 return 0;
912 }
913
914 int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
915 {
916 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
917 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
918 return 0;
919 }
920
921 int virgl_encode_bind_shader(struct virgl_context *ctx,
922 uint32_t handle, uint32_t type)
923 {
924 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
925 virgl_encoder_write_dword(ctx->cbuf, handle);
926 virgl_encoder_write_dword(ctx->cbuf, type);
927 return 0;
928 }
929
930 int virgl_encode_set_tess_state(struct virgl_context *ctx,
931 const float outer[4],
932 const float inner[2])
933 {
934 int i;
935 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
936 for (i = 0; i < 4; i++)
937 virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
938 for (i = 0; i < 2; i++)
939 virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
940 return 0;
941 }
942
943 int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
944 enum pipe_shader_type shader,
945 unsigned start_slot, unsigned count,
946 const struct pipe_shader_buffer *buffers)
947 {
948 int i;
949 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));
950
951 virgl_encoder_write_dword(ctx->cbuf, shader);
952 virgl_encoder_write_dword(ctx->cbuf, start_slot);
953 for (i = 0; i < count; i++) {
954 if (buffers) {
955 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
956 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
957 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
958 virgl_encoder_write_res(ctx, res);
959 virgl_dirty_res(res);
960 } else {
961 virgl_encoder_write_dword(ctx->cbuf, 0);
962 virgl_encoder_write_dword(ctx->cbuf, 0);
963 virgl_encoder_write_dword(ctx->cbuf, 0);
964 }
965 }
966 return 0;
967 }
968
969 int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
970 unsigned start_slot, unsigned count,
971 const struct pipe_shader_buffer *buffers)
972 {
973 int i;
974 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));
975
976 virgl_encoder_write_dword(ctx->cbuf, start_slot);
977 for (i = 0; i < count; i++) {
978 if (buffers) {
979 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
980 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
981 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
982 virgl_encoder_write_res(ctx, res);
983 virgl_dirty_res(res);
984 } else {
985 virgl_encoder_write_dword(ctx->cbuf, 0);
986 virgl_encoder_write_dword(ctx->cbuf, 0);
987 virgl_encoder_write_dword(ctx->cbuf, 0);
988 }
989 }
990 return 0;
991 }
992
993 int virgl_encode_set_shader_images(struct virgl_context *ctx,
994 enum pipe_shader_type shader,
995 unsigned start_slot, unsigned count,
996 const struct pipe_image_view *images)
997 {
998 int i;
999 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));
1000
1001 virgl_encoder_write_dword(ctx->cbuf, shader);
1002 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1003 for (i = 0; i < count; i++) {
1004 if (images) {
1005 struct virgl_resource *res = virgl_resource(images[i].resource);
1006 virgl_encoder_write_dword(ctx->cbuf, images[i].format);
1007 virgl_encoder_write_dword(ctx->cbuf, images[i].access);
1008 virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
1009 virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
1010 virgl_encoder_write_res(ctx, res);
1011 virgl_dirty_res(res);
1012 } else {
1013 virgl_encoder_write_dword(ctx->cbuf, 0);
1014 virgl_encoder_write_dword(ctx->cbuf, 0);
1015 virgl_encoder_write_dword(ctx->cbuf, 0);
1016 virgl_encoder_write_dword(ctx->cbuf, 0);
1017 virgl_encoder_write_dword(ctx->cbuf, 0);
1018 }
1019 }
1020 return 0;
1021 }
1022
1023 int virgl_encode_memory_barrier(struct virgl_context *ctx,
1024 unsigned flags)
1025 {
1026 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
1027 virgl_encoder_write_dword(ctx->cbuf, flags);
1028 return 0;
1029 }
1030
1031 int virgl_encode_launch_grid(struct virgl_context *ctx,
1032 const struct pipe_grid_info *grid_info)
1033 {
1034 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
1035 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
1036 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
1037 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
1038 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
1039 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
1040 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
1041 if (grid_info->indirect) {
1042 struct virgl_resource *res = virgl_resource(grid_info->indirect);
1043 virgl_encoder_write_res(ctx, res);
1044 } else
1045 virgl_encoder_write_dword(ctx->cbuf, 0);
1046 virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
1047 return 0;
1048 }
1049
1050 int virgl_encode_texture_barrier(struct virgl_context *ctx,
1051 unsigned flags)
1052 {
1053 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
1054 virgl_encoder_write_dword(ctx->cbuf, flags);
1055 return 0;
1056 }
1057
1058 int virgl_encode_host_debug_flagstring(struct virgl_context *ctx,
1059 const char *flagstring)
1060 {
1061 unsigned long slen = strlen(flagstring) + 1;
1062 uint32_t sslen;
1063 uint32_t string_length;
1064
1065 if (!slen)
1066 return 0;
1067
1068 if (slen > 4 * 0xffff) {
1069 debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1070 slen = 4 * 0xffff;
1071 }
1072
1073 sslen = (uint32_t )(slen + 3) / 4;
1074 string_length = (uint32_t)MIN2(sslen * 4, slen);
1075
1076 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen));
1077 virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length);
1078
1079 return 0;
1080 }