virgl: don't send a shader create with no data. (v2)
[mesa.git] / src / gallium / drivers / virgl / virgl_encode.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdint.h>
24 #include <assert.h>
25 #include <string.h>
26
27 #include "util/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
33
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
39
40 static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
41 uint32_t dword)
42 {
43 int len = (dword >> 16);
44
45 if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
46 ctx->base.flush(&ctx->base, NULL, 0);
47
48 virgl_encoder_write_dword(ctx->cbuf, dword);
49 return 0;
50 }
51
52 static void virgl_encoder_write_res(struct virgl_context *ctx,
53 struct virgl_resource *res)
54 {
55 struct virgl_winsys *vws = virgl_screen(ctx->base.screen)->vws;
56
57 if (res && res->hw_res)
58 vws->emit_res(vws, ctx->cbuf, res->hw_res, TRUE);
59 else {
60 virgl_encoder_write_dword(ctx->cbuf, 0);
61 }
62 }
63
64 int virgl_encode_bind_object(struct virgl_context *ctx,
65 uint32_t handle, uint32_t object)
66 {
67 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
68 virgl_encoder_write_dword(ctx->cbuf, handle);
69 return 0;
70 }
71
72 int virgl_encode_delete_object(struct virgl_context *ctx,
73 uint32_t handle, uint32_t object)
74 {
75 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
76 virgl_encoder_write_dword(ctx->cbuf, handle);
77 return 0;
78 }
79
80 int virgl_encode_blend_state(struct virgl_context *ctx,
81 uint32_t handle,
82 const struct pipe_blend_state *blend_state)
83 {
84 uint32_t tmp;
85 int i;
86
87 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
88 virgl_encoder_write_dword(ctx->cbuf, handle);
89
90 tmp =
91 VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
92 VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
93 VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
94 VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
95 VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);
96
97 virgl_encoder_write_dword(ctx->cbuf, tmp);
98
99 tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
100 virgl_encoder_write_dword(ctx->cbuf, tmp);
101
102 for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
103 tmp =
104 VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
105 VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
106 VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
107 VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor)|
108 VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
109 VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(blend_state->rt[i].alpha_src_factor) |
110 VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
111 VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
112 virgl_encoder_write_dword(ctx->cbuf, tmp);
113 }
114 return 0;
115 }
116
117 int virgl_encode_dsa_state(struct virgl_context *ctx,
118 uint32_t handle,
119 const struct pipe_depth_stencil_alpha_state *dsa_state)
120 {
121 uint32_t tmp;
122 int i;
123 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
124 virgl_encoder_write_dword(ctx->cbuf, handle);
125
126 tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth.enabled) |
127 VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth.writemask) |
128 VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth.func) |
129 VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha.enabled) |
130 VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha.func);
131 virgl_encoder_write_dword(ctx->cbuf, tmp);
132
133 for (i = 0; i < 2; i++) {
134 tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
135 VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
136 VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
137 VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
138 VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
139 VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
140 VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
141 virgl_encoder_write_dword(ctx->cbuf, tmp);
142 }
143
144 virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha.ref_value));
145 return 0;
146 }
147 int virgl_encode_rasterizer_state(struct virgl_context *ctx,
148 uint32_t handle,
149 const struct pipe_rasterizer_state *state)
150 {
151 uint32_t tmp;
152
153 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
154 virgl_encoder_write_dword(ctx->cbuf, handle);
155
156 tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
157 VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
158 VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
159 VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
160 VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
161 VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
162 VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
163 VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
164 VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
165 VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
166 VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
167 VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
168 VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
169 VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
170 VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
171 VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
172 VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
173 VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
174 VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
175 VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
176 VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
177 VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
178 VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
179 VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
180 VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
181 VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
182 VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
183 VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
184 VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);
185
186 virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
187 virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
188 virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
189 tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
190 VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
191 VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
192 virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
193 virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
194 virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
195 virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
196 virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
197 return 0;
198 }
199
200 static void virgl_emit_shader_header(struct virgl_context *ctx,
201 uint32_t handle, uint32_t len,
202 uint32_t type, uint32_t offlen,
203 uint32_t num_tokens)
204 {
205 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
206 virgl_encoder_write_dword(ctx->cbuf, handle);
207 virgl_encoder_write_dword(ctx->cbuf, type);
208 virgl_encoder_write_dword(ctx->cbuf, offlen);
209 virgl_encoder_write_dword(ctx->cbuf, num_tokens);
210 }
211
212 static void virgl_emit_shader_streamout(struct virgl_context *ctx,
213 const struct pipe_stream_output_info *so_info)
214 {
215 int num_outputs = 0;
216 int i;
217 uint32_t tmp;
218
219 if (so_info)
220 num_outputs = so_info->num_outputs;
221
222 virgl_encoder_write_dword(ctx->cbuf, num_outputs);
223 if (num_outputs) {
224 for (i = 0; i < 4; i++)
225 virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);
226
227 for (i = 0; i < so_info->num_outputs; i++) {
228 tmp =
229 VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
230 VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
231 VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
232 VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
233 VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
234 virgl_encoder_write_dword(ctx->cbuf, tmp);
235 virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
236 }
237 }
238 }
239
240 int virgl_encode_shader_state(struct virgl_context *ctx,
241 uint32_t handle,
242 uint32_t type,
243 const struct pipe_stream_output_info *so_info,
244 uint32_t cs_req_local_mem,
245 const struct tgsi_token *tokens)
246 {
247 char *str, *sptr;
248 uint32_t shader_len, len;
249 bool bret;
250 int num_tokens = tgsi_num_tokens(tokens);
251 int str_total_size = 65536;
252 int retry_size = 1;
253 uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
254 bool first_pass;
255 str = CALLOC(1, str_total_size);
256 if (!str)
257 return -1;
258
259 do {
260 int old_size;
261
262 bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
263 if (bret == false) {
264 if (virgl_debug & VIRGL_DEBUG_VERBOSE)
265 debug_printf("Failed to translate shader in available space - trying again\n");
266 old_size = str_total_size;
267 str_total_size = 65536 * ++retry_size;
268 str = REALLOC(str, old_size, str_total_size);
269 if (!str)
270 return -1;
271 }
272 } while (bret == false && retry_size < 10);
273
274 if (bret == false)
275 return -1;
276
277 if (virgl_debug & VIRGL_DEBUG_TGSI)
278 debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);
279
280 shader_len = strlen(str) + 1;
281
282 left_bytes = shader_len;
283
284 base_hdr_size = 5;
285 strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
286 first_pass = true;
287 sptr = str;
288 while (left_bytes) {
289 uint32_t length, offlen;
290 int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
291 if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_MAX_CMDBUF_DWORDS)
292 ctx->base.flush(&ctx->base, NULL, 0);
293
294 thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
295
296 length = MIN2(thispass, left_bytes);
297 len = ((length + 3) / 4) + hdr_len;
298
299 if (first_pass)
300 offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
301 else
302 offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
303
304 virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
305
306 if (type == PIPE_SHADER_COMPUTE)
307 virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
308 else
309 virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
310
311 virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
312
313 sptr += length;
314 first_pass = false;
315 left_bytes -= length;
316 }
317
318 FREE(str);
319 return 0;
320 }
321
322
323 int virgl_encode_clear(struct virgl_context *ctx,
324 unsigned buffers,
325 const union pipe_color_union *color,
326 double depth, unsigned stencil)
327 {
328 int i;
329 uint64_t qword;
330
331 STATIC_ASSERT(sizeof(qword) == sizeof(depth));
332 memcpy(&qword, &depth, sizeof(qword));
333
334 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
335 virgl_encoder_write_dword(ctx->cbuf, buffers);
336 for (i = 0; i < 4; i++)
337 virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
338 virgl_encoder_write_qword(ctx->cbuf, qword);
339 virgl_encoder_write_dword(ctx->cbuf, stencil);
340 return 0;
341 }
342
343 int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
344 const struct pipe_framebuffer_state *state)
345 {
346 struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
347 int i;
348
349 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
350 virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
351 virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
352 for (i = 0; i < state->nr_cbufs; i++) {
353 struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
354 virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
355 }
356
357 struct virgl_screen *rs = virgl_screen(ctx->base.screen);
358 if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
359 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
360 virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
361 virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
362 }
363 return 0;
364 }
365
366 int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
367 int start_slot,
368 int num_viewports,
369 const struct pipe_viewport_state *states)
370 {
371 int i,v;
372 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
373 virgl_encoder_write_dword(ctx->cbuf, start_slot);
374 for (v = 0; v < num_viewports; v++) {
375 for (i = 0; i < 3; i++)
376 virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
377 for (i = 0; i < 3; i++)
378 virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
379 }
380 return 0;
381 }
382
383 int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
384 uint32_t handle,
385 unsigned num_elements,
386 const struct pipe_vertex_element *element)
387 {
388 int i;
389 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
390 virgl_encoder_write_dword(ctx->cbuf, handle);
391 for (i = 0; i < num_elements; i++) {
392 virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
393 virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
394 virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
395 virgl_encoder_write_dword(ctx->cbuf, element[i].src_format);
396 }
397 return 0;
398 }
399
400 int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
401 unsigned num_buffers,
402 const struct pipe_vertex_buffer *buffers)
403 {
404 int i;
405 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
406 for (i = 0; i < num_buffers; i++) {
407 struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
408 virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
409 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
410 virgl_encoder_write_res(ctx, res);
411 }
412 return 0;
413 }
414
415 int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
416 const struct virgl_indexbuf *ib)
417 {
418 int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
419 struct virgl_resource *res = NULL;
420 if (ib)
421 res = virgl_resource(ib->buffer);
422
423 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
424 virgl_encoder_write_res(ctx, res);
425 if (ib) {
426 virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
427 virgl_encoder_write_dword(ctx->cbuf, ib->offset);
428 }
429 return 0;
430 }
431
432 int virgl_encoder_draw_vbo(struct virgl_context *ctx,
433 const struct pipe_draw_info *info)
434 {
435 uint32_t length = VIRGL_DRAW_VBO_SIZE;
436 if (info->mode == PIPE_PRIM_PATCHES)
437 length = VIRGL_DRAW_VBO_SIZE_TESS;
438 if (info->indirect)
439 length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
440 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
441 virgl_encoder_write_dword(ctx->cbuf, info->start);
442 virgl_encoder_write_dword(ctx->cbuf, info->count);
443 virgl_encoder_write_dword(ctx->cbuf, info->mode);
444 virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
445 virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
446 virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
447 virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
448 virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
449 virgl_encoder_write_dword(ctx->cbuf, info->restart_index);
450 virgl_encoder_write_dword(ctx->cbuf, info->min_index);
451 virgl_encoder_write_dword(ctx->cbuf, info->max_index);
452 if (info->count_from_stream_output)
453 virgl_encoder_write_dword(ctx->cbuf, info->count_from_stream_output->buffer_size);
454 else
455 virgl_encoder_write_dword(ctx->cbuf, 0);
456 if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
457 virgl_encoder_write_dword(ctx->cbuf, info->vertices_per_patch); /* vertices per patch */
458 virgl_encoder_write_dword(ctx->cbuf, info->drawid); /* drawid */
459 }
460 if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
461 virgl_encoder_write_res(ctx, virgl_resource(info->indirect->buffer));
462 virgl_encoder_write_dword(ctx->cbuf, info->indirect->offset);
463 virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect stride */
464 virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count */
465 virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count offset */
466 virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
467 }
468 return 0;
469 }
470
471 int virgl_encoder_create_surface(struct virgl_context *ctx,
472 uint32_t handle,
473 struct virgl_resource *res,
474 const struct pipe_surface *templat)
475 {
476 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
477 virgl_encoder_write_dword(ctx->cbuf, handle);
478 virgl_encoder_write_res(ctx, res);
479 virgl_encoder_write_dword(ctx->cbuf, templat->format);
480 if (templat->texture->target == PIPE_BUFFER) {
481 virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.first_element);
482 virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.last_element);
483
484 } else {
485 virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
486 virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
487 }
488 return 0;
489 }
490
491 int virgl_encoder_create_so_target(struct virgl_context *ctx,
492 uint32_t handle,
493 struct virgl_resource *res,
494 unsigned buffer_offset,
495 unsigned buffer_size)
496 {
497 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
498 virgl_encoder_write_dword(ctx->cbuf, handle);
499 virgl_encoder_write_res(ctx, res);
500 virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
501 virgl_encoder_write_dword(ctx->cbuf, buffer_size);
502 return 0;
503 }
504
505 static void virgl_encoder_iw_emit_header_1d(struct virgl_context *ctx,
506 struct virgl_resource *res,
507 unsigned level, unsigned usage,
508 const struct pipe_box *box,
509 unsigned stride, unsigned layer_stride)
510 {
511 virgl_encoder_write_res(ctx, res);
512 virgl_encoder_write_dword(ctx->cbuf, level);
513 virgl_encoder_write_dword(ctx->cbuf, usage);
514 virgl_encoder_write_dword(ctx->cbuf, stride);
515 virgl_encoder_write_dword(ctx->cbuf, layer_stride);
516 virgl_encoder_write_dword(ctx->cbuf, box->x);
517 virgl_encoder_write_dword(ctx->cbuf, box->y);
518 virgl_encoder_write_dword(ctx->cbuf, box->z);
519 virgl_encoder_write_dword(ctx->cbuf, box->width);
520 virgl_encoder_write_dword(ctx->cbuf, box->height);
521 virgl_encoder_write_dword(ctx->cbuf, box->depth);
522 }
523
524 int virgl_encoder_inline_write(struct virgl_context *ctx,
525 struct virgl_resource *res,
526 unsigned level, unsigned usage,
527 const struct pipe_box *box,
528 const void *data, unsigned stride,
529 unsigned layer_stride)
530 {
531 uint32_t size = (stride ? stride : box->width) * box->height;
532 uint32_t length, thispass, left_bytes;
533 struct pipe_box mybox = *box;
534
535 length = 11 + (size + 3) / 4;
536 if ((ctx->cbuf->cdw + length + 1) > VIRGL_MAX_CMDBUF_DWORDS) {
537 if (box->height > 1 || box->depth > 1) {
538 debug_printf("inline transfer failed due to multi dimensions and too large\n");
539 assert(0);
540 }
541 }
542
543 left_bytes = size;
544 while (left_bytes) {
545 if (ctx->cbuf->cdw + 12 >= VIRGL_MAX_CMDBUF_DWORDS)
546 ctx->base.flush(&ctx->base, NULL, 0);
547
548 thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - 12) * 4;
549
550 length = MIN2(thispass, left_bytes);
551
552 mybox.width = length;
553 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
554 virgl_encoder_iw_emit_header_1d(ctx, res, level, usage, &mybox, stride, layer_stride);
555 virgl_encoder_write_block(ctx->cbuf, data, length);
556 left_bytes -= length;
557 mybox.x += length;
558 data += length;
559 }
560 return 0;
561 }
562
/* Intentional no-op: frontbuffer flushing is not encoded as a protocol
 * command in this driver. */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
   return 0;
}
570
571 int virgl_encode_sampler_state(struct virgl_context *ctx,
572 uint32_t handle,
573 const struct pipe_sampler_state *state)
574 {
575 uint32_t tmp;
576 int i;
577 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
578 virgl_encoder_write_dword(ctx->cbuf, handle);
579
580 tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
581 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
582 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
583 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
584 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
585 VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
586 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
587 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
588 VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map);
589
590 virgl_encoder_write_dword(ctx->cbuf, tmp);
591 virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
592 virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
593 virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
594 for (i = 0; i < 4; i++)
595 virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
596 return 0;
597 }
598
599
600 int virgl_encode_sampler_view(struct virgl_context *ctx,
601 uint32_t handle,
602 struct virgl_resource *res,
603 const struct pipe_sampler_view *state)
604 {
605 unsigned elem_size = util_format_get_blocksize(state->format);
606 struct virgl_screen *rs = virgl_screen(ctx->base.screen);
607 uint32_t tmp;
608 uint32_t dword_fmt_target = state->format;
609 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
610 virgl_encoder_write_dword(ctx->cbuf, handle);
611 virgl_encoder_write_res(ctx, res);
612 if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
613 dword_fmt_target |= (state->target << 24);
614 virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
615 if (res->u.b.target == PIPE_BUFFER) {
616 virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
617 virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
618 } else {
619 virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
620 virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
621 }
622 tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
623 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
624 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
625 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
626 virgl_encoder_write_dword(ctx->cbuf, tmp);
627 return 0;
628 }
629
630 int virgl_encode_set_sampler_views(struct virgl_context *ctx,
631 uint32_t shader_type,
632 uint32_t start_slot,
633 uint32_t num_views,
634 struct virgl_sampler_view **views)
635 {
636 int i;
637 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
638 virgl_encoder_write_dword(ctx->cbuf, shader_type);
639 virgl_encoder_write_dword(ctx->cbuf, start_slot);
640 for (i = 0; i < num_views; i++) {
641 uint32_t handle = views[i] ? views[i]->handle : 0;
642 virgl_encoder_write_dword(ctx->cbuf, handle);
643 }
644 return 0;
645 }
646
647 int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
648 uint32_t shader_type,
649 uint32_t start_slot,
650 uint32_t num_handles,
651 uint32_t *handles)
652 {
653 int i;
654 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
655 virgl_encoder_write_dword(ctx->cbuf, shader_type);
656 virgl_encoder_write_dword(ctx->cbuf, start_slot);
657 for (i = 0; i < num_handles; i++)
658 virgl_encoder_write_dword(ctx->cbuf, handles[i]);
659 return 0;
660 }
661
662 int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
663 uint32_t shader,
664 uint32_t index,
665 uint32_t size,
666 const void *data)
667 {
668 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
669 virgl_encoder_write_dword(ctx->cbuf, shader);
670 virgl_encoder_write_dword(ctx->cbuf, index);
671 if (data)
672 virgl_encoder_write_block(ctx->cbuf, data, size * 4);
673 return 0;
674 }
675
676 int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
677 uint32_t shader,
678 uint32_t index,
679 uint32_t offset,
680 uint32_t length,
681 struct virgl_resource *res)
682 {
683 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
684 virgl_encoder_write_dword(ctx->cbuf, shader);
685 virgl_encoder_write_dword(ctx->cbuf, index);
686 virgl_encoder_write_dword(ctx->cbuf, offset);
687 virgl_encoder_write_dword(ctx->cbuf, length);
688 virgl_encoder_write_res(ctx, res);
689 return 0;
690 }
691
692
693 int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
694 const struct pipe_stencil_ref *ref)
695 {
696 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
697 virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0] , (ref->ref_value[1])));
698 return 0;
699 }
700
701 int virgl_encoder_set_blend_color(struct virgl_context *ctx,
702 const struct pipe_blend_color *color)
703 {
704 int i;
705 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
706 for (i = 0; i < 4; i++)
707 virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
708 return 0;
709 }
710
711 int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
712 unsigned start_slot,
713 int num_scissors,
714 const struct pipe_scissor_state *ss)
715 {
716 int i;
717 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
718 virgl_encoder_write_dword(ctx->cbuf, start_slot);
719 for (i = 0; i < num_scissors; i++) {
720 virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
721 virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
722 }
723 return 0;
724 }
725
726 void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
727 const struct pipe_poly_stipple *ps)
728 {
729 int i;
730 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
731 for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
732 virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
733 }
734 }
735
736 void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
737 unsigned sample_mask)
738 {
739 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
740 virgl_encoder_write_dword(ctx->cbuf, sample_mask);
741 }
742
743 void virgl_encoder_set_min_samples(struct virgl_context *ctx,
744 unsigned min_samples)
745 {
746 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
747 virgl_encoder_write_dword(ctx->cbuf, min_samples);
748 }
749
750 void virgl_encoder_set_clip_state(struct virgl_context *ctx,
751 const struct pipe_clip_state *clip)
752 {
753 int i, j;
754 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
755 for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
756 for (j = 0; j < 4; j++) {
757 virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
758 }
759 }
760 }
761
762 int virgl_encode_resource_copy_region(struct virgl_context *ctx,
763 struct virgl_resource *dst_res,
764 unsigned dst_level,
765 unsigned dstx, unsigned dsty, unsigned dstz,
766 struct virgl_resource *src_res,
767 unsigned src_level,
768 const struct pipe_box *src_box)
769 {
770 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
771 virgl_encoder_write_res(ctx, dst_res);
772 virgl_encoder_write_dword(ctx->cbuf, dst_level);
773 virgl_encoder_write_dword(ctx->cbuf, dstx);
774 virgl_encoder_write_dword(ctx->cbuf, dsty);
775 virgl_encoder_write_dword(ctx->cbuf, dstz);
776 virgl_encoder_write_res(ctx, src_res);
777 virgl_encoder_write_dword(ctx->cbuf, src_level);
778 virgl_encoder_write_dword(ctx->cbuf, src_box->x);
779 virgl_encoder_write_dword(ctx->cbuf, src_box->y);
780 virgl_encoder_write_dword(ctx->cbuf, src_box->z);
781 virgl_encoder_write_dword(ctx->cbuf, src_box->width);
782 virgl_encoder_write_dword(ctx->cbuf, src_box->height);
783 virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
784 return 0;
785 }
786
/**
 * Encode a VIRGL_CCMD_BLIT command from a pipe_blit_info.
 *
 * The first dword packs mask/filter/scissor-enable/render-condition/
 * alpha-blend flags; the scissor rectangle follows as two packed dwords
 * (min/max x in the low 16 bits, y in the high 16 bits), then the full
 * destination and source resource/level/format/box descriptions.
 */
int virgl_encode_blit(struct virgl_context *ctx,
                      struct virgl_resource *dst_res,
                      struct virgl_resource *src_res,
                      const struct pipe_blit_info *blit)
{
   uint32_t tmp;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
   /* Pack the boolean/enum blit state into the S0 dword. */
   tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
      VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
      VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
      VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
      VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));

   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.format);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);

   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.format);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
   return 0;
}
824
/**
 * Encode creation of a VIRGL_OBJECT_QUERY object.
 *
 * query_type occupies the low 16 bits of the second dword and
 * query_index the high 16 bits; the result buffer resource and its
 * offset tell the host where to write query results.
 */
int virgl_encoder_create_query(struct virgl_context *ctx,
                               uint32_t handle,
                               uint query_type,
                               uint query_index,
                               struct virgl_resource *res,
                               uint32_t offset)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, ((query_type & 0xffff) | (query_index << 16)));
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_res(ctx, res);
   return 0;
}
839
/* Encode VIRGL_CCMD_BEGIN_QUERY for the query object named by handle. */
int virgl_encoder_begin_query(struct virgl_context *ctx,
                              uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}
847
/* Encode VIRGL_CCMD_END_QUERY for the query object named by handle. */
int virgl_encoder_end_query(struct virgl_context *ctx,
                            uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}
855
/*
 * Encode VIRGL_CCMD_GET_QUERY_RESULT.  The second dword tells the host
 * whether to wait (1) for the result or poll (0).
 */
int virgl_encoder_get_query_result(struct virgl_context *ctx,
                                   uint32_t handle, boolean wait)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
   return 0;
}
864
/*
 * Encode VIRGL_CCMD_SET_RENDER_CONDITION: query handle, the pass/fail
 * condition flag, and the pipe_render_cond_flag mode.
 */
int virgl_encoder_render_condition(struct virgl_context *ctx,
                                   uint32_t handle, boolean condition,
                                   enum pipe_render_cond_flag mode)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, condition);
   virgl_encoder_write_dword(ctx->cbuf, mode);
   return 0;
}
875
876 int virgl_encoder_set_so_targets(struct virgl_context *ctx,
877 unsigned num_targets,
878 struct pipe_stream_output_target **targets,
879 unsigned append_bitmask)
880 {
881 int i;
882
883 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
884 virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
885 for (i = 0; i < num_targets; i++) {
886 struct virgl_so_target *tg = virgl_so_target(targets[i]);
887 virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
888 }
889 return 0;
890 }
891
892
/* Encode VIRGL_CCMD_SET_SUB_CTX to switch the host to sub_ctx_id. */
int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}
899
/* Encode VIRGL_CCMD_CREATE_SUB_CTX for the given sub-context id. */
int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}
906
/* Encode VIRGL_CCMD_DESTROY_SUB_CTX for the given sub-context id. */
int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}
913
/*
 * Encode VIRGL_CCMD_BIND_SHADER: bind the shader object named by
 * handle to the given stage type.
 */
int virgl_encode_bind_shader(struct virgl_context *ctx,
                             uint32_t handle, uint32_t type)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type);
   return 0;
}
922
/*
 * Encode VIRGL_CCMD_SET_TESS_STATE: four outer tessellation levels
 * followed by two inner levels, each as a float reinterpreted to a
 * dword via fui().
 */
int virgl_encode_set_tess_state(struct virgl_context *ctx,
                                const float outer[4],
                                const float inner[2])
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
   for (i = 0; i < 2; i++)
      virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
   return 0;
}
935
936 int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
937 enum pipe_shader_type shader,
938 unsigned start_slot, unsigned count,
939 const struct pipe_shader_buffer *buffers)
940 {
941 int i;
942 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));
943
944 virgl_encoder_write_dword(ctx->cbuf, shader);
945 virgl_encoder_write_dword(ctx->cbuf, start_slot);
946 for (i = 0; i < count; i++) {
947 if (buffers) {
948 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
949 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
950 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
951 virgl_encoder_write_res(ctx, res);
952 } else {
953 virgl_encoder_write_dword(ctx->cbuf, 0);
954 virgl_encoder_write_dword(ctx->cbuf, 0);
955 virgl_encoder_write_dword(ctx->cbuf, 0);
956 }
957 }
958 return 0;
959 }
960
961 int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
962 unsigned start_slot, unsigned count,
963 const struct pipe_shader_buffer *buffers)
964 {
965 int i;
966 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));
967
968 virgl_encoder_write_dword(ctx->cbuf, start_slot);
969 for (i = 0; i < count; i++) {
970 if (buffers) {
971 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
972 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
973 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
974 virgl_encoder_write_res(ctx, res);
975 } else {
976 virgl_encoder_write_dword(ctx->cbuf, 0);
977 virgl_encoder_write_dword(ctx->cbuf, 0);
978 virgl_encoder_write_dword(ctx->cbuf, 0);
979 }
980 }
981 return 0;
982 }
983
984 int virgl_encode_set_shader_images(struct virgl_context *ctx,
985 enum pipe_shader_type shader,
986 unsigned start_slot, unsigned count,
987 const struct pipe_image_view *images)
988 {
989 int i;
990 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));
991
992 virgl_encoder_write_dword(ctx->cbuf, shader);
993 virgl_encoder_write_dword(ctx->cbuf, start_slot);
994 for (i = 0; i < count; i++) {
995 if (images) {
996 struct virgl_resource *res = virgl_resource(images[i].resource);
997 virgl_encoder_write_dword(ctx->cbuf, images[i].format);
998 virgl_encoder_write_dword(ctx->cbuf, images[i].access);
999 virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
1000 virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
1001 virgl_encoder_write_res(ctx, res);
1002 } else {
1003 virgl_encoder_write_dword(ctx->cbuf, 0);
1004 virgl_encoder_write_dword(ctx->cbuf, 0);
1005 virgl_encoder_write_dword(ctx->cbuf, 0);
1006 virgl_encoder_write_dword(ctx->cbuf, 0);
1007 virgl_encoder_write_dword(ctx->cbuf, 0);
1008 }
1009 }
1010 return 0;
1011 }
1012
/* Encode VIRGL_CCMD_MEMORY_BARRIER with the given barrier flag bits. */
int virgl_encode_memory_barrier(struct virgl_context *ctx,
                                unsigned flags)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, flags);
   return 0;
}
1020
/**
 * Encode VIRGL_CCMD_LAUNCH_GRID for a compute dispatch.
 *
 * Emits the block (workgroup) dimensions, the grid dimensions, then
 * either the indirect-dispatch buffer resource handle or a zero dword
 * when the dispatch is direct, followed by the indirect offset (which
 * is written unconditionally to keep the command length fixed).
 */
int virgl_encode_launch_grid(struct virgl_context *ctx,
                             const struct pipe_grid_info *grid_info)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
   if (grid_info->indirect) {
      struct virgl_resource *res = virgl_resource(grid_info->indirect);
      virgl_encoder_write_res(ctx, res);
   } else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
   return 0;
}
1039
/* Encode VIRGL_CCMD_TEXTURE_BARRIER with the given barrier flag bits. */
int virgl_encode_texture_barrier(struct virgl_context *ctx,
                                 unsigned flags)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, flags);
   return 0;
}