/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdint.h>

#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "pipe/p_state.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"

#include "virgl_context.h"
#include "virgl_encode.h"
#include "virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

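/* Write a command header dword, flushing first if the full command would
 * not fit in the command buffer.  The upper 16 bits of the header (as
 * packed by VIRGL_CMD0) carry the payload length in dwords.
 */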
static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
                                         uint32_t dword)
{
   int len = (dword >> 16);

   if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
      ctx->base.flush(&ctx->base, NULL, 0);

   virgl_encoder_write_dword(ctx->cbuf, dword);
   return 0;
}

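/* Emit a resource handle into the command stream via the winsys, which
 * presumably also tracks the reference; a zero dword is written when no
 * resource is bound.
 */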
static void virgl_encoder_write_res(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
   struct virgl_winsys *vws = virgl_screen(ctx->base.screen)->vws;

   if (res && res->hw_res)
      vws->emit_res(vws, ctx->cbuf, res->hw_res, TRUE);
   else {
      virgl_encoder_write_dword(ctx->cbuf, 0);
   }
}

int virgl_encode_bind_object(struct virgl_context *ctx,
                             uint32_t handle, uint32_t object)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encode_delete_object(struct virgl_context *ctx,
                               uint32_t handle, uint32_t object)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encode_blend_state(struct virgl_context *ctx,
                             uint32_t handle,
                             const struct pipe_blend_state *blend_state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp =
      VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
      VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
      VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);

   virgl_encoder_write_dword(ctx->cbuf, tmp);

   tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
      tmp =
         VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(blend_state->rt[i].alpha_src_factor) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
         VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }
   return 0;
}

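/* Depth/stencil/alpha state: one dword of packed depth and alpha flags,
 * one dword per stencil face, then the alpha reference value as a float
 * bit pattern.
 */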
int virgl_encode_dsa_state(struct virgl_context *ctx,
                           uint32_t handle,
                           const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth.enabled) |
      VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth.writemask) |
      VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth.func) |
      VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha.enabled) |
      VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha.func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   for (i = 0; i < 2; i++) {
      tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
         VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
         VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
         VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }

   virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha.ref_value));
   return 0;
}

int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
      VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip) |
      VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
      VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
      VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
      VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
      VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
      VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
      VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
      VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
      VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
      VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
      VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
      VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
      VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
      VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
      VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
      VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
      VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
      VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
      VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
      VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
      VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
      VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
      VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
      VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
      VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
      VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
      VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
      VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}

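/* Common CREATE_OBJECT header for shader objects.  offlen carries either
 * the total shader string length (first chunk) or the continuation byte
 * offset with VIRGL_OBJ_SHADER_OFFSET_CONT set (subsequent chunks).
 */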
static void virgl_emit_shader_header(struct virgl_context *ctx,
                                     uint32_t handle, uint32_t len,
                                     uint32_t type, uint32_t offlen,
                                     uint32_t num_tokens)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type);
   virgl_encoder_write_dword(ctx->cbuf, offlen);
   virgl_encoder_write_dword(ctx->cbuf, num_tokens);
}

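/* Append the stream output description: the output count, the four
 * buffer strides, then two dwords per output (the packed output
 * description followed by a dword that is currently always zero).
 */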
static void virgl_emit_shader_streamout(struct virgl_context *ctx,
                                        const struct pipe_stream_output_info *so_info)
{
   int num_outputs = 0;
   int i;
   uint32_t tmp;

   if (so_info)
      num_outputs = so_info->num_outputs;

   virgl_encoder_write_dword(ctx->cbuf, num_outputs);
   if (num_outputs) {
      for (i = 0; i < 4; i++)
         virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);

      for (i = 0; i < so_info->num_outputs; i++) {
         tmp =
            VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
         virgl_encoder_write_dword(ctx->cbuf, tmp);
         virgl_encoder_write_dword(ctx->cbuf, 0);
      }
   }
}

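/* Encode a shader by dumping the TGSI tokens to text and streaming that
 * string to the host.  The text may not fit in one command buffer, so it
 * is split into chunks: each chunk gets its own shader header, and only
 * the first chunk carries the stream output info.
 */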
int virgl_encode_shader_state(struct virgl_context *ctx,
                              uint32_t handle,
                              uint32_t type,
                              const struct pipe_stream_output_info *so_info,
                              const struct tgsi_token *tokens)
{
   char *str, *sptr;
   uint32_t shader_len, len;
   bool bret;
   int num_tokens = tgsi_num_tokens(tokens);
   int str_total_size = 65536;
   int retry_size = 1;
   uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
   bool first_pass;

   str = CALLOC(1, str_total_size);
   if (!str)
      return -1;

   do {
      int old_size;

      bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
      if (bret == false) {
         fprintf(stderr, "Failed to translate shader in available space - trying again\n");
         old_size = str_total_size;
         str_total_size = 65536 * ++retry_size;
         str = REALLOC(str, old_size, str_total_size);
         if (!str)
            return -1;
      }
   } while (bret == false && retry_size < 10);

   if (bret == false) {
      /* The shader still did not fit after growing the buffer; free the
       * temporary string instead of leaking it.
       */
      FREE(str);
      return -1;
   }

   shader_len = strlen(str) + 1;

   left_bytes = shader_len;

   base_hdr_size = 5;
   strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
   first_pass = true;
   sptr = str;
   while (left_bytes) {
      uint32_t length, offlen;
      int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);

      if (ctx->cbuf->cdw + hdr_len + 1 > VIRGL_MAX_CMDBUF_DWORDS)
         ctx->base.flush(&ctx->base, NULL, 0);

      thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;

      length = MIN2(thispass, left_bytes);
      len = ((length + 3) / 4) + hdr_len;

      if (first_pass)
         offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
      else
         offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;

      virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);

      virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);

      virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);

      sptr += length;
      first_pass = false;
      left_bytes -= length;
   }

   FREE(str);
   return 0;
}


int virgl_encode_clear(struct virgl_context *ctx,
                       unsigned buffers,
                       const union pipe_color_union *color,
                       double depth, unsigned stencil)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, buffers);
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
   virgl_encoder_write_qword(ctx->cbuf, *(uint64_t *)&depth);
   virgl_encoder_write_dword(ctx->cbuf, stencil);
   return 0;
}

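/* Framebuffer state: the color buffer count, the zsbuf surface handle
 * (0 if none), then one surface handle per color buffer.
 */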
int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
   virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
   virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
   for (i = 0; i < state->nr_cbufs; i++) {
      struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
      virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
   }

   return 0;
}

int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
                                      int start_slot,
                                      int num_viewports,
                                      const struct pipe_viewport_state *states)
{
   int i, v;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (v = 0; v < num_viewports; v++) {
      for (i = 0; i < 3; i++)
         virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
      for (i = 0; i < 3; i++)
         virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
   }
   return 0;
}

int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
                                         uint32_t handle,
                                         unsigned num_elements,
                                         const struct pipe_vertex_element *element)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   for (i = 0; i < num_elements; i++) {
      virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
      virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
      virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
      virgl_encoder_write_dword(ctx->cbuf, element[i].src_format);
   }
   return 0;
}

int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
   for (i = 0; i < num_buffers; i++) {
      struct virgl_resource *res = virgl_resource(buffers[i].buffer);
      virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
      virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
      virgl_encoder_write_res(ctx, res);
   }
   return 0;
}

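/* The index buffer command is variable sized: with a buffer bound it
 * also carries the index size and offset, otherwise only a null resource
 * handle is sent.
 */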
int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
                                   const struct pipe_index_buffer *ib)
{
   int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
   struct virgl_resource *res = NULL;

   if (ib)
      res = virgl_resource(ib->buffer);

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
   virgl_encoder_write_res(ctx, res);
   if (ib) {
      virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
      virgl_encoder_write_dword(ctx->cbuf, ib->offset);
   }
   return 0;
}

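/* Encode a draw call.  The order of the dwords here presumably has to
 * match the host-side decoder; the final dword is the stream output
 * target's buffer size when drawing from stream output, or zero.
 */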
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
                           const struct pipe_draw_info *info)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, VIRGL_DRAW_VBO_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, info->start);
   virgl_encoder_write_dword(ctx->cbuf, info->count);
   virgl_encoder_write_dword(ctx->cbuf, info->mode);
   virgl_encoder_write_dword(ctx->cbuf, info->indexed);
   virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
   virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
   virgl_encoder_write_dword(ctx->cbuf, info->restart_index);
   virgl_encoder_write_dword(ctx->cbuf, info->min_index);
   virgl_encoder_write_dword(ctx->cbuf, info->max_index);
   if (info->count_from_stream_output)
      virgl_encoder_write_dword(ctx->cbuf, info->count_from_stream_output->buffer_size);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   return 0;
}

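/* Surfaces of buffer resources are described by a first/last element
 * range; texture surfaces use the mip level plus a packed
 * first_layer/last_layer pair.
 */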
int virgl_encoder_create_surface(struct virgl_context *ctx,
                                 uint32_t handle,
                                 struct virgl_resource *res,
                                 const struct pipe_surface *templat)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, templat->format);
   if (templat->texture->target == PIPE_BUFFER) {
      virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.first_element);
      virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.last_element);
   } else {
      virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
      virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
   }
   return 0;
}

int virgl_encoder_create_so_target(struct virgl_context *ctx,
                                   uint32_t handle,
                                   struct virgl_resource *res,
                                   unsigned buffer_offset,
                                   unsigned buffer_size)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
   virgl_encoder_write_dword(ctx->cbuf, buffer_size);
   return 0;
}

static void virgl_encoder_iw_emit_header_1d(struct virgl_context *ctx,
                                            struct virgl_resource *res,
                                            unsigned level, unsigned usage,
                                            const struct pipe_box *box,
                                            unsigned stride, unsigned layer_stride)
{
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, level);
   virgl_encoder_write_dword(ctx->cbuf, usage);
   virgl_encoder_write_dword(ctx->cbuf, stride);
   virgl_encoder_write_dword(ctx->cbuf, layer_stride);
   virgl_encoder_write_dword(ctx->cbuf, box->x);
   virgl_encoder_write_dword(ctx->cbuf, box->y);
   virgl_encoder_write_dword(ctx->cbuf, box->z);
   virgl_encoder_write_dword(ctx->cbuf, box->width);
   virgl_encoder_write_dword(ctx->cbuf, box->height);
   virgl_encoder_write_dword(ctx->cbuf, box->depth);
}

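/* Upload data inline in the command stream.  Data that does not fit in
 * the remaining command buffer space is split across several
 * RESOURCE_INLINE_WRITE commands; only 1D boxes can be split that way,
 * hence the debug assert below for oversized multi-row/multi-layer
 * boxes.
 */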
int virgl_encoder_inline_write(struct virgl_context *ctx,
                               struct virgl_resource *res,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box,
                               const void *data, unsigned stride,
                               unsigned layer_stride)
{
   uint32_t size = (stride ? stride : box->width) * box->height;
   uint32_t length, thispass, left_bytes;
   struct pipe_box mybox = *box;

   length = 11 + (size + 3) / 4;
   if ((ctx->cbuf->cdw + length + 1) > VIRGL_MAX_CMDBUF_DWORDS) {
      if (box->height > 1 || box->depth > 1) {
         debug_printf("inline transfer failed due to multi dimensions and too large\n");
         assert(0);
      }
   }

   left_bytes = size;
   while (left_bytes) {
      if (ctx->cbuf->cdw + 12 > VIRGL_MAX_CMDBUF_DWORDS)
         ctx->base.flush(&ctx->base, NULL, 0);

      thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - 12) * 4;

      length = MIN2(thispass, left_bytes);

      mybox.width = length;
      virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
      virgl_encoder_iw_emit_header_1d(ctx, res, level, usage, &mybox, stride, layer_stride);
      virgl_encoder_write_block(ctx->cbuf, data, length);
      left_bytes -= length;
      mybox.x += length;
      data += length;
   }
   return 0;
}

int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}

int virgl_encode_sampler_state(struct virgl_context *ctx,
                               uint32_t handle,
                               const struct pipe_sampler_state *state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
      VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func);

   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
   return 0;
}


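/* Sampler views of buffer resources are encoded as a first/last element
 * range derived from the byte offset and size; texture views pack the
 * layer and mip level ranges into single dwords.
 */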
int virgl_encode_sampler_view(struct virgl_context *ctx,
                              uint32_t handle,
                              struct virgl_resource *res,
                              const struct pipe_sampler_view *state)
{
   unsigned elem_size = util_format_get_blocksize(state->format);
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, state->format);
   if (res->u.b.target == PIPE_BUFFER) {
      virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
      virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
   } else {
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
   }
   tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   return 0;
}

int virgl_encode_set_sampler_views(struct virgl_context *ctx,
                                   uint32_t shader_type,
                                   uint32_t start_slot,
                                   uint32_t num_views,
                                   struct virgl_sampler_view **views)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
   virgl_encoder_write_dword(ctx->cbuf, shader_type);
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_views; i++) {
      uint32_t handle = views[i] ? views[i]->handle : 0;
      virgl_encoder_write_dword(ctx->cbuf, handle);
   }
   return 0;
}

int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
                                     uint32_t shader_type,
                                     uint32_t start_slot,
                                     uint32_t num_handles,
                                     uint32_t *handles)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
   virgl_encoder_write_dword(ctx->cbuf, shader_type);
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_handles; i++)
      virgl_encoder_write_dword(ctx->cbuf, handles[i]);
   return 0;
}

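/* Upload constant buffer contents inline.  Note that size is in dwords:
 * the command length is size + 2 and size * 4 bytes of data are written.
 */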
int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
                                        uint32_t shader,
                                        uint32_t index,
                                        uint32_t size,
                                        const void *data)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, index);
   if (data)
      virgl_encoder_write_block(ctx->cbuf, data, size * 4);
   return 0;
}

int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
                                     uint32_t shader,
                                     uint32_t index,
                                     uint32_t offset,
                                     uint32_t length,
                                     struct virgl_resource *res)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, index);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_dword(ctx->cbuf, length);
   virgl_encoder_write_res(ctx, res);
   return 0;
}


int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0], (ref->ref_value[1])));
   return 0;
}

int virgl_encoder_set_blend_color(struct virgl_context *ctx,
                                  const struct pipe_blend_color *color)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
   return 0;
}

int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
                                    unsigned start_slot,
                                    int num_scissors,
                                    const struct pipe_scissor_state *ss)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_scissors; i++) {
      virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
      virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
   }
   return 0;
}

void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
                                       const struct pipe_poly_stipple *ps)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
   for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
      virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
   }
}

void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
                                   unsigned sample_mask)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, sample_mask);
}

void virgl_encoder_set_clip_state(struct virgl_context *ctx,
                                  const struct pipe_clip_state *clip)
{
   int i, j;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
   for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
      for (j = 0; j < 4; j++) {
         virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
      }
   }
}

int virgl_encode_resource_copy_region(struct virgl_context *ctx,
                                      struct virgl_resource *dst_res,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct virgl_resource *src_res,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, dst_level);
   virgl_encoder_write_dword(ctx->cbuf, dstx);
   virgl_encoder_write_dword(ctx->cbuf, dsty);
   virgl_encoder_write_dword(ctx->cbuf, dstz);
   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, src_level);
   virgl_encoder_write_dword(ctx->cbuf, src_box->x);
   virgl_encoder_write_dword(ctx->cbuf, src_box->y);
   virgl_encoder_write_dword(ctx->cbuf, src_box->z);
   virgl_encoder_write_dword(ctx->cbuf, src_box->width);
   virgl_encoder_write_dword(ctx->cbuf, src_box->height);
   virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
   return 0;
}

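/* Encode a blit: a dword of packed flags (mask, filter, scissor and
 * render-condition enables, alpha blend), the scissor rectangle, then
 * the destination and source resources with their levels, formats and
 * boxes.
 */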
int virgl_encode_blit(struct virgl_context *ctx,
                      struct virgl_resource *dst_res,
                      struct virgl_resource *src_res,
                      const struct pipe_blit_info *blit)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
   tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
      VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
      VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
      VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
      VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));

   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.format);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);

   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.format);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
   return 0;
}

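/* Queries are backed by a resource (at the given offset) that the host
 * side presumably uses to store results; the query type and index are
 * packed into a single dword.
 */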
int virgl_encoder_create_query(struct virgl_context *ctx,
                               uint32_t handle,
                               uint query_type,
                               uint query_index,
                               struct virgl_resource *res,
                               uint32_t offset)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, ((query_type & 0xffff) | (query_index << 16)));
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_res(ctx, res);
   return 0;
}

int virgl_encoder_begin_query(struct virgl_context *ctx,
                              uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encoder_end_query(struct virgl_context *ctx,
                            uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encoder_get_query_result(struct virgl_context *ctx,
                                   uint32_t handle, boolean wait)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
   return 0;
}

int virgl_encoder_render_condition(struct virgl_context *ctx,
                                   uint32_t handle, boolean condition,
                                   uint mode)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, condition);
   virgl_encoder_write_dword(ctx->cbuf, mode);
   return 0;
}

int virgl_encoder_set_so_targets(struct virgl_context *ctx,
                                 unsigned num_targets,
                                 struct pipe_stream_output_target **targets,
                                 unsigned append_bitmask)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
   virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
   for (i = 0; i < num_targets; i++) {
      struct virgl_so_target *tg = virgl_so_target(targets[i]);
      virgl_encoder_write_dword(ctx->cbuf, tg->handle);
   }
   return 0;
}


int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

int virgl_encode_bind_shader(struct virgl_context *ctx,
                             uint32_t handle, uint32_t type)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type);
   return 0;
}