gallium: Add PIPE_CAP_BLEND_EQUATION_ADVANCED
[mesa.git] / src / gallium / drivers / virgl / virgl_encode.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdint.h>
24 #include <assert.h>
25 #include <string.h>
26
27 #include "util/format/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
33
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
39
40 #define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)
41
42 #define CONV_FORMAT(f) [PIPE_FORMAT_##f] = VIRGL_FORMAT_##f,
43
/* Translation table from gallium pipe_format values to virgl wire-protocol
 * formats.  Entries not listed stay zero-initialized; pipe_to_virgl_format()
 * reports a zero entry (other than PIPE_FORMAT_NONE) as missing from the
 * table, so new formats must be added here explicitly.
 */
static const enum virgl_formats virgl_formats_conv_table[PIPE_FORMAT_COUNT] = {
   CONV_FORMAT(B8G8R8A8_UNORM)
   CONV_FORMAT(B8G8R8X8_UNORM)
   CONV_FORMAT(A8R8G8B8_UNORM)
   CONV_FORMAT(X8R8G8B8_UNORM)
   CONV_FORMAT(B5G5R5A1_UNORM)
   CONV_FORMAT(B4G4R4A4_UNORM)
   CONV_FORMAT(B5G6R5_UNORM)
   CONV_FORMAT(R10G10B10A2_UNORM)
   CONV_FORMAT(L8_UNORM)
   CONV_FORMAT(A8_UNORM)
   CONV_FORMAT(L8A8_UNORM)
   CONV_FORMAT(L16_UNORM)
   CONV_FORMAT(Z16_UNORM)
   CONV_FORMAT(Z32_UNORM)
   CONV_FORMAT(Z32_FLOAT)
   CONV_FORMAT(Z24_UNORM_S8_UINT)
   CONV_FORMAT(S8_UINT_Z24_UNORM)
   CONV_FORMAT(Z24X8_UNORM)
   CONV_FORMAT(X8Z24_UNORM)
   CONV_FORMAT(S8_UINT)
   CONV_FORMAT(R64_FLOAT)
   CONV_FORMAT(R64G64_FLOAT)
   CONV_FORMAT(R64G64B64_FLOAT)
   CONV_FORMAT(R64G64B64A64_FLOAT)
   CONV_FORMAT(R32_FLOAT)
   CONV_FORMAT(R32G32_FLOAT)
   CONV_FORMAT(R32G32B32_FLOAT)
   CONV_FORMAT(R32G32B32A32_FLOAT)
   CONV_FORMAT(R32_UNORM)
   CONV_FORMAT(R32G32_UNORM)
   CONV_FORMAT(R32G32B32_UNORM)
   CONV_FORMAT(R32G32B32A32_UNORM)
   CONV_FORMAT(R32_USCALED)
   CONV_FORMAT(R32G32_USCALED)
   CONV_FORMAT(R32G32B32_USCALED)
   CONV_FORMAT(R32G32B32A32_USCALED)
   CONV_FORMAT(R32_SNORM)
   CONV_FORMAT(R32G32_SNORM)
   CONV_FORMAT(R32G32B32_SNORM)
   CONV_FORMAT(R32G32B32A32_SNORM)
   CONV_FORMAT(R32_SSCALED)
   CONV_FORMAT(R32G32_SSCALED)
   CONV_FORMAT(R32G32B32_SSCALED)
   CONV_FORMAT(R32G32B32A32_SSCALED)
   CONV_FORMAT(R16_UNORM)
   CONV_FORMAT(R16G16_UNORM)
   CONV_FORMAT(R16G16B16_UNORM)
   CONV_FORMAT(R16G16B16A16_UNORM)
   CONV_FORMAT(R16_USCALED)
   CONV_FORMAT(R16G16_USCALED)
   CONV_FORMAT(R16G16B16_USCALED)
   CONV_FORMAT(R16G16B16A16_USCALED)
   CONV_FORMAT(R16_SNORM)
   CONV_FORMAT(R16G16_SNORM)
   CONV_FORMAT(R16G16B16_SNORM)
   CONV_FORMAT(R16G16B16A16_SNORM)
   CONV_FORMAT(R16_SSCALED)
   CONV_FORMAT(R16G16_SSCALED)
   CONV_FORMAT(R16G16B16_SSCALED)
   CONV_FORMAT(R16G16B16A16_SSCALED)
   CONV_FORMAT(R8_UNORM)
   CONV_FORMAT(R8G8_UNORM)
   CONV_FORMAT(R8G8B8_UNORM)
   CONV_FORMAT(R8G8B8A8_UNORM)
   CONV_FORMAT(R8_USCALED)
   CONV_FORMAT(R8G8_USCALED)
   CONV_FORMAT(R8G8B8_USCALED)
   CONV_FORMAT(R8G8B8A8_USCALED)
   CONV_FORMAT(R8_SNORM)
   CONV_FORMAT(R8G8_SNORM)
   CONV_FORMAT(R8G8B8_SNORM)
   CONV_FORMAT(R8G8B8A8_SNORM)
   CONV_FORMAT(R8_SSCALED)
   CONV_FORMAT(R8G8_SSCALED)
   CONV_FORMAT(R8G8B8_SSCALED)
   CONV_FORMAT(R8G8B8A8_SSCALED)
   CONV_FORMAT(R16_FLOAT)
   CONV_FORMAT(R16G16_FLOAT)
   CONV_FORMAT(R16G16B16_FLOAT)
   CONV_FORMAT(R16G16B16A16_FLOAT)
   CONV_FORMAT(L8_SRGB)
   CONV_FORMAT(L8A8_SRGB)
   CONV_FORMAT(R8G8B8_SRGB)
   CONV_FORMAT(A8B8G8R8_SRGB)
   CONV_FORMAT(X8B8G8R8_SRGB)
   CONV_FORMAT(B8G8R8A8_SRGB)
   CONV_FORMAT(B8G8R8X8_SRGB)
   CONV_FORMAT(A8R8G8B8_SRGB)
   CONV_FORMAT(X8R8G8B8_SRGB)
   CONV_FORMAT(R8G8B8A8_SRGB)
   CONV_FORMAT(DXT1_RGB)
   CONV_FORMAT(DXT1_RGBA)
   CONV_FORMAT(DXT3_RGBA)
   CONV_FORMAT(DXT5_RGBA)
   CONV_FORMAT(DXT1_SRGB)
   CONV_FORMAT(DXT1_SRGBA)
   CONV_FORMAT(DXT3_SRGBA)
   CONV_FORMAT(DXT5_SRGBA)
   CONV_FORMAT(RGTC1_UNORM)
   CONV_FORMAT(RGTC1_SNORM)
   CONV_FORMAT(RGTC2_UNORM)
   CONV_FORMAT(RGTC2_SNORM)
   CONV_FORMAT(A8B8G8R8_UNORM)
   CONV_FORMAT(B5G5R5X1_UNORM)
   CONV_FORMAT(R10G10B10A2_USCALED)
   CONV_FORMAT(R11G11B10_FLOAT)
   CONV_FORMAT(R9G9B9E5_FLOAT)
   CONV_FORMAT(Z32_FLOAT_S8X24_UINT)
   CONV_FORMAT(B10G10R10A2_UNORM)
   CONV_FORMAT(R8G8B8X8_UNORM)
   CONV_FORMAT(B4G4R4X4_UNORM)
   CONV_FORMAT(X24S8_UINT)
   CONV_FORMAT(S8X24_UINT)
   CONV_FORMAT(X32_S8X24_UINT)
   CONV_FORMAT(B2G3R3_UNORM)
   CONV_FORMAT(L16A16_UNORM)
   CONV_FORMAT(A16_UNORM)
   CONV_FORMAT(I16_UNORM)
   CONV_FORMAT(LATC1_UNORM)
   CONV_FORMAT(LATC1_SNORM)
   CONV_FORMAT(LATC2_UNORM)
   CONV_FORMAT(LATC2_SNORM)
   CONV_FORMAT(A8_SNORM)
   CONV_FORMAT(L8_SNORM)
   CONV_FORMAT(L8A8_SNORM)
   CONV_FORMAT(A16_SNORM)
   CONV_FORMAT(L16_SNORM)
   CONV_FORMAT(L16A16_SNORM)
   CONV_FORMAT(A16_FLOAT)
   CONV_FORMAT(L16_FLOAT)
   CONV_FORMAT(L16A16_FLOAT)
   CONV_FORMAT(A32_FLOAT)
   CONV_FORMAT(L32_FLOAT)
   CONV_FORMAT(L32A32_FLOAT)
   CONV_FORMAT(YV12)
   CONV_FORMAT(YV16)
   CONV_FORMAT(IYUV)
   CONV_FORMAT(NV12)
   CONV_FORMAT(NV21)
   CONV_FORMAT(R8_UINT)
   CONV_FORMAT(R8G8_UINT)
   CONV_FORMAT(R8G8B8_UINT)
   CONV_FORMAT(R8G8B8A8_UINT)
   CONV_FORMAT(R8_SINT)
   CONV_FORMAT(R8G8_SINT)
   CONV_FORMAT(R8G8B8_SINT)
   CONV_FORMAT(R8G8B8A8_SINT)
   CONV_FORMAT(R16_UINT)
   CONV_FORMAT(R16G16_UINT)
   CONV_FORMAT(R16G16B16_UINT)
   CONV_FORMAT(R16G16B16A16_UINT)
   CONV_FORMAT(R16_SINT)
   CONV_FORMAT(R16G16_SINT)
   CONV_FORMAT(R16G16B16_SINT)
   CONV_FORMAT(R16G16B16A16_SINT)
   CONV_FORMAT(R32_UINT)
   CONV_FORMAT(R32G32_UINT)
   CONV_FORMAT(R32G32B32_UINT)
   CONV_FORMAT(R32G32B32A32_UINT)
   CONV_FORMAT(R32_SINT)
   CONV_FORMAT(R32G32_SINT)
   CONV_FORMAT(R32G32B32_SINT)
   CONV_FORMAT(R32G32B32A32_SINT)
   CONV_FORMAT(A8_UINT)
   CONV_FORMAT(L8_UINT)
   CONV_FORMAT(L8A8_UINT)
   CONV_FORMAT(A8_SINT)
   CONV_FORMAT(L8_SINT)
   CONV_FORMAT(L8A8_SINT)
   CONV_FORMAT(A16_UINT)
   CONV_FORMAT(L16_UINT)
   CONV_FORMAT(L16A16_UINT)
   CONV_FORMAT(A16_SINT)
   CONV_FORMAT(L16_SINT)
   CONV_FORMAT(L16A16_SINT)
   CONV_FORMAT(A32_UINT)
   CONV_FORMAT(L32_UINT)
   CONV_FORMAT(L32A32_UINT)
   CONV_FORMAT(A32_SINT)
   CONV_FORMAT(L32_SINT)
   CONV_FORMAT(L32A32_SINT)
   CONV_FORMAT(R10G10B10A2_SSCALED)
   CONV_FORMAT(R10G10B10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_UINT)
   CONV_FORMAT(R8G8B8X8_SNORM)
   CONV_FORMAT(R8G8B8X8_SRGB)
   CONV_FORMAT(R8G8B8X8_UINT)
   CONV_FORMAT(R8G8B8X8_SINT)
   CONV_FORMAT(B10G10R10X2_UNORM)
   CONV_FORMAT(R16G16B16X16_UNORM)
   CONV_FORMAT(R16G16B16X16_SNORM)
   CONV_FORMAT(R16G16B16X16_FLOAT)
   CONV_FORMAT(R16G16B16X16_UINT)
   CONV_FORMAT(R16G16B16X16_SINT)
   CONV_FORMAT(R32G32B32X32_FLOAT)
   CONV_FORMAT(R32G32B32X32_UINT)
   CONV_FORMAT(R32G32B32X32_SINT)
   CONV_FORMAT(R10G10B10A2_UINT)
   CONV_FORMAT(BPTC_RGBA_UNORM)
   CONV_FORMAT(BPTC_SRGBA)
   CONV_FORMAT(BPTC_RGB_FLOAT)
   CONV_FORMAT(BPTC_RGB_UFLOAT)
   CONV_FORMAT(R10G10B10X2_UNORM)
   CONV_FORMAT(A4B4G4R4_UNORM)
   CONV_FORMAT(R8_SRGB)
   CONV_FORMAT(ETC2_RGB8)
   CONV_FORMAT(ETC2_SRGB8)
   CONV_FORMAT(ETC2_RGB8A1)
   CONV_FORMAT(ETC2_SRGB8A1)
   CONV_FORMAT(ETC2_RGBA8)
   CONV_FORMAT(ETC2_SRGBA8)
   CONV_FORMAT(ETC2_R11_UNORM)
   CONV_FORMAT(ETC2_R11_SNORM)
   CONV_FORMAT(ETC2_RG11_UNORM)
   CONV_FORMAT(ETC2_RG11_SNORM)
};
262
263 enum virgl_formats pipe_to_virgl_format(enum pipe_format format)
264 {
265 enum virgl_formats vformat = virgl_formats_conv_table[format];
266 if (format != PIPE_FORMAT_NONE && !vformat)
267 debug_printf("VIRGL: pipe format %s not in the format table\n", util_format_name(format));
268 return vformat;
269 }
270
271 static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
272 uint32_t dword)
273 {
274 int len = (dword >> 16);
275
276 if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
277 ctx->base.flush(&ctx->base, NULL, 0);
278
279 virgl_encoder_write_dword(ctx->cbuf, dword);
280 return 0;
281 }
282
283 static void virgl_encoder_emit_resource(struct virgl_screen *vs,
284 struct virgl_cmd_buf *buf,
285 struct virgl_resource *res)
286 {
287 struct virgl_winsys *vws = vs->vws;
288 if (res && res->hw_res)
289 vws->emit_res(vws, buf, res->hw_res, TRUE);
290 else {
291 virgl_encoder_write_dword(buf, 0);
292 }
293 }
294
295 static void virgl_encoder_write_res(struct virgl_context *ctx,
296 struct virgl_resource *res)
297 {
298 struct virgl_screen *vs = virgl_screen(ctx->base.screen);
299 virgl_encoder_emit_resource(vs, ctx->cbuf, res);
300 }
301
302 int virgl_encode_bind_object(struct virgl_context *ctx,
303 uint32_t handle, uint32_t object)
304 {
305 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
306 virgl_encoder_write_dword(ctx->cbuf, handle);
307 return 0;
308 }
309
310 int virgl_encode_delete_object(struct virgl_context *ctx,
311 uint32_t handle, uint32_t object)
312 {
313 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
314 virgl_encoder_write_dword(ctx->cbuf, handle);
315 return 0;
316 }
317
318 int virgl_encode_blend_state(struct virgl_context *ctx,
319 uint32_t handle,
320 const struct pipe_blend_state *blend_state)
321 {
322 uint32_t tmp;
323 int i;
324
325 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
326 virgl_encoder_write_dword(ctx->cbuf, handle);
327
328 tmp =
329 VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
330 VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
331 VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
332 VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
333 VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);
334
335 virgl_encoder_write_dword(ctx->cbuf, tmp);
336
337 tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
338 virgl_encoder_write_dword(ctx->cbuf, tmp);
339
340 for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
341 tmp =
342 VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
343 VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
344 VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
345 VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor)|
346 VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
347 VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(blend_state->rt[i].alpha_src_factor) |
348 VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
349 VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
350 virgl_encoder_write_dword(ctx->cbuf, tmp);
351 }
352 return 0;
353 }
354
/* Encode a CREATE_OBJECT(DSA) command describing depth/stencil/alpha state.
 * Layout: handle, S0 (depth + alpha test controls), two S1 dwords (one per
 * stencil side), then the alpha reference value as a float bit pattern.
 */
int virgl_encode_dsa_state(struct virgl_context *ctx,
                           uint32_t handle,
                           const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   uint32_t tmp;
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: depth test enable/writemask/func plus alpha test enable/func. */
   tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth.enabled) |
      VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth.writemask) |
      VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth.func) |
      VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha.enabled) |
      VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha.func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S1: one dword per stencil side. */
   for (i = 0; i < 2; i++) {
      tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
         VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
         VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
         VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }

   /* Alpha reference value travels as the raw bits of the float. */
   virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha.ref_value));
   return 0;
}
/* Encode a CREATE_OBJECT(RASTERIZER) command.
 * Layout: handle, S0 (all boolean/enum controls packed into one dword),
 * S1 point size, S2 sprite coord enable mask, S3 line stipple + clip plane
 * enables, S4-S7 line width and polygon offset parameters (floats sent as
 * raw bit patterns via fui()).
 */
int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: one packed field per pipe_rasterizer_state control bit. */
   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
      VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
      VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
      VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
      VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
      VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
      VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
      VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
      VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
      VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
      VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
      VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
      VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
      VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
      VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
      VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
      VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
      VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
      VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
      VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
      VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
      VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
      VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
      VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
      VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
      VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
      VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
      VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
      VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
      VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
      VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}
437
438 static void virgl_emit_shader_header(struct virgl_context *ctx,
439 uint32_t handle, uint32_t len,
440 uint32_t type, uint32_t offlen,
441 uint32_t num_tokens)
442 {
443 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
444 virgl_encoder_write_dword(ctx->cbuf, handle);
445 virgl_encoder_write_dword(ctx->cbuf, type);
446 virgl_encoder_write_dword(ctx->cbuf, offlen);
447 virgl_encoder_write_dword(ctx->cbuf, num_tokens);
448 }
449
450 static void virgl_emit_shader_streamout(struct virgl_context *ctx,
451 const struct pipe_stream_output_info *so_info)
452 {
453 int num_outputs = 0;
454 int i;
455 uint32_t tmp;
456
457 if (so_info)
458 num_outputs = so_info->num_outputs;
459
460 virgl_encoder_write_dword(ctx->cbuf, num_outputs);
461 if (num_outputs) {
462 for (i = 0; i < 4; i++)
463 virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);
464
465 for (i = 0; i < so_info->num_outputs; i++) {
466 tmp =
467 VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
468 VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
469 VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
470 VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
471 VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
472 virgl_encoder_write_dword(ctx->cbuf, tmp);
473 virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
474 }
475 }
476 }
477
478 int virgl_encode_shader_state(struct virgl_context *ctx,
479 uint32_t handle,
480 uint32_t type,
481 const struct pipe_stream_output_info *so_info,
482 uint32_t cs_req_local_mem,
483 const struct tgsi_token *tokens)
484 {
485 char *str, *sptr;
486 uint32_t shader_len, len;
487 bool bret;
488 int num_tokens = tgsi_num_tokens(tokens);
489 int str_total_size = 65536;
490 int retry_size = 1;
491 uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
492 bool first_pass;
493 str = CALLOC(1, str_total_size);
494 if (!str)
495 return -1;
496
497 do {
498 int old_size;
499
500 bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
501 if (bret == false) {
502 if (virgl_debug & VIRGL_DEBUG_VERBOSE)
503 debug_printf("Failed to translate shader in available space - trying again\n");
504 old_size = str_total_size;
505 str_total_size = 65536 * retry_size;
506 retry_size *= 2;
507 str = REALLOC(str, old_size, str_total_size);
508 if (!str)
509 return -1;
510 }
511 } while (bret == false && retry_size < 1024);
512
513 if (bret == false)
514 return -1;
515
516 if (virgl_debug & VIRGL_DEBUG_TGSI)
517 debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);
518
519 shader_len = strlen(str) + 1;
520
521 left_bytes = shader_len;
522
523 base_hdr_size = 5;
524 strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
525 first_pass = true;
526 sptr = str;
527 while (left_bytes) {
528 uint32_t length, offlen;
529 int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
530 if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_ENCODE_MAX_DWORDS)
531 ctx->base.flush(&ctx->base, NULL, 0);
532
533 thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
534
535 length = MIN2(thispass, left_bytes);
536 len = ((length + 3) / 4) + hdr_len;
537
538 if (first_pass)
539 offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
540 else
541 offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
542
543 virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
544
545 if (type == PIPE_SHADER_COMPUTE)
546 virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
547 else
548 virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
549
550 virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
551
552 sptr += length;
553 first_pass = false;
554 left_bytes -= length;
555 }
556
557 FREE(str);
558 return 0;
559 }
560
561
562 int virgl_encode_clear(struct virgl_context *ctx,
563 unsigned buffers,
564 const union pipe_color_union *color,
565 double depth, unsigned stencil)
566 {
567 int i;
568 uint64_t qword;
569
570 STATIC_ASSERT(sizeof(qword) == sizeof(depth));
571 memcpy(&qword, &depth, sizeof(qword));
572
573 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
574 virgl_encoder_write_dword(ctx->cbuf, buffers);
575 for (i = 0; i < 4; i++)
576 virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
577 virgl_encoder_write_qword(ctx->cbuf, qword);
578 virgl_encoder_write_dword(ctx->cbuf, stencil);
579 return 0;
580 }
581
/* Encode a CLEAR_TEXTURE command for one mip level / box of a resource.
 * <data> holds one texel in the resource's format; its bytes are copied
 * verbatim into four dwords and interpreted host-side.
 */
int virgl_encode_clear_texture(struct virgl_context *ctx,
                               struct virgl_resource *res,
                               unsigned int level,
                               const struct pipe_box *box,
                               const void *data)
{
   const struct util_format_description *desc = util_format_description(res->u.b.format);
   /* Size of one format block in bits; block_bits / 8 bytes are copied. */
   unsigned block_bits = desc->block.bits;
   uint32_t arr[4] = {0};
   /* The spec describe <data> as a pointer to an array of between one
    * and four components of texel data that will be used as the source
    * for the constant fill value.
    * Here, we are just copying the memory into <arr>. We do not try to
    * re-create the data array. The host part will take care of interpreting
    * the memory and applying the correct format to the clear call.
    */
   memcpy(&arr, data, block_bits / 8);

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR_TEXTURE, 0, VIRGL_CLEAR_TEXTURE_SIZE));
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, level);
   virgl_encoder_write_dword(ctx->cbuf, box->x);
   virgl_encoder_write_dword(ctx->cbuf, box->y);
   virgl_encoder_write_dword(ctx->cbuf, box->z);
   virgl_encoder_write_dword(ctx->cbuf, box->width);
   virgl_encoder_write_dword(ctx->cbuf, box->height);
   virgl_encoder_write_dword(ctx->cbuf, box->depth);
   /* Always send all four dwords; unused ones stay zero. */
   for (unsigned i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, arr[i]);
   return 0;
}
613
/* Encode SET_FRAMEBUFFER_STATE: the color buffer count, the zsbuf surface
 * handle (0 if none), then one surface handle per color buffer.  When the
 * host advertises VIRGL_CAP_FB_NO_ATTACH, also send the framebuffer's
 * width/height and layers/samples so attachment-less framebuffers work.
 */
int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
   virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
   virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
   for (i = 0; i < state->nr_cbufs; i++) {
      struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
      virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
   }

   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
      /* Two packed dwords: width|height<<16 and layers|samples<<16. */
      virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
      virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
      virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
   }
   return 0;
}
636
637 int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
638 int start_slot,
639 int num_viewports,
640 const struct pipe_viewport_state *states)
641 {
642 int i,v;
643 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
644 virgl_encoder_write_dword(ctx->cbuf, start_slot);
645 for (v = 0; v < num_viewports; v++) {
646 for (i = 0; i < 3; i++)
647 virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
648 for (i = 0; i < 3; i++)
649 virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
650 }
651 return 0;
652 }
653
654 int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
655 uint32_t handle,
656 unsigned num_elements,
657 const struct pipe_vertex_element *element)
658 {
659 int i;
660 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
661 virgl_encoder_write_dword(ctx->cbuf, handle);
662 for (i = 0; i < num_elements; i++) {
663 virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
664 virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
665 virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
666 virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(element[i].src_format));
667 }
668 return 0;
669 }
670
671 int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
672 unsigned num_buffers,
673 const struct pipe_vertex_buffer *buffers)
674 {
675 int i;
676 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
677 for (i = 0; i < num_buffers; i++) {
678 struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
679 virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
680 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
681 virgl_encoder_write_res(ctx, res);
682 }
683 return 0;
684 }
685
686 int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
687 const struct virgl_indexbuf *ib)
688 {
689 int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
690 struct virgl_resource *res = NULL;
691 if (ib)
692 res = virgl_resource(ib->buffer);
693
694 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
695 virgl_encoder_write_res(ctx, res);
696 if (ib) {
697 virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
698 virgl_encoder_write_dword(ctx->cbuf, ib->offset);
699 }
700 return 0;
701 }
702
/* Encode a DRAW_VBO command.  The command length grows with the draw
 * kind: base draw, + tess/drawid dwords for patches, + indirect section
 * when an indirect buffer is present.  The dword order here is the wire
 * format and must not be reordered.
 */
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
                           const struct pipe_draw_info *info)
{
   uint32_t length = VIRGL_DRAW_VBO_SIZE;
   if (info->mode == PIPE_PRIM_PATCHES)
      length = VIRGL_DRAW_VBO_SIZE_TESS;
   /* Indirect supersedes tess sizing; its layout includes the tess dwords. */
   if (info->indirect)
      length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
   virgl_encoder_write_dword(ctx->cbuf, info->start);
   virgl_encoder_write_dword(ctx->cbuf, info->count);
   virgl_encoder_write_dword(ctx->cbuf, info->mode);
   virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
   virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
   virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
   virgl_encoder_write_dword(ctx->cbuf, info->restart_index);
   virgl_encoder_write_dword(ctx->cbuf, info->min_index);
   virgl_encoder_write_dword(ctx->cbuf, info->max_index);
   /* Stream-output-driven draws send the source buffer size, else 0. */
   if (info->count_from_stream_output)
      virgl_encoder_write_dword(ctx->cbuf, info->count_from_stream_output->buffer_size);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
      virgl_encoder_write_dword(ctx->cbuf, info->vertices_per_patch); /* vertices per patch */
      virgl_encoder_write_dword(ctx->cbuf, info->drawid); /* drawid */
   }
   if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
      virgl_encoder_write_res(ctx, virgl_resource(info->indirect->buffer));
      virgl_encoder_write_dword(ctx->cbuf, info->indirect->offset);
      virgl_encoder_write_dword(ctx->cbuf, info->indirect->stride); /* indirect stride */
      virgl_encoder_write_dword(ctx->cbuf, info->indirect->draw_count); /* indirect draw count */
      virgl_encoder_write_dword(ctx->cbuf, info->indirect->indirect_draw_count_offset); /* indirect draw count offset */
      if (info->indirect->indirect_draw_count)
         virgl_encoder_write_res(ctx, virgl_resource(info->indirect->indirect_draw_count));
      else
         virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
   }
   return 0;
}
744
/* Encode CREATE_OBJECT(SURFACE) for a texture surface: the resource
 * reference, the virgl format, the mip level, and first/last layer packed
 * into one dword.  Buffer resources are not handled here (see the assert).
 */
int virgl_encoder_create_surface(struct virgl_context *ctx,
                                 uint32_t handle,
                                 struct virgl_resource *res,
                                 const struct pipe_surface *templat)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(templat->format));

   assert(templat->texture->target != PIPE_BUFFER);
   virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
   /* first_layer in the low 16 bits, last_layer in the high 16. */
   virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));

   return 0;
}
761
762 int virgl_encoder_create_so_target(struct virgl_context *ctx,
763 uint32_t handle,
764 struct virgl_resource *res,
765 unsigned buffer_offset,
766 unsigned buffer_size)
767 {
768 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
769 virgl_encoder_write_dword(ctx->cbuf, handle);
770 virgl_encoder_write_res(ctx, res);
771 virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
772 virgl_encoder_write_dword(ctx->cbuf, buffer_size);
773 return 0;
774 }
775
/* Selects how the stride fields of a transfer3d command body are filled. */
enum virgl_transfer3d_encode_stride {
   /* The stride and layer_stride are explicitly specified in the command. */
   virgl_transfer3d_explicit_stride,
   /* The stride and layer_stride are inferred by the host. In this case, the
    * host will use the image stride and layer_stride for the specified level.
    */
   virgl_transfer3d_host_inferred_stride,
};
784
/* Emit the body shared by transfer-style commands: the resource reference,
 * level, usage, (layer_)stride — zeroed for host-inferred strides — and
 * the six dwords of the transfer box.
 */
static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
                                            struct virgl_cmd_buf *buf,
                                            struct virgl_transfer *xfer,
                                            enum virgl_transfer3d_encode_stride encode_stride)

{
   struct pipe_transfer *transfer = &xfer->base;
   unsigned stride;
   unsigned layer_stride;

   if (encode_stride == virgl_transfer3d_explicit_stride) {
      stride = transfer->stride;
      layer_stride = transfer->layer_stride;
   } else if (encode_stride == virgl_transfer3d_host_inferred_stride) {
      /* Zero strides tell the host to use the level's own image strides. */
      stride = 0;
      layer_stride = 0;
   } else {
      assert(!"Invalid virgl_transfer3d_encode_stride value");
   }

   /* We cannot use virgl_encoder_emit_resource with transfer->resource here
    * because transfer->resource might have a different virgl_hw_res than what
    * this transfer targets, which is saved in xfer->hw_res.
    */
   vs->vws->emit_res(vs->vws, buf, xfer->hw_res, TRUE);
   virgl_encoder_write_dword(buf, transfer->level);
   virgl_encoder_write_dword(buf, transfer->usage);
   virgl_encoder_write_dword(buf, stride);
   virgl_encoder_write_dword(buf, layer_stride);
   virgl_encoder_write_dword(buf, transfer->box.x);
   virgl_encoder_write_dword(buf, transfer->box.y);
   virgl_encoder_write_dword(buf, transfer->box.z);
   virgl_encoder_write_dword(buf, transfer->box.width);
   virgl_encoder_write_dword(buf, transfer->box.height);
   virgl_encoder_write_dword(buf, transfer->box.depth);
}
821
822 int virgl_encoder_inline_write(struct virgl_context *ctx,
823 struct virgl_resource *res,
824 unsigned level, unsigned usage,
825 const struct pipe_box *box,
826 const void *data, unsigned stride,
827 unsigned layer_stride)
828 {
829 uint32_t size = (stride ? stride : box->width) * box->height;
830 uint32_t length, thispass, left_bytes;
831 struct virgl_transfer transfer;
832 struct virgl_screen *vs = virgl_screen(ctx->base.screen);
833
834 transfer.base.resource = &res->u.b;
835 transfer.hw_res = res->hw_res;
836 transfer.base.level = level;
837 transfer.base.usage = usage;
838 transfer.base.box = *box;
839
840 length = 11 + (size + 3) / 4;
841 if ((ctx->cbuf->cdw + length + 1) > VIRGL_ENCODE_MAX_DWORDS) {
842 if (box->height > 1 || box->depth > 1) {
843 debug_printf("inline transfer failed due to multi dimensions and too large\n");
844 assert(0);
845 }
846 }
847
848 left_bytes = size;
849 while (left_bytes) {
850 if (ctx->cbuf->cdw + 12 >= VIRGL_ENCODE_MAX_DWORDS)
851 ctx->base.flush(&ctx->base, NULL, 0);
852
853 thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - 12) * 4;
854
855 length = MIN2(thispass, left_bytes);
856
857 transfer.base.box.width = length;
858 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
859 virgl_encoder_transfer3d_common(vs, ctx->cbuf, &transfer,
860 virgl_transfer3d_host_inferred_stride);
861 virgl_encoder_write_block(ctx->cbuf, data, length);
862 left_bytes -= length;
863 transfer.base.box.x += length;
864 data += length;
865 }
866 return 0;
867 }
868
/* Stub: front-buffer flushes are not encoded.  The commented-out encoding
 * below was never enabled (note the typo in the command name). */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}
876
877 int virgl_encode_sampler_state(struct virgl_context *ctx,
878 uint32_t handle,
879 const struct pipe_sampler_state *state)
880 {
881 uint32_t tmp;
882 int i;
883 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
884 virgl_encoder_write_dword(ctx->cbuf, handle);
885
886 tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
887 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
888 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
889 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
890 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
891 VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
892 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
893 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
894 VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map);
895
896 virgl_encoder_write_dword(ctx->cbuf, tmp);
897 virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
898 virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
899 virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
900 for (i = 0; i < 4; i++)
901 virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
902 return 0;
903 }
904
905
/* Create a host sampler-view object for `res`.
 * Dword layout: handle, resource, format(+target), two element/layer-level
 * dwords (buffer vs. texture variants), and the packed swizzle. */
int virgl_encode_sampler_view(struct virgl_context *ctx,
                              uint32_t handle,
                              struct virgl_resource *res,
                              const struct pipe_sampler_view *state)
{
   unsigned elem_size = util_format_get_blocksize(state->format);
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   uint32_t tmp;
   uint32_t dword_fmt_target = pipe_to_virgl_format(state->format);
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   /* With texture-view support the pipe target is packed into the top byte
    * of the format dword. */
   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
     dword_fmt_target |= (state->target << 24);
   virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
   if (res->u.b.target == PIPE_BUFFER) {
      /* Buffer views: first and last element index, in units of the
       * format's block size. */
      virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
      virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
   } else {
      if (res->metadata.plane) {
         /* Multi-planar resources reuse the layer dword to select the
          * plane; layered views of planes are not supported. */
         debug_assert(state->u.tex.first_layer == 0 && state->u.tex.last_layer == 0);
         virgl_encoder_write_dword(ctx->cbuf, res->metadata.plane);
      } else {
         /* first_layer in the low 16 bits, last_layer in the high 16. */
         virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
      }
      /* first_level in the low byte, last_level shifted up by 8. */
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
   }
   tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
         VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
         VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
         VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   return 0;
}
940
941 int virgl_encode_set_sampler_views(struct virgl_context *ctx,
942 uint32_t shader_type,
943 uint32_t start_slot,
944 uint32_t num_views,
945 struct virgl_sampler_view **views)
946 {
947 int i;
948 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
949 virgl_encoder_write_dword(ctx->cbuf, shader_type);
950 virgl_encoder_write_dword(ctx->cbuf, start_slot);
951 for (i = 0; i < num_views; i++) {
952 uint32_t handle = views[i] ? views[i]->handle : 0;
953 virgl_encoder_write_dword(ctx->cbuf, handle);
954 }
955 return 0;
956 }
957
958 int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
959 uint32_t shader_type,
960 uint32_t start_slot,
961 uint32_t num_handles,
962 uint32_t *handles)
963 {
964 int i;
965 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
966 virgl_encoder_write_dword(ctx->cbuf, shader_type);
967 virgl_encoder_write_dword(ctx->cbuf, start_slot);
968 for (i = 0; i < num_handles; i++)
969 virgl_encoder_write_dword(ctx->cbuf, handles[i]);
970 return 0;
971 }
972
/* Upload an inline constant buffer of `size` dwords for shader stage
 * `shader`, slot `index`.
 *
 * NOTE(review): the command header always claims size + 2 dwords, but the
 * payload is only written when `data` is non-NULL — a NULL `data` with
 * size > 0 would leave the stream short.  Presumably callers never do
 * that; confirm. */
int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
                                        uint32_t shader,
                                        uint32_t index,
                                        uint32_t size,
                                        const void *data)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, index);
   if (data)
      /* size is in dwords; write_block takes bytes. */
      virgl_encoder_write_block(ctx->cbuf, data, size * 4);
   return 0;
}
986
987 int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
988 uint32_t shader,
989 uint32_t index,
990 uint32_t offset,
991 uint32_t length,
992 struct virgl_resource *res)
993 {
994 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
995 virgl_encoder_write_dword(ctx->cbuf, shader);
996 virgl_encoder_write_dword(ctx->cbuf, index);
997 virgl_encoder_write_dword(ctx->cbuf, offset);
998 virgl_encoder_write_dword(ctx->cbuf, length);
999 virgl_encoder_write_res(ctx, res);
1000 return 0;
1001 }
1002
1003
1004 int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
1005 const struct pipe_stencil_ref *ref)
1006 {
1007 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
1008 virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0] , (ref->ref_value[1])));
1009 return 0;
1010 }
1011
1012 int virgl_encoder_set_blend_color(struct virgl_context *ctx,
1013 const struct pipe_blend_color *color)
1014 {
1015 int i;
1016 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
1017 for (i = 0; i < 4; i++)
1018 virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
1019 return 0;
1020 }
1021
1022 int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
1023 unsigned start_slot,
1024 int num_scissors,
1025 const struct pipe_scissor_state *ss)
1026 {
1027 int i;
1028 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
1029 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1030 for (i = 0; i < num_scissors; i++) {
1031 virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
1032 virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
1033 }
1034 return 0;
1035 }
1036
1037 void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
1038 const struct pipe_poly_stipple *ps)
1039 {
1040 int i;
1041 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
1042 for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
1043 virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
1044 }
1045 }
1046
1047 void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
1048 unsigned sample_mask)
1049 {
1050 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
1051 virgl_encoder_write_dword(ctx->cbuf, sample_mask);
1052 }
1053
1054 void virgl_encoder_set_min_samples(struct virgl_context *ctx,
1055 unsigned min_samples)
1056 {
1057 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
1058 virgl_encoder_write_dword(ctx->cbuf, min_samples);
1059 }
1060
1061 void virgl_encoder_set_clip_state(struct virgl_context *ctx,
1062 const struct pipe_clip_state *clip)
1063 {
1064 int i, j;
1065 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
1066 for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
1067 for (j = 0; j < 4; j++) {
1068 virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
1069 }
1070 }
1071 }
1072
1073 int virgl_encode_resource_copy_region(struct virgl_context *ctx,
1074 struct virgl_resource *dst_res,
1075 unsigned dst_level,
1076 unsigned dstx, unsigned dsty, unsigned dstz,
1077 struct virgl_resource *src_res,
1078 unsigned src_level,
1079 const struct pipe_box *src_box)
1080 {
1081 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
1082 virgl_encoder_write_res(ctx, dst_res);
1083 virgl_encoder_write_dword(ctx->cbuf, dst_level);
1084 virgl_encoder_write_dword(ctx->cbuf, dstx);
1085 virgl_encoder_write_dword(ctx->cbuf, dsty);
1086 virgl_encoder_write_dword(ctx->cbuf, dstz);
1087 virgl_encoder_write_res(ctx, src_res);
1088 virgl_encoder_write_dword(ctx->cbuf, src_level);
1089 virgl_encoder_write_dword(ctx->cbuf, src_box->x);
1090 virgl_encoder_write_dword(ctx->cbuf, src_box->y);
1091 virgl_encoder_write_dword(ctx->cbuf, src_box->z);
1092 virgl_encoder_write_dword(ctx->cbuf, src_box->width);
1093 virgl_encoder_write_dword(ctx->cbuf, src_box->height);
1094 virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
1095 return 0;
1096 }
1097
1098 int virgl_encode_blit(struct virgl_context *ctx,
1099 struct virgl_resource *dst_res,
1100 struct virgl_resource *src_res,
1101 const struct pipe_blit_info *blit)
1102 {
1103 uint32_t tmp;
1104 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
1105 tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
1106 VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
1107 VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
1108 VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
1109 VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
1110 virgl_encoder_write_dword(ctx->cbuf, tmp);
1111 virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
1112 virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));
1113
1114 virgl_encoder_write_res(ctx, dst_res);
1115 virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
1116 virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->dst.format));
1117 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
1118 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
1119 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
1120 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
1121 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
1122 virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);
1123
1124 virgl_encoder_write_res(ctx, src_res);
1125 virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
1126 virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->src.format));
1127 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
1128 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
1129 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
1130 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
1131 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
1132 virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
1133 return 0;
1134 }
1135
1136 int virgl_encoder_create_query(struct virgl_context *ctx,
1137 uint32_t handle,
1138 uint query_type,
1139 uint query_index,
1140 struct virgl_resource *res,
1141 uint32_t offset)
1142 {
1143 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
1144 virgl_encoder_write_dword(ctx->cbuf, handle);
1145 virgl_encoder_write_dword(ctx->cbuf, ((query_type & 0xffff) | (query_index << 16)));
1146 virgl_encoder_write_dword(ctx->cbuf, offset);
1147 virgl_encoder_write_res(ctx, res);
1148 return 0;
1149 }
1150
1151 int virgl_encoder_begin_query(struct virgl_context *ctx,
1152 uint32_t handle)
1153 {
1154 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
1155 virgl_encoder_write_dword(ctx->cbuf, handle);
1156 return 0;
1157 }
1158
1159 int virgl_encoder_end_query(struct virgl_context *ctx,
1160 uint32_t handle)
1161 {
1162 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
1163 virgl_encoder_write_dword(ctx->cbuf, handle);
1164 return 0;
1165 }
1166
1167 int virgl_encoder_get_query_result(struct virgl_context *ctx,
1168 uint32_t handle, boolean wait)
1169 {
1170 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
1171 virgl_encoder_write_dword(ctx->cbuf, handle);
1172 virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
1173 return 0;
1174 }
1175
1176 int virgl_encoder_render_condition(struct virgl_context *ctx,
1177 uint32_t handle, boolean condition,
1178 enum pipe_render_cond_flag mode)
1179 {
1180 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
1181 virgl_encoder_write_dword(ctx->cbuf, handle);
1182 virgl_encoder_write_dword(ctx->cbuf, condition);
1183 virgl_encoder_write_dword(ctx->cbuf, mode);
1184 return 0;
1185 }
1186
1187 int virgl_encoder_set_so_targets(struct virgl_context *ctx,
1188 unsigned num_targets,
1189 struct pipe_stream_output_target **targets,
1190 unsigned append_bitmask)
1191 {
1192 int i;
1193
1194 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
1195 virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
1196 for (i = 0; i < num_targets; i++) {
1197 struct virgl_so_target *tg = virgl_so_target(targets[i]);
1198 virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
1199 }
1200 return 0;
1201 }
1202
1203
1204 int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
1205 {
1206 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
1207 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
1208 return 0;
1209 }
1210
1211 int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
1212 {
1213 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
1214 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
1215 return 0;
1216 }
1217
1218 int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
1219 {
1220 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
1221 virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
1222 return 0;
1223 }
1224
1225 int virgl_encode_bind_shader(struct virgl_context *ctx,
1226 uint32_t handle, uint32_t type)
1227 {
1228 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
1229 virgl_encoder_write_dword(ctx->cbuf, handle);
1230 virgl_encoder_write_dword(ctx->cbuf, type);
1231 return 0;
1232 }
1233
1234 int virgl_encode_set_tess_state(struct virgl_context *ctx,
1235 const float outer[4],
1236 const float inner[2])
1237 {
1238 int i;
1239 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
1240 for (i = 0; i < 4; i++)
1241 virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
1242 for (i = 0; i < 2; i++)
1243 virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
1244 return 0;
1245 }
1246
1247 int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
1248 enum pipe_shader_type shader,
1249 unsigned start_slot, unsigned count,
1250 const struct pipe_shader_buffer *buffers)
1251 {
1252 int i;
1253 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));
1254
1255 virgl_encoder_write_dword(ctx->cbuf, shader);
1256 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1257 for (i = 0; i < count; i++) {
1258 if (buffers && buffers[i].buffer) {
1259 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1260 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
1261 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
1262 virgl_encoder_write_res(ctx, res);
1263
1264 util_range_add(&res->u.b, &res->valid_buffer_range, buffers[i].buffer_offset,
1265 buffers[i].buffer_offset + buffers[i].buffer_size);
1266 virgl_resource_dirty(res, 0);
1267 } else {
1268 virgl_encoder_write_dword(ctx->cbuf, 0);
1269 virgl_encoder_write_dword(ctx->cbuf, 0);
1270 virgl_encoder_write_dword(ctx->cbuf, 0);
1271 }
1272 }
1273 return 0;
1274 }
1275
1276 int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
1277 unsigned start_slot, unsigned count,
1278 const struct pipe_shader_buffer *buffers)
1279 {
1280 int i;
1281 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));
1282
1283 virgl_encoder_write_dword(ctx->cbuf, start_slot);
1284 for (i = 0; i < count; i++) {
1285 if (buffers && buffers[i].buffer) {
1286 struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1287 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
1288 virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
1289 virgl_encoder_write_res(ctx, res);
1290
1291 util_range_add(&res->u.b, &res->valid_buffer_range, buffers[i].buffer_offset,
1292 buffers[i].buffer_offset + buffers[i].buffer_size);
1293 virgl_resource_dirty(res, 0);
1294 } else {
1295 virgl_encoder_write_dword(ctx->cbuf, 0);
1296 virgl_encoder_write_dword(ctx->cbuf, 0);
1297 virgl_encoder_write_dword(ctx->cbuf, 0);
1298 }
1299 }
1300 return 0;
1301 }
1302
/* Bind shader images for `shader`: five dwords per slot (format, access,
 * offset, size, resource), zeros for empty slots.
 *
 * NOTE(review): u.buf.offset/u.buf.size are written for every image, even
 * texture images where `u` holds the u.tex variant of the union —
 * presumably the host decodes them according to the resource's target;
 * confirm against virglrenderer. */
int virgl_encode_set_shader_images(struct virgl_context *ctx,
                                   enum pipe_shader_type shader,
                                   unsigned start_slot, unsigned count,
                                   const struct pipe_image_view *images)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));

   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < count; i++) {
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(images[i].format));
         virgl_encoder_write_dword(ctx->cbuf, images[i].access);
         virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
         virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
         virgl_encoder_write_res(ctx, res);

         /* Only buffer images have a meaningful byte range to track. */
         if (res->u.b.target == PIPE_BUFFER) {
            util_range_add(&res->u.b, &res->valid_buffer_range, images[i].u.buf.offset,
                           images[i].u.buf.offset + images[i].u.buf.size);
         }
         /* Images are host-writable; mark the bound level dirty. */
         virgl_resource_dirty(res, images[i].u.tex.level);
      } else {
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
      }
   }
   return 0;
}
1337
1338 int virgl_encode_memory_barrier(struct virgl_context *ctx,
1339 unsigned flags)
1340 {
1341 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
1342 virgl_encoder_write_dword(ctx->cbuf, flags);
1343 return 0;
1344 }
1345
1346 int virgl_encode_launch_grid(struct virgl_context *ctx,
1347 const struct pipe_grid_info *grid_info)
1348 {
1349 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
1350 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
1351 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
1352 virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
1353 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
1354 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
1355 virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
1356 if (grid_info->indirect) {
1357 struct virgl_resource *res = virgl_resource(grid_info->indirect);
1358 virgl_encoder_write_res(ctx, res);
1359 } else
1360 virgl_encoder_write_dword(ctx->cbuf, 0);
1361 virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
1362 return 0;
1363 }
1364
1365 int virgl_encode_texture_barrier(struct virgl_context *ctx,
1366 unsigned flags)
1367 {
1368 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
1369 virgl_encoder_write_dword(ctx->cbuf, flags);
1370 return 0;
1371 }
1372
1373 int virgl_encode_host_debug_flagstring(struct virgl_context *ctx,
1374 const char *flagstring)
1375 {
1376 unsigned long slen = strlen(flagstring) + 1;
1377 uint32_t sslen;
1378 uint32_t string_length;
1379
1380 if (!slen)
1381 return 0;
1382
1383 if (slen > 4 * 0xffff) {
1384 debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1385 slen = 4 * 0xffff;
1386 }
1387
1388 sslen = (uint32_t )(slen + 3) / 4;
1389 string_length = (uint32_t)MIN2(sslen * 4, slen);
1390
1391 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen));
1392 virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length);
1393 return 0;
1394 }
1395
1396 int virgl_encode_tweak(struct virgl_context *ctx, enum vrend_tweak_type tweak, uint32_t value)
1397 {
1398 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS, 0, VIRGL_SET_TWEAKS_SIZE));
1399 virgl_encoder_write_dword(ctx->cbuf, tweak);
1400 virgl_encoder_write_dword(ctx->cbuf, value);
1401 return 0;
1402 }
1403
1404
1405 int virgl_encode_get_query_result_qbo(struct virgl_context *ctx,
1406 uint32_t handle,
1407 struct virgl_resource *res, boolean wait,
1408 uint32_t result_type,
1409 uint32_t offset,
1410 uint32_t index)
1411 {
1412 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO, 0, VIRGL_QUERY_RESULT_QBO_SIZE));
1413 virgl_encoder_write_dword(ctx->cbuf, handle);
1414 virgl_encoder_write_res(ctx, res);
1415 virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
1416 virgl_encoder_write_dword(ctx->cbuf, result_type);
1417 virgl_encoder_write_dword(ctx->cbuf, offset);
1418 virgl_encoder_write_dword(ctx->cbuf, index);
1419 return 0;
1420 }
1421
/* Encode a TRANSFER3D command into `buf`.  The header is written with
 * virgl_encoder_write_dword() rather than virgl_encoder_write_cmd_dword(),
 * presumably because `buf` is a separate transfer buffer rather than
 * ctx->cbuf — confirm against callers. */
void virgl_encode_transfer(struct virgl_screen *vs, struct virgl_cmd_buf *buf,
                           struct virgl_transfer *trans, uint32_t direction)
{
   uint32_t command;
   command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
   virgl_encoder_write_dword(buf, command);
   /* Emits resource/level/usage/stride/box; the host infers the stride. */
   virgl_encoder_transfer3d_common(vs, buf, trans,
                                   virgl_transfer3d_host_inferred_stride);
   virgl_encoder_write_dword(buf, trans->offset);
   virgl_encoder_write_dword(buf, direction);
}
1433
/* Encode a COPY_TRANSFER3D: upload from the staging resource
 * trans->copy_src_hw_res (at copy_src_offset) into the transfer's target. */
void virgl_encode_copy_transfer(struct virgl_context *ctx,
                                struct virgl_transfer *trans)
{
   uint32_t command;
   struct virgl_screen *vs = virgl_screen(ctx->base.screen);

   /* A copy transfer always has a staging source attached. */
   assert(trans->copy_src_hw_res);

   command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE);
   virgl_encoder_write_cmd_dword(ctx, command);
   /* Copy transfers need to explicitly specify the stride, since it may differ
    * from the image stride.
    */
   virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride);
   vs->vws->emit_res(vs->vws, ctx->cbuf, trans->copy_src_hw_res, TRUE);
   virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset);
   /* At the moment all copy transfers are synchronized. */
   virgl_encoder_write_dword(ctx->cbuf, 1);
}
1453
/* Pad the transfer command buffer out to VIRGL_MAX_TBUF_DWORDS by emitting
 * a single END_TRANSFERS header whose length field (`diff - 1`, i.e. the
 * remaining space minus the header dword itself) makes the host skip the
 * unused tail of the buffer. */
void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)
{
   uint32_t command, diff;
   diff = VIRGL_MAX_TBUF_DWORDS - buf->cdw;
   if (diff) {
      command = VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS, 0, diff - 1);
      virgl_encoder_write_dword(buf, command);
   }
}