2 * Copyright 2014, 2015 Red Hat.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 #include "util/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
40 #define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)
42 #define CONV_FORMAT(f) [PIPE_FORMAT_##f] = VIRGL_FORMAT_##f,
44 static const enum virgl_formats virgl_formats_conv_table
[PIPE_FORMAT_COUNT
] = {
45 CONV_FORMAT(B8G8R8A8_UNORM
)
46 CONV_FORMAT(B8G8R8X8_UNORM
)
47 CONV_FORMAT(A8R8G8B8_UNORM
)
48 CONV_FORMAT(X8R8G8B8_UNORM
)
49 CONV_FORMAT(B5G5R5A1_UNORM
)
50 CONV_FORMAT(B4G4R4A4_UNORM
)
51 CONV_FORMAT(B5G6R5_UNORM
)
52 CONV_FORMAT(R10G10B10A2_UNORM
)
55 CONV_FORMAT(L8A8_UNORM
)
56 CONV_FORMAT(L16_UNORM
)
57 CONV_FORMAT(Z16_UNORM
)
58 CONV_FORMAT(Z32_UNORM
)
59 CONV_FORMAT(Z32_FLOAT
)
60 CONV_FORMAT(Z24_UNORM_S8_UINT
)
61 CONV_FORMAT(S8_UINT_Z24_UNORM
)
62 CONV_FORMAT(Z24X8_UNORM
)
63 CONV_FORMAT(X8Z24_UNORM
)
65 CONV_FORMAT(R64_FLOAT
)
66 CONV_FORMAT(R64G64_FLOAT
)
67 CONV_FORMAT(R64G64B64_FLOAT
)
68 CONV_FORMAT(R64G64B64A64_FLOAT
)
69 CONV_FORMAT(R32_FLOAT
)
70 CONV_FORMAT(R32G32_FLOAT
)
71 CONV_FORMAT(R32G32B32_FLOAT
)
72 CONV_FORMAT(R32G32B32A32_FLOAT
)
73 CONV_FORMAT(R32_UNORM
)
74 CONV_FORMAT(R32G32_UNORM
)
75 CONV_FORMAT(R32G32B32_UNORM
)
76 CONV_FORMAT(R32G32B32A32_UNORM
)
77 CONV_FORMAT(R32_USCALED
)
78 CONV_FORMAT(R32G32_USCALED
)
79 CONV_FORMAT(R32G32B32_USCALED
)
80 CONV_FORMAT(R32G32B32A32_USCALED
)
81 CONV_FORMAT(R32_SNORM
)
82 CONV_FORMAT(R32G32_SNORM
)
83 CONV_FORMAT(R32G32B32_SNORM
)
84 CONV_FORMAT(R32G32B32A32_SNORM
)
85 CONV_FORMAT(R32_SSCALED
)
86 CONV_FORMAT(R32G32_SSCALED
)
87 CONV_FORMAT(R32G32B32_SSCALED
)
88 CONV_FORMAT(R32G32B32A32_SSCALED
)
89 CONV_FORMAT(R16_UNORM
)
90 CONV_FORMAT(R16G16_UNORM
)
91 CONV_FORMAT(R16G16B16_UNORM
)
92 CONV_FORMAT(R16G16B16A16_UNORM
)
93 CONV_FORMAT(R16_USCALED
)
94 CONV_FORMAT(R16G16_USCALED
)
95 CONV_FORMAT(R16G16B16_USCALED
)
96 CONV_FORMAT(R16G16B16A16_USCALED
)
97 CONV_FORMAT(R16_SNORM
)
98 CONV_FORMAT(R16G16_SNORM
)
99 CONV_FORMAT(R16G16B16_SNORM
)
100 CONV_FORMAT(R16G16B16A16_SNORM
)
101 CONV_FORMAT(R16_SSCALED
)
102 CONV_FORMAT(R16G16_SSCALED
)
103 CONV_FORMAT(R16G16B16_SSCALED
)
104 CONV_FORMAT(R16G16B16A16_SSCALED
)
105 CONV_FORMAT(R8_UNORM
)
106 CONV_FORMAT(R8G8_UNORM
)
107 CONV_FORMAT(R8G8B8_UNORM
)
108 CONV_FORMAT(R8G8B8A8_UNORM
)
109 CONV_FORMAT(R8_USCALED
)
110 CONV_FORMAT(R8G8_USCALED
)
111 CONV_FORMAT(R8G8B8_USCALED
)
112 CONV_FORMAT(R8G8B8A8_USCALED
)
113 CONV_FORMAT(R8_SNORM
)
114 CONV_FORMAT(R8G8_SNORM
)
115 CONV_FORMAT(R8G8B8_SNORM
)
116 CONV_FORMAT(R8G8B8A8_SNORM
)
117 CONV_FORMAT(R8_SSCALED
)
118 CONV_FORMAT(R8G8_SSCALED
)
119 CONV_FORMAT(R8G8B8_SSCALED
)
120 CONV_FORMAT(R8G8B8A8_SSCALED
)
121 CONV_FORMAT(R16_FLOAT
)
122 CONV_FORMAT(R16G16_FLOAT
)
123 CONV_FORMAT(R16G16B16_FLOAT
)
124 CONV_FORMAT(R16G16B16A16_FLOAT
)
126 CONV_FORMAT(L8A8_SRGB
)
127 CONV_FORMAT(R8G8B8_SRGB
)
128 CONV_FORMAT(A8B8G8R8_SRGB
)
129 CONV_FORMAT(X8B8G8R8_SRGB
)
130 CONV_FORMAT(B8G8R8A8_SRGB
)
131 CONV_FORMAT(B8G8R8X8_SRGB
)
132 CONV_FORMAT(A8R8G8B8_SRGB
)
133 CONV_FORMAT(X8R8G8B8_SRGB
)
134 CONV_FORMAT(R8G8B8A8_SRGB
)
135 CONV_FORMAT(DXT1_RGB
)
136 CONV_FORMAT(DXT1_RGBA
)
137 CONV_FORMAT(DXT3_RGBA
)
138 CONV_FORMAT(DXT5_RGBA
)
139 CONV_FORMAT(DXT1_SRGB
)
140 CONV_FORMAT(DXT1_SRGBA
)
141 CONV_FORMAT(DXT3_SRGBA
)
142 CONV_FORMAT(DXT5_SRGBA
)
143 CONV_FORMAT(RGTC1_UNORM
)
144 CONV_FORMAT(RGTC1_SNORM
)
145 CONV_FORMAT(RGTC2_UNORM
)
146 CONV_FORMAT(RGTC2_SNORM
)
147 CONV_FORMAT(A8B8G8R8_UNORM
)
148 CONV_FORMAT(B5G5R5X1_UNORM
)
149 CONV_FORMAT(R10G10B10A2_USCALED
)
150 CONV_FORMAT(R11G11B10_FLOAT
)
151 CONV_FORMAT(R9G9B9E5_FLOAT
)
152 CONV_FORMAT(Z32_FLOAT_S8X24_UINT
)
153 CONV_FORMAT(B10G10R10A2_UNORM
)
154 CONV_FORMAT(R8G8B8X8_UNORM
)
155 CONV_FORMAT(B4G4R4X4_UNORM
)
156 CONV_FORMAT(X24S8_UINT
)
157 CONV_FORMAT(S8X24_UINT
)
158 CONV_FORMAT(X32_S8X24_UINT
)
159 CONV_FORMAT(B2G3R3_UNORM
)
160 CONV_FORMAT(L16A16_UNORM
)
161 CONV_FORMAT(A16_UNORM
)
162 CONV_FORMAT(I16_UNORM
)
163 CONV_FORMAT(LATC1_UNORM
)
164 CONV_FORMAT(LATC1_SNORM
)
165 CONV_FORMAT(LATC2_UNORM
)
166 CONV_FORMAT(LATC2_SNORM
)
167 CONV_FORMAT(A8_SNORM
)
168 CONV_FORMAT(L8_SNORM
)
169 CONV_FORMAT(L8A8_SNORM
)
170 CONV_FORMAT(A16_SNORM
)
171 CONV_FORMAT(L16_SNORM
)
172 CONV_FORMAT(L16A16_SNORM
)
173 CONV_FORMAT(A16_FLOAT
)
174 CONV_FORMAT(L16_FLOAT
)
175 CONV_FORMAT(L16A16_FLOAT
)
176 CONV_FORMAT(A32_FLOAT
)
177 CONV_FORMAT(L32_FLOAT
)
178 CONV_FORMAT(L32A32_FLOAT
)
185 CONV_FORMAT(R8G8_UINT
)
186 CONV_FORMAT(R8G8B8_UINT
)
187 CONV_FORMAT(R8G8B8A8_UINT
)
189 CONV_FORMAT(R8G8_SINT
)
190 CONV_FORMAT(R8G8B8_SINT
)
191 CONV_FORMAT(R8G8B8A8_SINT
)
192 CONV_FORMAT(R16_UINT
)
193 CONV_FORMAT(R16G16_UINT
)
194 CONV_FORMAT(R16G16B16_UINT
)
195 CONV_FORMAT(R16G16B16A16_UINT
)
196 CONV_FORMAT(R16_SINT
)
197 CONV_FORMAT(R16G16_SINT
)
198 CONV_FORMAT(R16G16B16_SINT
)
199 CONV_FORMAT(R16G16B16A16_SINT
)
200 CONV_FORMAT(R32_UINT
)
201 CONV_FORMAT(R32G32_UINT
)
202 CONV_FORMAT(R32G32B32_UINT
)
203 CONV_FORMAT(R32G32B32A32_UINT
)
204 CONV_FORMAT(R32_SINT
)
205 CONV_FORMAT(R32G32_SINT
)
206 CONV_FORMAT(R32G32B32_SINT
)
207 CONV_FORMAT(R32G32B32A32_SINT
)
210 CONV_FORMAT(L8A8_UINT
)
213 CONV_FORMAT(L8A8_SINT
)
214 CONV_FORMAT(A16_UINT
)
215 CONV_FORMAT(L16_UINT
)
216 CONV_FORMAT(L16A16_UINT
)
217 CONV_FORMAT(A16_SINT
)
218 CONV_FORMAT(L16_SINT
)
219 CONV_FORMAT(L16A16_SINT
)
220 CONV_FORMAT(A32_UINT
)
221 CONV_FORMAT(L32_UINT
)
222 CONV_FORMAT(L32A32_UINT
)
223 CONV_FORMAT(A32_SINT
)
224 CONV_FORMAT(L32_SINT
)
225 CONV_FORMAT(L32A32_SINT
)
226 CONV_FORMAT(R10G10B10A2_SSCALED
)
227 CONV_FORMAT(R10G10B10A2_SNORM
)
228 CONV_FORMAT(B10G10R10A2_SNORM
)
229 CONV_FORMAT(B10G10R10A2_UINT
)
230 CONV_FORMAT(R8G8B8X8_SNORM
)
231 CONV_FORMAT(R8G8B8X8_SRGB
)
232 CONV_FORMAT(R8G8B8X8_UINT
)
233 CONV_FORMAT(R8G8B8X8_SINT
)
234 CONV_FORMAT(B10G10R10X2_UNORM
)
235 CONV_FORMAT(R16G16B16X16_UNORM
)
236 CONV_FORMAT(R16G16B16X16_SNORM
)
237 CONV_FORMAT(R16G16B16X16_FLOAT
)
238 CONV_FORMAT(R16G16B16X16_UINT
)
239 CONV_FORMAT(R16G16B16X16_SINT
)
240 CONV_FORMAT(R32G32B32X32_FLOAT
)
241 CONV_FORMAT(R32G32B32X32_UINT
)
242 CONV_FORMAT(R32G32B32X32_SINT
)
243 CONV_FORMAT(R10G10B10A2_UINT
)
244 CONV_FORMAT(BPTC_RGBA_UNORM
)
245 CONV_FORMAT(BPTC_SRGBA
)
246 CONV_FORMAT(BPTC_RGB_FLOAT
)
247 CONV_FORMAT(BPTC_RGB_UFLOAT
)
248 CONV_FORMAT(R10G10B10X2_UNORM
)
249 CONV_FORMAT(A4B4G4R4_UNORM
)
253 enum virgl_formats
pipe_to_virgl_format(enum pipe_format format
)
255 enum virgl_formats vformat
= virgl_formats_conv_table
[format
];
256 if (format
!= PIPE_FORMAT_NONE
&& !vformat
)
257 debug_printf("VIRGL: pipe format %s not in the format table\n", util_format_name(format
));
261 static int virgl_encoder_write_cmd_dword(struct virgl_context
*ctx
,
264 int len
= (dword
>> 16);
266 if ((ctx
->cbuf
->cdw
+ len
+ 1) > VIRGL_MAX_CMDBUF_DWORDS
)
267 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
269 virgl_encoder_write_dword(ctx
->cbuf
, dword
);
273 static void virgl_encoder_emit_resource(struct virgl_screen
*vs
,
274 struct virgl_cmd_buf
*buf
,
275 struct virgl_resource
*res
)
277 struct virgl_winsys
*vws
= vs
->vws
;
278 if (res
&& res
->hw_res
)
279 vws
->emit_res(vws
, buf
, res
->hw_res
, TRUE
);
281 virgl_encoder_write_dword(buf
, 0);
285 static void virgl_encoder_write_res(struct virgl_context
*ctx
,
286 struct virgl_resource
*res
)
288 struct virgl_screen
*vs
= virgl_screen(ctx
->base
.screen
);
289 virgl_encoder_emit_resource(vs
, ctx
->cbuf
, res
);
292 int virgl_encode_bind_object(struct virgl_context
*ctx
,
293 uint32_t handle
, uint32_t object
)
295 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT
, object
, 1));
296 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
300 int virgl_encode_delete_object(struct virgl_context
*ctx
,
301 uint32_t handle
, uint32_t object
)
303 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT
, object
, 1));
304 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
308 int virgl_encode_blend_state(struct virgl_context
*ctx
,
310 const struct pipe_blend_state
*blend_state
)
315 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_BLEND
, VIRGL_OBJ_BLEND_SIZE
));
316 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
319 VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state
->independent_blend_enable
) |
320 VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state
->logicop_enable
) |
321 VIRGL_OBJ_BLEND_S0_DITHER(blend_state
->dither
) |
322 VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state
->alpha_to_coverage
) |
323 VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state
->alpha_to_one
);
325 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
327 tmp
= VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state
->logicop_func
);
328 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
330 for (i
= 0; i
< VIRGL_MAX_COLOR_BUFS
; i
++) {
332 VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state
->rt
[i
].blend_enable
) |
333 VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state
->rt
[i
].rgb_func
) |
334 VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state
->rt
[i
].rgb_src_factor
) |
335 VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state
->rt
[i
].rgb_dst_factor
)|
336 VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state
->rt
[i
].alpha_func
) |
337 VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(blend_state
->rt
[i
].alpha_src_factor
) |
338 VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state
->rt
[i
].alpha_dst_factor
) |
339 VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state
->rt
[i
].colormask
);
340 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
345 int virgl_encode_dsa_state(struct virgl_context
*ctx
,
347 const struct pipe_depth_stencil_alpha_state
*dsa_state
)
351 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_DSA
, VIRGL_OBJ_DSA_SIZE
));
352 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
354 tmp
= VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state
->depth
.enabled
) |
355 VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state
->depth
.writemask
) |
356 VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state
->depth
.func
) |
357 VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state
->alpha
.enabled
) |
358 VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state
->alpha
.func
);
359 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
361 for (i
= 0; i
< 2; i
++) {
362 tmp
= VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state
->stencil
[i
].enabled
) |
363 VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state
->stencil
[i
].func
) |
364 VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state
->stencil
[i
].fail_op
) |
365 VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state
->stencil
[i
].zpass_op
) |
366 VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state
->stencil
[i
].zfail_op
) |
367 VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state
->stencil
[i
].valuemask
) |
368 VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state
->stencil
[i
].writemask
);
369 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
372 virgl_encoder_write_dword(ctx
->cbuf
, fui(dsa_state
->alpha
.ref_value
));
375 int virgl_encode_rasterizer_state(struct virgl_context
*ctx
,
377 const struct pipe_rasterizer_state
*state
)
381 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_RASTERIZER
, VIRGL_OBJ_RS_SIZE
));
382 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
384 tmp
= VIRGL_OBJ_RS_S0_FLATSHADE(state
->flatshade
) |
385 VIRGL_OBJ_RS_S0_DEPTH_CLIP(state
->depth_clip_near
) |
386 VIRGL_OBJ_RS_S0_CLIP_HALFZ(state
->clip_halfz
) |
387 VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state
->rasterizer_discard
) |
388 VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state
->flatshade_first
) |
389 VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state
->light_twoside
) |
390 VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state
->sprite_coord_mode
) |
391 VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state
->point_quad_rasterization
) |
392 VIRGL_OBJ_RS_S0_CULL_FACE(state
->cull_face
) |
393 VIRGL_OBJ_RS_S0_FILL_FRONT(state
->fill_front
) |
394 VIRGL_OBJ_RS_S0_FILL_BACK(state
->fill_back
) |
395 VIRGL_OBJ_RS_S0_SCISSOR(state
->scissor
) |
396 VIRGL_OBJ_RS_S0_FRONT_CCW(state
->front_ccw
) |
397 VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state
->clamp_vertex_color
) |
398 VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state
->clamp_fragment_color
) |
399 VIRGL_OBJ_RS_S0_OFFSET_LINE(state
->offset_line
) |
400 VIRGL_OBJ_RS_S0_OFFSET_POINT(state
->offset_point
) |
401 VIRGL_OBJ_RS_S0_OFFSET_TRI(state
->offset_tri
) |
402 VIRGL_OBJ_RS_S0_POLY_SMOOTH(state
->poly_smooth
) |
403 VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state
->poly_stipple_enable
) |
404 VIRGL_OBJ_RS_S0_POINT_SMOOTH(state
->point_smooth
) |
405 VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state
->point_size_per_vertex
) |
406 VIRGL_OBJ_RS_S0_MULTISAMPLE(state
->multisample
) |
407 VIRGL_OBJ_RS_S0_LINE_SMOOTH(state
->line_smooth
) |
408 VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state
->line_stipple_enable
) |
409 VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state
->line_last_pixel
) |
410 VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state
->half_pixel_center
) |
411 VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state
->bottom_edge_rule
) |
412 VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state
->force_persample_interp
);
414 virgl_encoder_write_dword(ctx
->cbuf
, tmp
); /* S0 */
415 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->point_size
)); /* S1 */
416 virgl_encoder_write_dword(ctx
->cbuf
, state
->sprite_coord_enable
); /* S2 */
417 tmp
= VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state
->line_stipple_pattern
) |
418 VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state
->line_stipple_factor
) |
419 VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state
->clip_plane_enable
);
420 virgl_encoder_write_dword(ctx
->cbuf
, tmp
); /* S3 */
421 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->line_width
)); /* S4 */
422 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_units
)); /* S5 */
423 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_scale
)); /* S6 */
424 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_clamp
)); /* S7 */
428 static void virgl_emit_shader_header(struct virgl_context
*ctx
,
429 uint32_t handle
, uint32_t len
,
430 uint32_t type
, uint32_t offlen
,
433 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SHADER
, len
));
434 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
435 virgl_encoder_write_dword(ctx
->cbuf
, type
);
436 virgl_encoder_write_dword(ctx
->cbuf
, offlen
);
437 virgl_encoder_write_dword(ctx
->cbuf
, num_tokens
);
440 static void virgl_emit_shader_streamout(struct virgl_context
*ctx
,
441 const struct pipe_stream_output_info
*so_info
)
448 num_outputs
= so_info
->num_outputs
;
450 virgl_encoder_write_dword(ctx
->cbuf
, num_outputs
);
452 for (i
= 0; i
< 4; i
++)
453 virgl_encoder_write_dword(ctx
->cbuf
, so_info
->stride
[i
]);
455 for (i
= 0; i
< so_info
->num_outputs
; i
++) {
457 VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info
->output
[i
].register_index
) |
458 VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info
->output
[i
].start_component
) |
459 VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info
->output
[i
].num_components
) |
460 VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info
->output
[i
].output_buffer
) |
461 VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info
->output
[i
].dst_offset
);
462 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
463 virgl_encoder_write_dword(ctx
->cbuf
, so_info
->output
[i
].stream
);
468 int virgl_encode_shader_state(struct virgl_context
*ctx
,
471 const struct pipe_stream_output_info
*so_info
,
472 uint32_t cs_req_local_mem
,
473 const struct tgsi_token
*tokens
)
476 uint32_t shader_len
, len
;
478 int num_tokens
= tgsi_num_tokens(tokens
);
479 int str_total_size
= 65536;
481 uint32_t left_bytes
, base_hdr_size
, strm_hdr_size
, thispass
;
483 str
= CALLOC(1, str_total_size
);
490 bret
= tgsi_dump_str(tokens
, TGSI_DUMP_FLOAT_AS_HEX
, str
, str_total_size
);
492 if (virgl_debug
& VIRGL_DEBUG_VERBOSE
)
493 debug_printf("Failed to translate shader in available space - trying again\n");
494 old_size
= str_total_size
;
495 str_total_size
= 65536 * ++retry_size
;
496 str
= REALLOC(str
, old_size
, str_total_size
);
500 } while (bret
== false && retry_size
< 10);
505 if (virgl_debug
& VIRGL_DEBUG_TGSI
)
506 debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str
);
508 shader_len
= strlen(str
) + 1;
510 left_bytes
= shader_len
;
513 strm_hdr_size
= so_info
->num_outputs
? so_info
->num_outputs
* 2 + 4 : 0;
517 uint32_t length
, offlen
;
518 int hdr_len
= base_hdr_size
+ (first_pass
? strm_hdr_size
: 0);
519 if (ctx
->cbuf
->cdw
+ hdr_len
+ 1 >= VIRGL_ENCODE_MAX_DWORDS
)
520 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
522 thispass
= (VIRGL_ENCODE_MAX_DWORDS
- ctx
->cbuf
->cdw
- hdr_len
- 1) * 4;
524 length
= MIN2(thispass
, left_bytes
);
525 len
= ((length
+ 3) / 4) + hdr_len
;
528 offlen
= VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len
);
530 offlen
= VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr
- (uintptr_t)str
) | VIRGL_OBJ_SHADER_OFFSET_CONT
;
532 virgl_emit_shader_header(ctx
, handle
, len
, type
, offlen
, num_tokens
);
534 if (type
== PIPE_SHADER_COMPUTE
)
535 virgl_encoder_write_dword(ctx
->cbuf
, cs_req_local_mem
);
537 virgl_emit_shader_streamout(ctx
, first_pass
? so_info
: NULL
);
539 virgl_encoder_write_block(ctx
->cbuf
, (uint8_t *)sptr
, length
);
543 left_bytes
-= length
;
551 int virgl_encode_clear(struct virgl_context
*ctx
,
553 const union pipe_color_union
*color
,
554 double depth
, unsigned stencil
)
559 STATIC_ASSERT(sizeof(qword
) == sizeof(depth
));
560 memcpy(&qword
, &depth
, sizeof(qword
));
562 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CLEAR
, 0, VIRGL_OBJ_CLEAR_SIZE
));
563 virgl_encoder_write_dword(ctx
->cbuf
, buffers
);
564 for (i
= 0; i
< 4; i
++)
565 virgl_encoder_write_dword(ctx
->cbuf
, color
->ui
[i
]);
566 virgl_encoder_write_qword(ctx
->cbuf
, qword
);
567 virgl_encoder_write_dword(ctx
->cbuf
, stencil
);
571 int virgl_encoder_set_framebuffer_state(struct virgl_context
*ctx
,
572 const struct pipe_framebuffer_state
*state
)
574 struct virgl_surface
*zsurf
= virgl_surface(state
->zsbuf
);
577 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE
, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state
->nr_cbufs
)));
578 virgl_encoder_write_dword(ctx
->cbuf
, state
->nr_cbufs
);
579 virgl_encoder_write_dword(ctx
->cbuf
, zsurf
? zsurf
->handle
: 0);
580 for (i
= 0; i
< state
->nr_cbufs
; i
++) {
581 struct virgl_surface
*surf
= virgl_surface(state
->cbufs
[i
]);
582 virgl_encoder_write_dword(ctx
->cbuf
, surf
? surf
->handle
: 0);
585 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
586 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_FB_NO_ATTACH
) {
587 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH
, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE
));
588 virgl_encoder_write_dword(ctx
->cbuf
, state
->width
| (state
->height
<< 16));
589 virgl_encoder_write_dword(ctx
->cbuf
, state
->layers
| (state
->samples
<< 16));
594 int virgl_encoder_set_viewport_states(struct virgl_context
*ctx
,
597 const struct pipe_viewport_state
*states
)
600 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE
, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports
)));
601 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
602 for (v
= 0; v
< num_viewports
; v
++) {
603 for (i
= 0; i
< 3; i
++)
604 virgl_encoder_write_dword(ctx
->cbuf
, fui(states
[v
].scale
[i
]));
605 for (i
= 0; i
< 3; i
++)
606 virgl_encoder_write_dword(ctx
->cbuf
, fui(states
[v
].translate
[i
]));
611 int virgl_encoder_create_vertex_elements(struct virgl_context
*ctx
,
613 unsigned num_elements
,
614 const struct pipe_vertex_element
*element
)
617 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_VERTEX_ELEMENTS
, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements
)));
618 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
619 for (i
= 0; i
< num_elements
; i
++) {
620 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].src_offset
);
621 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].instance_divisor
);
622 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].vertex_buffer_index
);
623 virgl_encoder_write_dword(ctx
->cbuf
, pipe_to_virgl_format(element
[i
].src_format
));
628 int virgl_encoder_set_vertex_buffers(struct virgl_context
*ctx
,
629 unsigned num_buffers
,
630 const struct pipe_vertex_buffer
*buffers
)
633 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS
, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers
)));
634 for (i
= 0; i
< num_buffers
; i
++) {
635 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
.resource
);
636 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].stride
);
637 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
638 virgl_encoder_write_res(ctx
, res
);
643 int virgl_encoder_set_index_buffer(struct virgl_context
*ctx
,
644 const struct virgl_indexbuf
*ib
)
646 int length
= VIRGL_SET_INDEX_BUFFER_SIZE(ib
);
647 struct virgl_resource
*res
= NULL
;
649 res
= virgl_resource(ib
->buffer
);
651 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER
, 0, length
));
652 virgl_encoder_write_res(ctx
, res
);
654 virgl_encoder_write_dword(ctx
->cbuf
, ib
->index_size
);
655 virgl_encoder_write_dword(ctx
->cbuf
, ib
->offset
);
660 int virgl_encoder_draw_vbo(struct virgl_context
*ctx
,
661 const struct pipe_draw_info
*info
)
663 uint32_t length
= VIRGL_DRAW_VBO_SIZE
;
664 if (info
->mode
== PIPE_PRIM_PATCHES
)
665 length
= VIRGL_DRAW_VBO_SIZE_TESS
;
667 length
= VIRGL_DRAW_VBO_SIZE_INDIRECT
;
668 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO
, 0, length
));
669 virgl_encoder_write_dword(ctx
->cbuf
, info
->start
);
670 virgl_encoder_write_dword(ctx
->cbuf
, info
->count
);
671 virgl_encoder_write_dword(ctx
->cbuf
, info
->mode
);
672 virgl_encoder_write_dword(ctx
->cbuf
, !!info
->index_size
);
673 virgl_encoder_write_dword(ctx
->cbuf
, info
->instance_count
);
674 virgl_encoder_write_dword(ctx
->cbuf
, info
->index_bias
);
675 virgl_encoder_write_dword(ctx
->cbuf
, info
->start_instance
);
676 virgl_encoder_write_dword(ctx
->cbuf
, info
->primitive_restart
);
677 virgl_encoder_write_dword(ctx
->cbuf
, info
->restart_index
);
678 virgl_encoder_write_dword(ctx
->cbuf
, info
->min_index
);
679 virgl_encoder_write_dword(ctx
->cbuf
, info
->max_index
);
680 if (info
->count_from_stream_output
)
681 virgl_encoder_write_dword(ctx
->cbuf
, info
->count_from_stream_output
->buffer_size
);
683 virgl_encoder_write_dword(ctx
->cbuf
, 0);
684 if (length
>= VIRGL_DRAW_VBO_SIZE_TESS
) {
685 virgl_encoder_write_dword(ctx
->cbuf
, info
->vertices_per_patch
); /* vertices per patch */
686 virgl_encoder_write_dword(ctx
->cbuf
, info
->drawid
); /* drawid */
688 if (length
== VIRGL_DRAW_VBO_SIZE_INDIRECT
) {
689 virgl_encoder_write_res(ctx
, virgl_resource(info
->indirect
->buffer
));
690 virgl_encoder_write_dword(ctx
->cbuf
, info
->indirect
->offset
);
691 virgl_encoder_write_dword(ctx
->cbuf
, info
->indirect
->stride
); /* indirect stride */
692 virgl_encoder_write_dword(ctx
->cbuf
, info
->indirect
->draw_count
); /* indirect draw count */
693 virgl_encoder_write_dword(ctx
->cbuf
, info
->indirect
->indirect_draw_count_offset
); /* indirect draw count offset */
694 if (info
->indirect
->indirect_draw_count
)
695 virgl_encoder_write_res(ctx
, virgl_resource(info
->indirect
->indirect_draw_count
));
697 virgl_encoder_write_dword(ctx
->cbuf
, 0); /* indirect draw count handle */
702 int virgl_encoder_create_surface(struct virgl_context
*ctx
,
704 struct virgl_resource
*res
,
705 const struct pipe_surface
*templat
)
707 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SURFACE
, VIRGL_OBJ_SURFACE_SIZE
));
708 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
709 virgl_encoder_write_res(ctx
, res
);
710 virgl_encoder_write_dword(ctx
->cbuf
, pipe_to_virgl_format(templat
->format
));
712 assert(templat
->texture
->target
!= PIPE_BUFFER
);
713 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.tex
.level
);
714 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.tex
.first_layer
| (templat
->u
.tex
.last_layer
<< 16));
719 int virgl_encoder_create_so_target(struct virgl_context
*ctx
,
721 struct virgl_resource
*res
,
722 unsigned buffer_offset
,
723 unsigned buffer_size
)
725 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_STREAMOUT_TARGET
, VIRGL_OBJ_STREAMOUT_SIZE
));
726 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
727 virgl_encoder_write_res(ctx
, res
);
728 virgl_encoder_write_dword(ctx
->cbuf
, buffer_offset
);
729 virgl_encoder_write_dword(ctx
->cbuf
, buffer_size
);
/* How the stride fields of a transfer3d command are filled in. */
enum virgl_transfer3d_encode_stride {
   /* The stride and layer_stride are explicitly specified in the command. */
   virgl_transfer3d_explicit_stride,
   /* The stride and layer_stride are inferred by the host. In this case, the
    * host will use the image stride and layer_stride for the specified level.
    */
   virgl_transfer3d_host_inferred_stride,
};
742 static void virgl_encoder_transfer3d_common(struct virgl_screen
*vs
,
743 struct virgl_cmd_buf
*buf
,
744 struct virgl_transfer
*xfer
,
745 enum virgl_transfer3d_encode_stride encode_stride
)
748 struct pipe_transfer
*transfer
= &xfer
->base
;
750 unsigned layer_stride
;
752 if (encode_stride
== virgl_transfer3d_explicit_stride
) {
753 stride
= transfer
->stride
;
754 layer_stride
= transfer
->layer_stride
;
755 } else if (virgl_transfer3d_host_inferred_stride
) {
759 assert(!"Invalid virgl_transfer3d_encode_stride value");
762 /* We cannot use virgl_encoder_emit_resource with transfer->resource here
763 * because transfer->resource might have a different virgl_hw_res than what
764 * this transfer targets, which is saved in xfer->hw_res.
766 vs
->vws
->emit_res(vs
->vws
, buf
, xfer
->hw_res
, TRUE
);
767 virgl_encoder_write_dword(buf
, transfer
->level
);
768 virgl_encoder_write_dword(buf
, transfer
->usage
);
769 virgl_encoder_write_dword(buf
, stride
);
770 virgl_encoder_write_dword(buf
, layer_stride
);
771 virgl_encoder_write_dword(buf
, transfer
->box
.x
);
772 virgl_encoder_write_dword(buf
, transfer
->box
.y
);
773 virgl_encoder_write_dword(buf
, transfer
->box
.z
);
774 virgl_encoder_write_dword(buf
, transfer
->box
.width
);
775 virgl_encoder_write_dword(buf
, transfer
->box
.height
);
776 virgl_encoder_write_dword(buf
, transfer
->box
.depth
);
779 int virgl_encoder_inline_write(struct virgl_context
*ctx
,
780 struct virgl_resource
*res
,
781 unsigned level
, unsigned usage
,
782 const struct pipe_box
*box
,
783 const void *data
, unsigned stride
,
784 unsigned layer_stride
)
786 uint32_t size
= (stride
? stride
: box
->width
) * box
->height
;
787 uint32_t length
, thispass
, left_bytes
;
788 struct virgl_transfer transfer
;
789 struct virgl_screen
*vs
= virgl_screen(ctx
->base
.screen
);
791 transfer
.base
.resource
= &res
->u
.b
;
792 transfer
.hw_res
= res
->hw_res
;
793 transfer
.base
.level
= level
;
794 transfer
.base
.usage
= usage
;
795 transfer
.base
.box
= *box
;
797 length
= 11 + (size
+ 3) / 4;
798 if ((ctx
->cbuf
->cdw
+ length
+ 1) > VIRGL_ENCODE_MAX_DWORDS
) {
799 if (box
->height
> 1 || box
->depth
> 1) {
800 debug_printf("inline transfer failed due to multi dimensions and too large\n");
807 if (ctx
->cbuf
->cdw
+ 12 >= VIRGL_ENCODE_MAX_DWORDS
)
808 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
810 thispass
= (VIRGL_ENCODE_MAX_DWORDS
- ctx
->cbuf
->cdw
- 12) * 4;
812 length
= MIN2(thispass
, left_bytes
);
814 transfer
.base
.box
.width
= length
;
815 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE
, 0, ((length
+ 3) / 4) + 11));
816 virgl_encoder_transfer3d_common(vs
, ctx
->cbuf
, &transfer
,
817 virgl_transfer3d_host_inferred_stride
);
818 virgl_encoder_write_block(ctx
->cbuf
, data
, length
);
819 left_bytes
-= length
;
820 transfer
.base
.box
.x
+= length
;
/* Intentionally a no-op: the flush-frontbuffer command encoding is
 * currently disabled (kept here as a placeholder).
 */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}
834 int virgl_encode_sampler_state(struct virgl_context
*ctx
,
836 const struct pipe_sampler_state
*state
)
840 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SAMPLER_STATE
, VIRGL_OBJ_SAMPLER_STATE_SIZE
));
841 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
843 tmp
= VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state
->wrap_s
) |
844 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state
->wrap_t
) |
845 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state
->wrap_r
) |
846 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state
->min_img_filter
) |
847 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state
->min_mip_filter
) |
848 VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state
->mag_img_filter
) |
849 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state
->compare_mode
) |
850 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state
->compare_func
) |
851 VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state
->seamless_cube_map
);
853 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
854 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->lod_bias
));
855 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->min_lod
));
856 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->max_lod
));
857 for (i
= 0; i
< 4; i
++)
858 virgl_encoder_write_dword(ctx
->cbuf
, state
->border_color
.ui
[i
]);
863 int virgl_encode_sampler_view(struct virgl_context
*ctx
,
865 struct virgl_resource
*res
,
866 const struct pipe_sampler_view
*state
)
868 unsigned elem_size
= util_format_get_blocksize(state
->format
);
869 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
871 uint32_t dword_fmt_target
= pipe_to_virgl_format(state
->format
);
872 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SAMPLER_VIEW
, VIRGL_OBJ_SAMPLER_VIEW_SIZE
));
873 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
874 virgl_encoder_write_res(ctx
, res
);
875 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_TEXTURE_VIEW
)
876 dword_fmt_target
|= (state
->target
<< 24);
877 virgl_encoder_write_dword(ctx
->cbuf
, dword_fmt_target
);
878 if (res
->u
.b
.target
== PIPE_BUFFER
) {
879 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.buf
.offset
/ elem_size
);
880 virgl_encoder_write_dword(ctx
->cbuf
, (state
->u
.buf
.offset
+ state
->u
.buf
.size
) / elem_size
- 1);
882 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.tex
.first_layer
| state
->u
.tex
.last_layer
<< 16);
883 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.tex
.first_level
| state
->u
.tex
.last_level
<< 8);
885 tmp
= VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state
->swizzle_r
) |
886 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state
->swizzle_g
) |
887 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state
->swizzle_b
) |
888 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state
->swizzle_a
);
889 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
893 int virgl_encode_set_sampler_views(struct virgl_context
*ctx
,
894 uint32_t shader_type
,
897 struct virgl_sampler_view
**views
)
900 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS
, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views
)));
901 virgl_encoder_write_dword(ctx
->cbuf
, shader_type
);
902 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
903 for (i
= 0; i
< num_views
; i
++) {
904 uint32_t handle
= views
[i
] ? views
[i
]->handle
: 0;
905 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
910 int virgl_encode_bind_sampler_states(struct virgl_context
*ctx
,
911 uint32_t shader_type
,
913 uint32_t num_handles
,
917 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES
, 0, VIRGL_BIND_SAMPLER_STATES(num_handles
)));
918 virgl_encoder_write_dword(ctx
->cbuf
, shader_type
);
919 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
920 for (i
= 0; i
< num_handles
; i
++)
921 virgl_encoder_write_dword(ctx
->cbuf
, handles
[i
]);
925 int virgl_encoder_write_constant_buffer(struct virgl_context
*ctx
,
931 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER
, 0, size
+ 2));
932 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
933 virgl_encoder_write_dword(ctx
->cbuf
, index
);
935 virgl_encoder_write_block(ctx
->cbuf
, data
, size
* 4);
939 int virgl_encoder_set_uniform_buffer(struct virgl_context
*ctx
,
944 struct virgl_resource
*res
)
946 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER
, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE
));
947 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
948 virgl_encoder_write_dword(ctx
->cbuf
, index
);
949 virgl_encoder_write_dword(ctx
->cbuf
, offset
);
950 virgl_encoder_write_dword(ctx
->cbuf
, length
);
951 virgl_encoder_write_res(ctx
, res
);
956 int virgl_encoder_set_stencil_ref(struct virgl_context
*ctx
,
957 const struct pipe_stencil_ref
*ref
)
959 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF
, 0, VIRGL_SET_STENCIL_REF_SIZE
));
960 virgl_encoder_write_dword(ctx
->cbuf
, VIRGL_STENCIL_REF_VAL(ref
->ref_value
[0] , (ref
->ref_value
[1])));
964 int virgl_encoder_set_blend_color(struct virgl_context
*ctx
,
965 const struct pipe_blend_color
*color
)
968 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR
, 0, VIRGL_SET_BLEND_COLOR_SIZE
));
969 for (i
= 0; i
< 4; i
++)
970 virgl_encoder_write_dword(ctx
->cbuf
, fui(color
->color
[i
]));
974 int virgl_encoder_set_scissor_state(struct virgl_context
*ctx
,
977 const struct pipe_scissor_state
*ss
)
980 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE
, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors
)));
981 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
982 for (i
= 0; i
< num_scissors
; i
++) {
983 virgl_encoder_write_dword(ctx
->cbuf
, (ss
[i
].minx
| ss
[i
].miny
<< 16));
984 virgl_encoder_write_dword(ctx
->cbuf
, (ss
[i
].maxx
| ss
[i
].maxy
<< 16));
989 void virgl_encoder_set_polygon_stipple(struct virgl_context
*ctx
,
990 const struct pipe_poly_stipple
*ps
)
993 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE
, 0, VIRGL_POLYGON_STIPPLE_SIZE
));
994 for (i
= 0; i
< VIRGL_POLYGON_STIPPLE_SIZE
; i
++) {
995 virgl_encoder_write_dword(ctx
->cbuf
, ps
->stipple
[i
]);
999 void virgl_encoder_set_sample_mask(struct virgl_context
*ctx
,
1000 unsigned sample_mask
)
1002 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK
, 0, VIRGL_SET_SAMPLE_MASK_SIZE
));
1003 virgl_encoder_write_dword(ctx
->cbuf
, sample_mask
);
1006 void virgl_encoder_set_min_samples(struct virgl_context
*ctx
,
1007 unsigned min_samples
)
1009 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES
, 0, VIRGL_SET_MIN_SAMPLES_SIZE
));
1010 virgl_encoder_write_dword(ctx
->cbuf
, min_samples
);
1013 void virgl_encoder_set_clip_state(struct virgl_context
*ctx
,
1014 const struct pipe_clip_state
*clip
)
1017 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE
, 0, VIRGL_SET_CLIP_STATE_SIZE
));
1018 for (i
= 0; i
< VIRGL_MAX_CLIP_PLANES
; i
++) {
1019 for (j
= 0; j
< 4; j
++) {
1020 virgl_encoder_write_dword(ctx
->cbuf
, fui(clip
->ucp
[i
][j
]));
1025 int virgl_encode_resource_copy_region(struct virgl_context
*ctx
,
1026 struct virgl_resource
*dst_res
,
1028 unsigned dstx
, unsigned dsty
, unsigned dstz
,
1029 struct virgl_resource
*src_res
,
1031 const struct pipe_box
*src_box
)
1033 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION
, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE
));
1034 virgl_encoder_write_res(ctx
, dst_res
);
1035 virgl_encoder_write_dword(ctx
->cbuf
, dst_level
);
1036 virgl_encoder_write_dword(ctx
->cbuf
, dstx
);
1037 virgl_encoder_write_dword(ctx
->cbuf
, dsty
);
1038 virgl_encoder_write_dword(ctx
->cbuf
, dstz
);
1039 virgl_encoder_write_res(ctx
, src_res
);
1040 virgl_encoder_write_dword(ctx
->cbuf
, src_level
);
1041 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->x
);
1042 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->y
);
1043 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->z
);
1044 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->width
);
1045 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->height
);
1046 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->depth
);
1050 int virgl_encode_blit(struct virgl_context
*ctx
,
1051 struct virgl_resource
*dst_res
,
1052 struct virgl_resource
*src_res
,
1053 const struct pipe_blit_info
*blit
)
1056 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BLIT
, 0, VIRGL_CMD_BLIT_SIZE
));
1057 tmp
= VIRGL_CMD_BLIT_S0_MASK(blit
->mask
) |
1058 VIRGL_CMD_BLIT_S0_FILTER(blit
->filter
) |
1059 VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit
->scissor_enable
) |
1060 VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit
->render_condition_enable
) |
1061 VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit
->alpha_blend
);
1062 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
1063 virgl_encoder_write_dword(ctx
->cbuf
, (blit
->scissor
.minx
| blit
->scissor
.miny
<< 16));
1064 virgl_encoder_write_dword(ctx
->cbuf
, (blit
->scissor
.maxx
| blit
->scissor
.maxy
<< 16));
1066 virgl_encoder_write_res(ctx
, dst_res
);
1067 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.level
);
1068 virgl_encoder_write_dword(ctx
->cbuf
, pipe_to_virgl_format(blit
->dst
.format
));
1069 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.x
);
1070 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.y
);
1071 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.z
);
1072 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.width
);
1073 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.height
);
1074 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.depth
);
1076 virgl_encoder_write_res(ctx
, src_res
);
1077 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.level
);
1078 virgl_encoder_write_dword(ctx
->cbuf
, pipe_to_virgl_format(blit
->src
.format
));
1079 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.x
);
1080 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.y
);
1081 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.z
);
1082 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.width
);
1083 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.height
);
1084 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.depth
);
1088 int virgl_encoder_create_query(struct virgl_context
*ctx
,
1092 struct virgl_resource
*res
,
1095 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_QUERY
, VIRGL_OBJ_QUERY_SIZE
));
1096 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1097 virgl_encoder_write_dword(ctx
->cbuf
, ((query_type
& 0xffff) | (query_index
<< 16)));
1098 virgl_encoder_write_dword(ctx
->cbuf
, offset
);
1099 virgl_encoder_write_res(ctx
, res
);
1103 int virgl_encoder_begin_query(struct virgl_context
*ctx
,
1106 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY
, 0, 1));
1107 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1111 int virgl_encoder_end_query(struct virgl_context
*ctx
,
1114 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_END_QUERY
, 0, 1));
1115 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1119 int virgl_encoder_get_query_result(struct virgl_context
*ctx
,
1120 uint32_t handle
, boolean wait
)
1122 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT
, 0, 2));
1123 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1124 virgl_encoder_write_dword(ctx
->cbuf
, wait
? 1 : 0);
1128 int virgl_encoder_render_condition(struct virgl_context
*ctx
,
1129 uint32_t handle
, boolean condition
,
1130 enum pipe_render_cond_flag mode
)
1132 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION
, 0, VIRGL_RENDER_CONDITION_SIZE
));
1133 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1134 virgl_encoder_write_dword(ctx
->cbuf
, condition
);
1135 virgl_encoder_write_dword(ctx
->cbuf
, mode
);
1139 int virgl_encoder_set_so_targets(struct virgl_context
*ctx
,
1140 unsigned num_targets
,
1141 struct pipe_stream_output_target
**targets
,
1142 unsigned append_bitmask
)
1146 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS
, 0, num_targets
+ 1));
1147 virgl_encoder_write_dword(ctx
->cbuf
, append_bitmask
);
1148 for (i
= 0; i
< num_targets
; i
++) {
1149 struct virgl_so_target
*tg
= virgl_so_target(targets
[i
]);
1150 virgl_encoder_write_dword(ctx
->cbuf
, tg
? tg
->handle
: 0);
1156 int virgl_encoder_set_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
1158 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX
, 0, 1));
1159 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
1163 int virgl_encoder_create_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
1165 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX
, 0, 1));
1166 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
1170 int virgl_encoder_destroy_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
1172 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX
, 0, 1));
1173 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
1177 int virgl_encode_bind_shader(struct virgl_context
*ctx
,
1178 uint32_t handle
, uint32_t type
)
1180 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER
, 0, 2));
1181 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1182 virgl_encoder_write_dword(ctx
->cbuf
, type
);
1186 int virgl_encode_set_tess_state(struct virgl_context
*ctx
,
1187 const float outer
[4],
1188 const float inner
[2])
1191 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE
, 0, 6));
1192 for (i
= 0; i
< 4; i
++)
1193 virgl_encoder_write_dword(ctx
->cbuf
, fui(outer
[i
]));
1194 for (i
= 0; i
< 2; i
++)
1195 virgl_encoder_write_dword(ctx
->cbuf
, fui(inner
[i
]));
1199 int virgl_encode_set_shader_buffers(struct virgl_context
*ctx
,
1200 enum pipe_shader_type shader
,
1201 unsigned start_slot
, unsigned count
,
1202 const struct pipe_shader_buffer
*buffers
)
1205 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS
, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count
)));
1207 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
1208 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
1209 for (i
= 0; i
< count
; i
++) {
1210 if (buffers
&& buffers
[i
].buffer
) {
1211 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
);
1212 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
1213 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_size
);
1214 virgl_encoder_write_res(ctx
, res
);
1216 util_range_add(&res
->u
.b
, &res
->valid_buffer_range
, buffers
[i
].buffer_offset
,
1217 buffers
[i
].buffer_offset
+ buffers
[i
].buffer_size
);
1218 virgl_resource_dirty(res
, 0);
1220 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1221 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1222 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1228 int virgl_encode_set_hw_atomic_buffers(struct virgl_context
*ctx
,
1229 unsigned start_slot
, unsigned count
,
1230 const struct pipe_shader_buffer
*buffers
)
1233 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS
, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count
)));
1235 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
1236 for (i
= 0; i
< count
; i
++) {
1237 if (buffers
&& buffers
[i
].buffer
) {
1238 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
);
1239 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
1240 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_size
);
1241 virgl_encoder_write_res(ctx
, res
);
1243 util_range_add(&res
->u
.b
, &res
->valid_buffer_range
, buffers
[i
].buffer_offset
,
1244 buffers
[i
].buffer_offset
+ buffers
[i
].buffer_size
);
1245 virgl_resource_dirty(res
, 0);
1247 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1248 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1249 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1255 int virgl_encode_set_shader_images(struct virgl_context
*ctx
,
1256 enum pipe_shader_type shader
,
1257 unsigned start_slot
, unsigned count
,
1258 const struct pipe_image_view
*images
)
1261 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES
, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count
)));
1263 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
1264 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
1265 for (i
= 0; i
< count
; i
++) {
1266 if (images
&& images
[i
].resource
) {
1267 struct virgl_resource
*res
= virgl_resource(images
[i
].resource
);
1268 virgl_encoder_write_dword(ctx
->cbuf
, pipe_to_virgl_format(images
[i
].format
));
1269 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].access
);
1270 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].u
.buf
.offset
);
1271 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].u
.buf
.size
);
1272 virgl_encoder_write_res(ctx
, res
);
1274 if (res
->u
.b
.target
== PIPE_BUFFER
) {
1275 util_range_add(&res
->u
.b
, &res
->valid_buffer_range
, images
[i
].u
.buf
.offset
,
1276 images
[i
].u
.buf
.offset
+ images
[i
].u
.buf
.size
);
1278 virgl_resource_dirty(res
, images
[i
].u
.tex
.level
);
1280 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1281 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1282 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1283 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1284 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1290 int virgl_encode_memory_barrier(struct virgl_context
*ctx
,
1293 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER
, 0, 1));
1294 virgl_encoder_write_dword(ctx
->cbuf
, flags
);
1298 int virgl_encode_launch_grid(struct virgl_context
*ctx
,
1299 const struct pipe_grid_info
*grid_info
)
1301 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID
, 0, VIRGL_LAUNCH_GRID_SIZE
));
1302 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[0]);
1303 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[1]);
1304 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[2]);
1305 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[0]);
1306 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[1]);
1307 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[2]);
1308 if (grid_info
->indirect
) {
1309 struct virgl_resource
*res
= virgl_resource(grid_info
->indirect
);
1310 virgl_encoder_write_res(ctx
, res
);
1312 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1313 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->indirect_offset
);
1317 int virgl_encode_texture_barrier(struct virgl_context
*ctx
,
1320 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER
, 0, 1));
1321 virgl_encoder_write_dword(ctx
->cbuf
, flags
);
1325 int virgl_encode_host_debug_flagstring(struct virgl_context
*ctx
,
1326 const char *flagstring
)
1328 unsigned long slen
= strlen(flagstring
) + 1;
1330 uint32_t string_length
;
1335 if (slen
> 4 * 0xffff) {
1336 debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1340 sslen
= (uint32_t )(slen
+ 3) / 4;
1341 string_length
= (uint32_t)MIN2(sslen
* 4, slen
);
1343 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS
, 0, sslen
));
1344 virgl_encoder_write_block(ctx
->cbuf
, (const uint8_t *)flagstring
, string_length
);
1348 int virgl_encode_tweak(struct virgl_context
*ctx
, enum vrend_tweak_type tweak
, uint32_t value
)
1350 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS
, 0, VIRGL_SET_TWEAKS_SIZE
));
1351 virgl_encoder_write_dword(ctx
->cbuf
, tweak
);
1352 virgl_encoder_write_dword(ctx
->cbuf
, value
);
1357 int virgl_encode_get_query_result_qbo(struct virgl_context
*ctx
,
1359 struct virgl_resource
*res
, boolean wait
,
1360 uint32_t result_type
,
1364 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO
, 0, VIRGL_QUERY_RESULT_QBO_SIZE
));
1365 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1366 virgl_encoder_write_res(ctx
, res
);
1367 virgl_encoder_write_dword(ctx
->cbuf
, wait
? 1 : 0);
1368 virgl_encoder_write_dword(ctx
->cbuf
, result_type
);
1369 virgl_encoder_write_dword(ctx
->cbuf
, offset
);
1370 virgl_encoder_write_dword(ctx
->cbuf
, index
);
1374 void virgl_encode_transfer(struct virgl_screen
*vs
, struct virgl_cmd_buf
*buf
,
1375 struct virgl_transfer
*trans
, uint32_t direction
)
1378 command
= VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D
, 0, VIRGL_TRANSFER3D_SIZE
);
1379 virgl_encoder_write_dword(buf
, command
);
1380 virgl_encoder_transfer3d_common(vs
, buf
, trans
,
1381 virgl_transfer3d_host_inferred_stride
);
1382 virgl_encoder_write_dword(buf
, trans
->offset
);
1383 virgl_encoder_write_dword(buf
, direction
);
1386 void virgl_encode_copy_transfer(struct virgl_context
*ctx
,
1387 struct virgl_transfer
*trans
)
1390 struct virgl_screen
*vs
= virgl_screen(ctx
->base
.screen
);
1392 assert(trans
->copy_src_hw_res
);
1394 command
= VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D
, 0, VIRGL_COPY_TRANSFER3D_SIZE
);
1395 virgl_encoder_write_cmd_dword(ctx
, command
);
1396 /* Copy transfers need to explicitly specify the stride, since it may differ
1397 * from the image stride.
1399 virgl_encoder_transfer3d_common(vs
, ctx
->cbuf
, trans
, virgl_transfer3d_explicit_stride
);
1400 vs
->vws
->emit_res(vs
->vws
, ctx
->cbuf
, trans
->copy_src_hw_res
, TRUE
);
1401 virgl_encoder_write_dword(ctx
->cbuf
, trans
->copy_src_offset
);
1402 /* At the moment all copy transfers are synchronized. */
1403 virgl_encoder_write_dword(ctx
->cbuf
, 1);
1406 void virgl_encode_end_transfers(struct virgl_cmd_buf
*buf
)
1408 uint32_t command
, diff
;
1409 diff
= VIRGL_MAX_TBUF_DWORDS
- buf
->cdw
;
1411 command
= VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS
, 0, diff
- 1);
1412 virgl_encoder_write_dword(buf
, command
);