2 * Copyright 2014, 2015 Red Hat.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 #include "util/format/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virtio-gpu/virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
40 #define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)
42 #define CONV_FORMAT(f) [PIPE_FORMAT_##f] = VIRGL_FORMAT_##f,
44 static const enum virgl_formats virgl_formats_conv_table
[PIPE_FORMAT_COUNT
] = {
45 CONV_FORMAT(B8G8R8A8_UNORM
)
46 CONV_FORMAT(B8G8R8X8_UNORM
)
47 CONV_FORMAT(A8R8G8B8_UNORM
)
48 CONV_FORMAT(X8R8G8B8_UNORM
)
49 CONV_FORMAT(B5G5R5A1_UNORM
)
50 CONV_FORMAT(B4G4R4A4_UNORM
)
51 CONV_FORMAT(B5G6R5_UNORM
)
52 CONV_FORMAT(R10G10B10A2_UNORM
)
55 CONV_FORMAT(L8A8_UNORM
)
56 CONV_FORMAT(L16_UNORM
)
57 CONV_FORMAT(Z16_UNORM
)
58 CONV_FORMAT(Z32_UNORM
)
59 CONV_FORMAT(Z32_FLOAT
)
60 CONV_FORMAT(Z24_UNORM_S8_UINT
)
61 CONV_FORMAT(S8_UINT_Z24_UNORM
)
62 CONV_FORMAT(Z24X8_UNORM
)
63 CONV_FORMAT(X8Z24_UNORM
)
65 CONV_FORMAT(R64_FLOAT
)
66 CONV_FORMAT(R64G64_FLOAT
)
67 CONV_FORMAT(R64G64B64_FLOAT
)
68 CONV_FORMAT(R64G64B64A64_FLOAT
)
69 CONV_FORMAT(R32_FLOAT
)
70 CONV_FORMAT(R32G32_FLOAT
)
71 CONV_FORMAT(R32G32B32_FLOAT
)
72 CONV_FORMAT(R32G32B32A32_FLOAT
)
73 CONV_FORMAT(R32_UNORM
)
74 CONV_FORMAT(R32G32_UNORM
)
75 CONV_FORMAT(R32G32B32_UNORM
)
76 CONV_FORMAT(R32G32B32A32_UNORM
)
77 CONV_FORMAT(R32_USCALED
)
78 CONV_FORMAT(R32G32_USCALED
)
79 CONV_FORMAT(R32G32B32_USCALED
)
80 CONV_FORMAT(R32G32B32A32_USCALED
)
81 CONV_FORMAT(R32_SNORM
)
82 CONV_FORMAT(R32G32_SNORM
)
83 CONV_FORMAT(R32G32B32_SNORM
)
84 CONV_FORMAT(R32G32B32A32_SNORM
)
85 CONV_FORMAT(R32_SSCALED
)
86 CONV_FORMAT(R32G32_SSCALED
)
87 CONV_FORMAT(R32G32B32_SSCALED
)
88 CONV_FORMAT(R32G32B32A32_SSCALED
)
89 CONV_FORMAT(R16_UNORM
)
90 CONV_FORMAT(R16G16_UNORM
)
91 CONV_FORMAT(R16G16B16_UNORM
)
92 CONV_FORMAT(R16G16B16A16_UNORM
)
93 CONV_FORMAT(R16_USCALED
)
94 CONV_FORMAT(R16G16_USCALED
)
95 CONV_FORMAT(R16G16B16_USCALED
)
96 CONV_FORMAT(R16G16B16A16_USCALED
)
97 CONV_FORMAT(R16_SNORM
)
98 CONV_FORMAT(R16G16_SNORM
)
99 CONV_FORMAT(R16G16B16_SNORM
)
100 CONV_FORMAT(R16G16B16A16_SNORM
)
101 CONV_FORMAT(R16_SSCALED
)
102 CONV_FORMAT(R16G16_SSCALED
)
103 CONV_FORMAT(R16G16B16_SSCALED
)
104 CONV_FORMAT(R16G16B16A16_SSCALED
)
105 CONV_FORMAT(R8_UNORM
)
106 CONV_FORMAT(R8G8_UNORM
)
107 CONV_FORMAT(R8G8B8_UNORM
)
108 CONV_FORMAT(R8G8B8A8_UNORM
)
109 CONV_FORMAT(R8_USCALED
)
110 CONV_FORMAT(R8G8_USCALED
)
111 CONV_FORMAT(R8G8B8_USCALED
)
112 CONV_FORMAT(R8G8B8A8_USCALED
)
113 CONV_FORMAT(R8_SNORM
)
114 CONV_FORMAT(R8G8_SNORM
)
115 CONV_FORMAT(R8G8B8_SNORM
)
116 CONV_FORMAT(R8G8B8A8_SNORM
)
117 CONV_FORMAT(R8_SSCALED
)
118 CONV_FORMAT(R8G8_SSCALED
)
119 CONV_FORMAT(R8G8B8_SSCALED
)
120 CONV_FORMAT(R8G8B8A8_SSCALED
)
121 CONV_FORMAT(R16_FLOAT
)
122 CONV_FORMAT(R16G16_FLOAT
)
123 CONV_FORMAT(R16G16B16_FLOAT
)
124 CONV_FORMAT(R16G16B16A16_FLOAT
)
126 CONV_FORMAT(L8A8_SRGB
)
127 CONV_FORMAT(R8G8B8_SRGB
)
128 CONV_FORMAT(A8B8G8R8_SRGB
)
129 CONV_FORMAT(X8B8G8R8_SRGB
)
130 CONV_FORMAT(B8G8R8A8_SRGB
)
131 CONV_FORMAT(B8G8R8X8_SRGB
)
132 CONV_FORMAT(A8R8G8B8_SRGB
)
133 CONV_FORMAT(X8R8G8B8_SRGB
)
134 CONV_FORMAT(R8G8B8A8_SRGB
)
135 CONV_FORMAT(DXT1_RGB
)
136 CONV_FORMAT(DXT1_RGBA
)
137 CONV_FORMAT(DXT3_RGBA
)
138 CONV_FORMAT(DXT5_RGBA
)
139 CONV_FORMAT(DXT1_SRGB
)
140 CONV_FORMAT(DXT1_SRGBA
)
141 CONV_FORMAT(DXT3_SRGBA
)
142 CONV_FORMAT(DXT5_SRGBA
)
143 CONV_FORMAT(RGTC1_UNORM
)
144 CONV_FORMAT(RGTC1_SNORM
)
145 CONV_FORMAT(RGTC2_UNORM
)
146 CONV_FORMAT(RGTC2_SNORM
)
147 CONV_FORMAT(A8B8G8R8_UNORM
)
148 CONV_FORMAT(B5G5R5X1_UNORM
)
149 CONV_FORMAT(R10G10B10A2_USCALED
)
150 CONV_FORMAT(R11G11B10_FLOAT
)
151 CONV_FORMAT(R9G9B9E5_FLOAT
)
152 CONV_FORMAT(Z32_FLOAT_S8X24_UINT
)
153 CONV_FORMAT(B10G10R10A2_UNORM
)
154 CONV_FORMAT(R8G8B8X8_UNORM
)
155 CONV_FORMAT(B4G4R4X4_UNORM
)
156 CONV_FORMAT(X24S8_UINT
)
157 CONV_FORMAT(S8X24_UINT
)
158 CONV_FORMAT(X32_S8X24_UINT
)
159 CONV_FORMAT(B2G3R3_UNORM
)
160 CONV_FORMAT(L16A16_UNORM
)
161 CONV_FORMAT(A16_UNORM
)
162 CONV_FORMAT(I16_UNORM
)
163 CONV_FORMAT(LATC1_UNORM
)
164 CONV_FORMAT(LATC1_SNORM
)
165 CONV_FORMAT(LATC2_UNORM
)
166 CONV_FORMAT(LATC2_SNORM
)
167 CONV_FORMAT(A8_SNORM
)
168 CONV_FORMAT(L8_SNORM
)
169 CONV_FORMAT(L8A8_SNORM
)
170 CONV_FORMAT(A16_SNORM
)
171 CONV_FORMAT(L16_SNORM
)
172 CONV_FORMAT(L16A16_SNORM
)
173 CONV_FORMAT(A16_FLOAT
)
174 CONV_FORMAT(L16_FLOAT
)
175 CONV_FORMAT(L16A16_FLOAT
)
176 CONV_FORMAT(A32_FLOAT
)
177 CONV_FORMAT(L32_FLOAT
)
178 CONV_FORMAT(L32A32_FLOAT
)
185 CONV_FORMAT(R8G8_UINT
)
186 CONV_FORMAT(R8G8B8_UINT
)
187 CONV_FORMAT(R8G8B8A8_UINT
)
189 CONV_FORMAT(R8G8_SINT
)
190 CONV_FORMAT(R8G8B8_SINT
)
191 CONV_FORMAT(R8G8B8A8_SINT
)
192 CONV_FORMAT(R16_UINT
)
193 CONV_FORMAT(R16G16_UINT
)
194 CONV_FORMAT(R16G16B16_UINT
)
195 CONV_FORMAT(R16G16B16A16_UINT
)
196 CONV_FORMAT(R16_SINT
)
197 CONV_FORMAT(R16G16_SINT
)
198 CONV_FORMAT(R16G16B16_SINT
)
199 CONV_FORMAT(R16G16B16A16_SINT
)
200 CONV_FORMAT(R32_UINT
)
201 CONV_FORMAT(R32G32_UINT
)
202 CONV_FORMAT(R32G32B32_UINT
)
203 CONV_FORMAT(R32G32B32A32_UINT
)
204 CONV_FORMAT(R32_SINT
)
205 CONV_FORMAT(R32G32_SINT
)
206 CONV_FORMAT(R32G32B32_SINT
)
207 CONV_FORMAT(R32G32B32A32_SINT
)
210 CONV_FORMAT(L8A8_UINT
)
213 CONV_FORMAT(L8A8_SINT
)
214 CONV_FORMAT(A16_UINT
)
215 CONV_FORMAT(L16_UINT
)
216 CONV_FORMAT(L16A16_UINT
)
217 CONV_FORMAT(A16_SINT
)
218 CONV_FORMAT(L16_SINT
)
219 CONV_FORMAT(L16A16_SINT
)
220 CONV_FORMAT(A32_UINT
)
221 CONV_FORMAT(L32_UINT
)
222 CONV_FORMAT(L32A32_UINT
)
223 CONV_FORMAT(A32_SINT
)
224 CONV_FORMAT(L32_SINT
)
225 CONV_FORMAT(L32A32_SINT
)
226 CONV_FORMAT(R10G10B10A2_SSCALED
)
227 CONV_FORMAT(R10G10B10A2_SNORM
)
228 CONV_FORMAT(B10G10R10A2_SNORM
)
229 CONV_FORMAT(B10G10R10A2_UINT
)
230 CONV_FORMAT(R8G8B8X8_SNORM
)
231 CONV_FORMAT(R8G8B8X8_SRGB
)
232 CONV_FORMAT(R8G8B8X8_UINT
)
233 CONV_FORMAT(R8G8B8X8_SINT
)
234 CONV_FORMAT(B10G10R10X2_UNORM
)
235 CONV_FORMAT(R16G16B16X16_UNORM
)
236 CONV_FORMAT(R16G16B16X16_SNORM
)
237 CONV_FORMAT(R16G16B16X16_FLOAT
)
238 CONV_FORMAT(R16G16B16X16_UINT
)
239 CONV_FORMAT(R16G16B16X16_SINT
)
240 CONV_FORMAT(R32G32B32X32_FLOAT
)
241 CONV_FORMAT(R32G32B32X32_UINT
)
242 CONV_FORMAT(R32G32B32X32_SINT
)
243 CONV_FORMAT(R10G10B10A2_UINT
)
244 CONV_FORMAT(BPTC_RGBA_UNORM
)
245 CONV_FORMAT(BPTC_SRGBA
)
246 CONV_FORMAT(BPTC_RGB_FLOAT
)
247 CONV_FORMAT(BPTC_RGB_UFLOAT
)
248 CONV_FORMAT(R10G10B10X2_UNORM
)
249 CONV_FORMAT(A4B4G4R4_UNORM
)
251 CONV_FORMAT(ETC2_RGB8
)
252 CONV_FORMAT(ETC2_SRGB8
)
253 CONV_FORMAT(ETC2_RGB8A1
)
254 CONV_FORMAT(ETC2_SRGB8A1
)
255 CONV_FORMAT(ETC2_RGBA8
)
256 CONV_FORMAT(ETC2_SRGBA8
)
257 CONV_FORMAT(ETC2_R11_UNORM
)
258 CONV_FORMAT(ETC2_R11_SNORM
)
259 CONV_FORMAT(ETC2_RG11_UNORM
)
260 CONV_FORMAT(ETC2_RG11_SNORM
)
263 enum virgl_formats
pipe_to_virgl_format(enum pipe_format format
)
265 enum virgl_formats vformat
= virgl_formats_conv_table
[format
];
266 if (format
!= PIPE_FORMAT_NONE
&& !vformat
)
267 debug_printf("VIRGL: pipe format %s not in the format table\n", util_format_name(format
));
271 static int virgl_encoder_write_cmd_dword(struct virgl_context
*ctx
,
274 int len
= (dword
>> 16);
276 if ((ctx
->cbuf
->cdw
+ len
+ 1) > VIRGL_MAX_CMDBUF_DWORDS
)
277 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
279 virgl_encoder_write_dword(ctx
->cbuf
, dword
);
283 static void virgl_encoder_emit_resource(struct virgl_screen
*vs
,
284 struct virgl_cmd_buf
*buf
,
285 struct virgl_resource
*res
)
287 struct virgl_winsys
*vws
= vs
->vws
;
288 if (res
&& res
->hw_res
)
289 vws
->emit_res(vws
, buf
, res
->hw_res
, TRUE
);
291 virgl_encoder_write_dword(buf
, 0);
295 static void virgl_encoder_write_res(struct virgl_context
*ctx
,
296 struct virgl_resource
*res
)
298 struct virgl_screen
*vs
= virgl_screen(ctx
->base
.screen
);
299 virgl_encoder_emit_resource(vs
, ctx
->cbuf
, res
);
302 int virgl_encode_bind_object(struct virgl_context
*ctx
,
303 uint32_t handle
, uint32_t object
)
305 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT
, object
, 1));
306 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
310 int virgl_encode_delete_object(struct virgl_context
*ctx
,
311 uint32_t handle
, uint32_t object
)
313 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT
, object
, 1));
314 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
318 int virgl_encode_blend_state(struct virgl_context
*ctx
,
320 const struct pipe_blend_state
*blend_state
)
325 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_BLEND
, VIRGL_OBJ_BLEND_SIZE
));
326 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
329 VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state
->independent_blend_enable
) |
330 VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state
->logicop_enable
) |
331 VIRGL_OBJ_BLEND_S0_DITHER(blend_state
->dither
) |
332 VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state
->alpha_to_coverage
) |
333 VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state
->alpha_to_one
);
335 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
337 tmp
= VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state
->logicop_func
);
338 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
340 for (i
= 0; i
< VIRGL_MAX_COLOR_BUFS
; i
++) {
341 /* We use alpha src factor to pass the advanced blend equation value
342 * to the host. By doing so, we don't have to change the protocol.
344 uint32_t alpha
= (i
== 0 && blend_state
->advanced_blend_func
)
345 ? blend_state
->advanced_blend_func
346 : blend_state
->rt
[i
].alpha_src_factor
;
348 VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state
->rt
[i
].blend_enable
) |
349 VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state
->rt
[i
].rgb_func
) |
350 VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state
->rt
[i
].rgb_src_factor
) |
351 VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state
->rt
[i
].rgb_dst_factor
)|
352 VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state
->rt
[i
].alpha_func
) |
353 VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(alpha
) |
354 VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state
->rt
[i
].alpha_dst_factor
) |
355 VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state
->rt
[i
].colormask
);
356 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
361 int virgl_encode_dsa_state(struct virgl_context
*ctx
,
363 const struct pipe_depth_stencil_alpha_state
*dsa_state
)
367 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_DSA
, VIRGL_OBJ_DSA_SIZE
));
368 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
370 tmp
= VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state
->depth
.enabled
) |
371 VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state
->depth
.writemask
) |
372 VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state
->depth
.func
) |
373 VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state
->alpha
.enabled
) |
374 VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state
->alpha
.func
);
375 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
377 for (i
= 0; i
< 2; i
++) {
378 tmp
= VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state
->stencil
[i
].enabled
) |
379 VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state
->stencil
[i
].func
) |
380 VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state
->stencil
[i
].fail_op
) |
381 VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state
->stencil
[i
].zpass_op
) |
382 VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state
->stencil
[i
].zfail_op
) |
383 VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state
->stencil
[i
].valuemask
) |
384 VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state
->stencil
[i
].writemask
);
385 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
388 virgl_encoder_write_dword(ctx
->cbuf
, fui(dsa_state
->alpha
.ref_value
));
391 int virgl_encode_rasterizer_state(struct virgl_context
*ctx
,
393 const struct pipe_rasterizer_state
*state
)
397 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_RASTERIZER
, VIRGL_OBJ_RS_SIZE
));
398 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
400 tmp
= VIRGL_OBJ_RS_S0_FLATSHADE(state
->flatshade
) |
401 VIRGL_OBJ_RS_S0_DEPTH_CLIP(state
->depth_clip_near
) |
402 VIRGL_OBJ_RS_S0_CLIP_HALFZ(state
->clip_halfz
) |
403 VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state
->rasterizer_discard
) |
404 VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state
->flatshade_first
) |
405 VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state
->light_twoside
) |
406 VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state
->sprite_coord_mode
) |
407 VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state
->point_quad_rasterization
) |
408 VIRGL_OBJ_RS_S0_CULL_FACE(state
->cull_face
) |
409 VIRGL_OBJ_RS_S0_FILL_FRONT(state
->fill_front
) |
410 VIRGL_OBJ_RS_S0_FILL_BACK(state
->fill_back
) |
411 VIRGL_OBJ_RS_S0_SCISSOR(state
->scissor
) |
412 VIRGL_OBJ_RS_S0_FRONT_CCW(state
->front_ccw
) |
413 VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state
->clamp_vertex_color
) |
414 VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state
->clamp_fragment_color
) |
415 VIRGL_OBJ_RS_S0_OFFSET_LINE(state
->offset_line
) |
416 VIRGL_OBJ_RS_S0_OFFSET_POINT(state
->offset_point
) |
417 VIRGL_OBJ_RS_S0_OFFSET_TRI(state
->offset_tri
) |
418 VIRGL_OBJ_RS_S0_POLY_SMOOTH(state
->poly_smooth
) |
419 VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state
->poly_stipple_enable
) |
420 VIRGL_OBJ_RS_S0_POINT_SMOOTH(state
->point_smooth
) |
421 VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state
->point_size_per_vertex
) |
422 VIRGL_OBJ_RS_S0_MULTISAMPLE(state
->multisample
) |
423 VIRGL_OBJ_RS_S0_LINE_SMOOTH(state
->line_smooth
) |
424 VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state
->line_stipple_enable
) |
425 VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state
->line_last_pixel
) |
426 VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state
->half_pixel_center
) |
427 VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state
->bottom_edge_rule
) |
428 VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state
->force_persample_interp
);
430 virgl_encoder_write_dword(ctx
->cbuf
, tmp
); /* S0 */
431 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->point_size
)); /* S1 */
432 virgl_encoder_write_dword(ctx
->cbuf
, state
->sprite_coord_enable
); /* S2 */
433 tmp
= VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state
->line_stipple_pattern
) |
434 VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state
->line_stipple_factor
) |
435 VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state
->clip_plane_enable
);
436 virgl_encoder_write_dword(ctx
->cbuf
, tmp
); /* S3 */
437 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->line_width
)); /* S4 */
438 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_units
)); /* S5 */
439 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_scale
)); /* S6 */
440 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_clamp
)); /* S7 */
444 static void virgl_emit_shader_header(struct virgl_context
*ctx
,
445 uint32_t handle
, uint32_t len
,
446 uint32_t type
, uint32_t offlen
,
449 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SHADER
, len
));
450 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
451 virgl_encoder_write_dword(ctx
->cbuf
, type
);
452 virgl_encoder_write_dword(ctx
->cbuf
, offlen
);
453 virgl_encoder_write_dword(ctx
->cbuf
, num_tokens
);
456 static void virgl_emit_shader_streamout(struct virgl_context
*ctx
,
457 const struct pipe_stream_output_info
*so_info
)
464 num_outputs
= so_info
->num_outputs
;
466 virgl_encoder_write_dword(ctx
->cbuf
, num_outputs
);
468 for (i
= 0; i
< 4; i
++)
469 virgl_encoder_write_dword(ctx
->cbuf
, so_info
->stride
[i
]);
471 for (i
= 0; i
< so_info
->num_outputs
; i
++) {
473 VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info
->output
[i
].register_index
) |
474 VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info
->output
[i
].start_component
) |
475 VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info
->output
[i
].num_components
) |
476 VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info
->output
[i
].output_buffer
) |
477 VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info
->output
[i
].dst_offset
);
478 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
479 virgl_encoder_write_dword(ctx
->cbuf
, so_info
->output
[i
].stream
);
484 int virgl_encode_shader_state(struct virgl_context
*ctx
,
487 const struct pipe_stream_output_info
*so_info
,
488 uint32_t cs_req_local_mem
,
489 const struct tgsi_token
*tokens
)
492 uint32_t shader_len
, len
;
494 int num_tokens
= tgsi_num_tokens(tokens
);
495 int str_total_size
= 65536;
497 uint32_t left_bytes
, base_hdr_size
, strm_hdr_size
, thispass
;
499 str
= CALLOC(1, str_total_size
);
506 bret
= tgsi_dump_str(tokens
, TGSI_DUMP_FLOAT_AS_HEX
, str
, str_total_size
);
508 if (virgl_debug
& VIRGL_DEBUG_VERBOSE
)
509 debug_printf("Failed to translate shader in available space - trying again\n");
510 old_size
= str_total_size
;
511 str_total_size
= 65536 * retry_size
;
513 str
= REALLOC(str
, old_size
, str_total_size
);
517 } while (bret
== false && retry_size
< 1024);
522 if (virgl_debug
& VIRGL_DEBUG_TGSI
)
523 debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str
);
525 shader_len
= strlen(str
) + 1;
527 left_bytes
= shader_len
;
530 strm_hdr_size
= so_info
->num_outputs
? so_info
->num_outputs
* 2 + 4 : 0;
534 uint32_t length
, offlen
;
535 int hdr_len
= base_hdr_size
+ (first_pass
? strm_hdr_size
: 0);
536 if (ctx
->cbuf
->cdw
+ hdr_len
+ 1 >= VIRGL_ENCODE_MAX_DWORDS
)
537 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
539 thispass
= (VIRGL_ENCODE_MAX_DWORDS
- ctx
->cbuf
->cdw
- hdr_len
- 1) * 4;
541 length
= MIN2(thispass
, left_bytes
);
542 len
= ((length
+ 3) / 4) + hdr_len
;
545 offlen
= VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len
);
547 offlen
= VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr
- (uintptr_t)str
) | VIRGL_OBJ_SHADER_OFFSET_CONT
;
549 virgl_emit_shader_header(ctx
, handle
, len
, type
, offlen
, num_tokens
);
551 if (type
== PIPE_SHADER_COMPUTE
)
552 virgl_encoder_write_dword(ctx
->cbuf
, cs_req_local_mem
);
554 virgl_emit_shader_streamout(ctx
, first_pass
? so_info
: NULL
);
556 virgl_encoder_write_block(ctx
->cbuf
, (uint8_t *)sptr
, length
);
560 left_bytes
-= length
;
568 int virgl_encode_clear(struct virgl_context
*ctx
,
570 const union pipe_color_union
*color
,
571 double depth
, unsigned stencil
)
576 STATIC_ASSERT(sizeof(qword
) == sizeof(depth
));
577 memcpy(&qword
, &depth
, sizeof(qword
));
579 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CLEAR
, 0, VIRGL_OBJ_CLEAR_SIZE
));
580 virgl_encoder_write_dword(ctx
->cbuf
, buffers
);
581 for (i
= 0; i
< 4; i
++)
582 virgl_encoder_write_dword(ctx
->cbuf
, color
->ui
[i
]);
583 virgl_encoder_write_qword(ctx
->cbuf
, qword
);
584 virgl_encoder_write_dword(ctx
->cbuf
, stencil
);
588 int virgl_encode_clear_texture(struct virgl_context
*ctx
,
589 struct virgl_resource
*res
,
591 const struct pipe_box
*box
,
594 const struct util_format_description
*desc
= util_format_description(res
->u
.b
.format
);
595 unsigned block_bits
= desc
->block
.bits
;
596 uint32_t arr
[4] = {0};
597 /* The spec describe <data> as a pointer to an array of between one
598 * and four components of texel data that will be used as the source
599 * for the constant fill value.
600 * Here, we are just copying the memory into <arr>. We do not try to
601 * re-create the data array. The host part will take care of interpreting
602 * the memory and applying the correct format to the clear call.
604 memcpy(&arr
, data
, block_bits
/ 8);
606 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CLEAR_TEXTURE
, 0, VIRGL_CLEAR_TEXTURE_SIZE
));
607 virgl_encoder_write_res(ctx
, res
);
608 virgl_encoder_write_dword(ctx
->cbuf
, level
);
609 virgl_encoder_write_dword(ctx
->cbuf
, box
->x
);
610 virgl_encoder_write_dword(ctx
->cbuf
, box
->y
);
611 virgl_encoder_write_dword(ctx
->cbuf
, box
->z
);
612 virgl_encoder_write_dword(ctx
->cbuf
, box
->width
);
613 virgl_encoder_write_dword(ctx
->cbuf
, box
->height
);
614 virgl_encoder_write_dword(ctx
->cbuf
, box
->depth
);
615 for (unsigned i
= 0; i
< 4; i
++)
616 virgl_encoder_write_dword(ctx
->cbuf
, arr
[i
]);
620 int virgl_encoder_set_framebuffer_state(struct virgl_context
*ctx
,
621 const struct pipe_framebuffer_state
*state
)
623 struct virgl_surface
*zsurf
= virgl_surface(state
->zsbuf
);
626 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE
, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state
->nr_cbufs
)));
627 virgl_encoder_write_dword(ctx
->cbuf
, state
->nr_cbufs
);
628 virgl_encoder_write_dword(ctx
->cbuf
, zsurf
? zsurf
->handle
: 0);
629 for (i
= 0; i
< state
->nr_cbufs
; i
++) {
630 struct virgl_surface
*surf
= virgl_surface(state
->cbufs
[i
]);
631 virgl_encoder_write_dword(ctx
->cbuf
, surf
? surf
->handle
: 0);
634 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
635 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_FB_NO_ATTACH
) {
636 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH
, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE
));
637 virgl_encoder_write_dword(ctx
->cbuf
, state
->width
| (state
->height
<< 16));
638 virgl_encoder_write_dword(ctx
->cbuf
, state
->layers
| (state
->samples
<< 16));
643 int virgl_encoder_set_viewport_states(struct virgl_context
*ctx
,
646 const struct pipe_viewport_state
*states
)
649 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE
, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports
)));
650 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
651 for (v
= 0; v
< num_viewports
; v
++) {
652 for (i
= 0; i
< 3; i
++)
653 virgl_encoder_write_dword(ctx
->cbuf
, fui(states
[v
].scale
[i
]));
654 for (i
= 0; i
< 3; i
++)
655 virgl_encoder_write_dword(ctx
->cbuf
, fui(states
[v
].translate
[i
]));
660 int virgl_encoder_create_vertex_elements(struct virgl_context
*ctx
,
662 unsigned num_elements
,
663 const struct pipe_vertex_element
*element
)
666 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_VERTEX_ELEMENTS
, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements
)));
667 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
668 for (i
= 0; i
< num_elements
; i
++) {
669 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].src_offset
);
670 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].instance_divisor
);
671 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].vertex_buffer_index
);
672 virgl_encoder_write_dword(ctx
->cbuf
, pipe_to_virgl_format(element
[i
].src_format
));
677 int virgl_encoder_set_vertex_buffers(struct virgl_context
*ctx
,
678 unsigned num_buffers
,
679 const struct pipe_vertex_buffer
*buffers
)
682 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS
, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers
)));
683 for (i
= 0; i
< num_buffers
; i
++) {
684 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
.resource
);
685 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].stride
);
686 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
687 virgl_encoder_write_res(ctx
, res
);
692 int virgl_encoder_set_index_buffer(struct virgl_context
*ctx
,
693 const struct virgl_indexbuf
*ib
)
695 int length
= VIRGL_SET_INDEX_BUFFER_SIZE(ib
);
696 struct virgl_resource
*res
= NULL
;
698 res
= virgl_resource(ib
->buffer
);
700 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER
, 0, length
));
701 virgl_encoder_write_res(ctx
, res
);
703 virgl_encoder_write_dword(ctx
->cbuf
, ib
->index_size
);
704 virgl_encoder_write_dword(ctx
->cbuf
, ib
->offset
);
709 int virgl_encoder_draw_vbo(struct virgl_context
*ctx
,
710 const struct pipe_draw_info
*info
)
712 uint32_t length
= VIRGL_DRAW_VBO_SIZE
;
713 if (info
->mode
== PIPE_PRIM_PATCHES
)
714 length
= VIRGL_DRAW_VBO_SIZE_TESS
;
716 length
= VIRGL_DRAW_VBO_SIZE_INDIRECT
;
717 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO
, 0, length
));
718 virgl_encoder_write_dword(ctx
->cbuf
, info
->start
);
719 virgl_encoder_write_dword(ctx
->cbuf
, info
->count
);
720 virgl_encoder_write_dword(ctx
->cbuf
, info
->mode
);
721 virgl_encoder_write_dword(ctx
->cbuf
, !!info
->index_size
);
722 virgl_encoder_write_dword(ctx
->cbuf
, info
->instance_count
);
723 virgl_encoder_write_dword(ctx
->cbuf
, info
->index_bias
);
724 virgl_encoder_write_dword(ctx
->cbuf
, info
->start_instance
);
725 virgl_encoder_write_dword(ctx
->cbuf
, info
->primitive_restart
);
726 virgl_encoder_write_dword(ctx
->cbuf
, info
->restart_index
);
727 virgl_encoder_write_dword(ctx
->cbuf
, info
->min_index
);
728 virgl_encoder_write_dword(ctx
->cbuf
, info
->max_index
);
729 if (info
->count_from_stream_output
)
730 virgl_encoder_write_dword(ctx
->cbuf
, info
->count_from_stream_output
->buffer_size
);
732 virgl_encoder_write_dword(ctx
->cbuf
, 0);
733 if (length
>= VIRGL_DRAW_VBO_SIZE_TESS
) {
734 virgl_encoder_write_dword(ctx
->cbuf
, info
->vertices_per_patch
); /* vertices per patch */
735 virgl_encoder_write_dword(ctx
->cbuf
, info
->drawid
); /* drawid */
737 if (length
== VIRGL_DRAW_VBO_SIZE_INDIRECT
) {
738 virgl_encoder_write_res(ctx
, virgl_resource(info
->indirect
->buffer
));
739 virgl_encoder_write_dword(ctx
->cbuf
, info
->indirect
->offset
);
740 virgl_encoder_write_dword(ctx
->cbuf
, info
->indirect
->stride
); /* indirect stride */
741 virgl_encoder_write_dword(ctx
->cbuf
, info
->indirect
->draw_count
); /* indirect draw count */
742 virgl_encoder_write_dword(ctx
->cbuf
, info
->indirect
->indirect_draw_count_offset
); /* indirect draw count offset */
743 if (info
->indirect
->indirect_draw_count
)
744 virgl_encoder_write_res(ctx
, virgl_resource(info
->indirect
->indirect_draw_count
));
746 virgl_encoder_write_dword(ctx
->cbuf
, 0); /* indirect draw count handle */
751 int virgl_encoder_create_surface(struct virgl_context
*ctx
,
753 struct virgl_resource
*res
,
754 const struct pipe_surface
*templat
)
756 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SURFACE
, VIRGL_OBJ_SURFACE_SIZE
));
757 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
758 virgl_encoder_write_res(ctx
, res
);
759 virgl_encoder_write_dword(ctx
->cbuf
, pipe_to_virgl_format(templat
->format
));
761 assert(templat
->texture
->target
!= PIPE_BUFFER
);
762 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.tex
.level
);
763 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.tex
.first_layer
| (templat
->u
.tex
.last_layer
<< 16));
768 int virgl_encoder_create_so_target(struct virgl_context
*ctx
,
770 struct virgl_resource
*res
,
771 unsigned buffer_offset
,
772 unsigned buffer_size
)
774 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_STREAMOUT_TARGET
, VIRGL_OBJ_STREAMOUT_SIZE
));
775 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
776 virgl_encoder_write_res(ctx
, res
);
777 virgl_encoder_write_dword(ctx
->cbuf
, buffer_offset
);
778 virgl_encoder_write_dword(ctx
->cbuf
, buffer_size
);
/* How the stride/layer_stride dwords of a transfer3d command are filled. */
enum virgl_transfer3d_encode_stride {
   /* The stride and layer_stride are explicitly specified in the command. */
   virgl_transfer3d_explicit_stride,
   /* The stride and layer_stride are inferred by the host. In this case, the
    * host will use the image stride and layer_stride for the specified level.
    */
   virgl_transfer3d_host_inferred_stride,
};
791 static void virgl_encoder_transfer3d_common(struct virgl_screen
*vs
,
792 struct virgl_cmd_buf
*buf
,
793 struct virgl_transfer
*xfer
,
794 enum virgl_transfer3d_encode_stride encode_stride
)
797 struct pipe_transfer
*transfer
= &xfer
->base
;
799 unsigned layer_stride
;
801 if (encode_stride
== virgl_transfer3d_explicit_stride
) {
802 stride
= transfer
->stride
;
803 layer_stride
= transfer
->layer_stride
;
804 } else if (encode_stride
== virgl_transfer3d_host_inferred_stride
) {
808 assert(!"Invalid virgl_transfer3d_encode_stride value");
811 /* We cannot use virgl_encoder_emit_resource with transfer->resource here
812 * because transfer->resource might have a different virgl_hw_res than what
813 * this transfer targets, which is saved in xfer->hw_res.
815 vs
->vws
->emit_res(vs
->vws
, buf
, xfer
->hw_res
, TRUE
);
816 virgl_encoder_write_dword(buf
, transfer
->level
);
817 virgl_encoder_write_dword(buf
, transfer
->usage
);
818 virgl_encoder_write_dword(buf
, stride
);
819 virgl_encoder_write_dword(buf
, layer_stride
);
820 virgl_encoder_write_dword(buf
, transfer
->box
.x
);
821 virgl_encoder_write_dword(buf
, transfer
->box
.y
);
822 virgl_encoder_write_dword(buf
, transfer
->box
.z
);
823 virgl_encoder_write_dword(buf
, transfer
->box
.width
);
824 virgl_encoder_write_dword(buf
, transfer
->box
.height
);
825 virgl_encoder_write_dword(buf
, transfer
->box
.depth
);
828 int virgl_encoder_inline_write(struct virgl_context
*ctx
,
829 struct virgl_resource
*res
,
830 unsigned level
, unsigned usage
,
831 const struct pipe_box
*box
,
832 const void *data
, unsigned stride
,
833 unsigned layer_stride
)
835 uint32_t size
= (stride
? stride
: box
->width
) * box
->height
;
836 uint32_t length
, thispass
, left_bytes
;
837 struct virgl_transfer transfer
;
838 struct virgl_screen
*vs
= virgl_screen(ctx
->base
.screen
);
840 transfer
.base
.resource
= &res
->u
.b
;
841 transfer
.hw_res
= res
->hw_res
;
842 transfer
.base
.level
= level
;
843 transfer
.base
.usage
= usage
;
844 transfer
.base
.box
= *box
;
846 length
= 11 + (size
+ 3) / 4;
847 if ((ctx
->cbuf
->cdw
+ length
+ 1) > VIRGL_ENCODE_MAX_DWORDS
) {
848 if (box
->height
> 1 || box
->depth
> 1) {
849 debug_printf("inline transfer failed due to multi dimensions and too large\n");
856 if (ctx
->cbuf
->cdw
+ 12 >= VIRGL_ENCODE_MAX_DWORDS
)
857 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
859 thispass
= (VIRGL_ENCODE_MAX_DWORDS
- ctx
->cbuf
->cdw
- 12) * 4;
861 length
= MIN2(thispass
, left_bytes
);
863 transfer
.base
.box
.width
= length
;
864 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE
, 0, ((length
+ 3) / 4) + 11));
865 virgl_encoder_transfer3d_common(vs
, ctx
->cbuf
, &transfer
,
866 virgl_transfer3d_host_inferred_stride
);
867 virgl_encoder_write_block(ctx
->cbuf
, data
, length
);
868 left_bytes
-= length
;
869 transfer
.base
.box
.x
+= length
;
/* Frontbuffer flush is currently not encoded; kept as a no-op stub.
 * Always returns 0.
 */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}
883 int virgl_encode_sampler_state(struct virgl_context
*ctx
,
885 const struct pipe_sampler_state
*state
)
889 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SAMPLER_STATE
, VIRGL_OBJ_SAMPLER_STATE_SIZE
));
890 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
892 tmp
= VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state
->wrap_s
) |
893 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state
->wrap_t
) |
894 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state
->wrap_r
) |
895 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state
->min_img_filter
) |
896 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state
->min_mip_filter
) |
897 VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state
->mag_img_filter
) |
898 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state
->compare_mode
) |
899 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state
->compare_func
) |
900 VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state
->seamless_cube_map
);
902 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
903 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->lod_bias
));
904 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->min_lod
));
905 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->max_lod
));
906 for (i
= 0; i
< 4; i
++)
907 virgl_encoder_write_dword(ctx
->cbuf
, state
->border_color
.ui
[i
]);
912 int virgl_encode_sampler_view(struct virgl_context
*ctx
,
914 struct virgl_resource
*res
,
915 const struct pipe_sampler_view
*state
)
917 unsigned elem_size
= util_format_get_blocksize(state
->format
);
918 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
920 uint32_t dword_fmt_target
= pipe_to_virgl_format(state
->format
);
921 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SAMPLER_VIEW
, VIRGL_OBJ_SAMPLER_VIEW_SIZE
));
922 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
923 virgl_encoder_write_res(ctx
, res
);
924 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_TEXTURE_VIEW
)
925 dword_fmt_target
|= (state
->target
<< 24);
926 virgl_encoder_write_dword(ctx
->cbuf
, dword_fmt_target
);
927 if (res
->u
.b
.target
== PIPE_BUFFER
) {
928 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.buf
.offset
/ elem_size
);
929 virgl_encoder_write_dword(ctx
->cbuf
, (state
->u
.buf
.offset
+ state
->u
.buf
.size
) / elem_size
- 1);
931 if (res
->metadata
.plane
) {
932 debug_assert(state
->u
.tex
.first_layer
== 0 && state
->u
.tex
.last_layer
== 0);
933 virgl_encoder_write_dword(ctx
->cbuf
, res
->metadata
.plane
);
935 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.tex
.first_layer
| state
->u
.tex
.last_layer
<< 16);
937 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.tex
.first_level
| state
->u
.tex
.last_level
<< 8);
939 tmp
= VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state
->swizzle_r
) |
940 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state
->swizzle_g
) |
941 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state
->swizzle_b
) |
942 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state
->swizzle_a
);
943 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
947 int virgl_encode_set_sampler_views(struct virgl_context
*ctx
,
948 uint32_t shader_type
,
951 struct virgl_sampler_view
**views
)
954 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS
, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views
)));
955 virgl_encoder_write_dword(ctx
->cbuf
, shader_type
);
956 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
957 for (i
= 0; i
< num_views
; i
++) {
958 uint32_t handle
= views
[i
] ? views
[i
]->handle
: 0;
959 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
964 int virgl_encode_bind_sampler_states(struct virgl_context
*ctx
,
965 uint32_t shader_type
,
967 uint32_t num_handles
,
971 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES
, 0, VIRGL_BIND_SAMPLER_STATES(num_handles
)));
972 virgl_encoder_write_dword(ctx
->cbuf
, shader_type
);
973 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
974 for (i
= 0; i
< num_handles
; i
++)
975 virgl_encoder_write_dword(ctx
->cbuf
, handles
[i
]);
979 int virgl_encoder_write_constant_buffer(struct virgl_context
*ctx
,
985 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER
, 0, size
+ 2));
986 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
987 virgl_encoder_write_dword(ctx
->cbuf
, index
);
989 virgl_encoder_write_block(ctx
->cbuf
, data
, size
* 4);
993 int virgl_encoder_set_uniform_buffer(struct virgl_context
*ctx
,
998 struct virgl_resource
*res
)
1000 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER
, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE
));
1001 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
1002 virgl_encoder_write_dword(ctx
->cbuf
, index
);
1003 virgl_encoder_write_dword(ctx
->cbuf
, offset
);
1004 virgl_encoder_write_dword(ctx
->cbuf
, length
);
1005 virgl_encoder_write_res(ctx
, res
);
1010 int virgl_encoder_set_stencil_ref(struct virgl_context
*ctx
,
1011 const struct pipe_stencil_ref
*ref
)
1013 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF
, 0, VIRGL_SET_STENCIL_REF_SIZE
));
1014 virgl_encoder_write_dword(ctx
->cbuf
, VIRGL_STENCIL_REF_VAL(ref
->ref_value
[0] , (ref
->ref_value
[1])));
1018 int virgl_encoder_set_blend_color(struct virgl_context
*ctx
,
1019 const struct pipe_blend_color
*color
)
1022 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR
, 0, VIRGL_SET_BLEND_COLOR_SIZE
));
1023 for (i
= 0; i
< 4; i
++)
1024 virgl_encoder_write_dword(ctx
->cbuf
, fui(color
->color
[i
]));
1028 int virgl_encoder_set_scissor_state(struct virgl_context
*ctx
,
1029 unsigned start_slot
,
1031 const struct pipe_scissor_state
*ss
)
1034 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE
, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors
)));
1035 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
1036 for (i
= 0; i
< num_scissors
; i
++) {
1037 virgl_encoder_write_dword(ctx
->cbuf
, (ss
[i
].minx
| ss
[i
].miny
<< 16));
1038 virgl_encoder_write_dword(ctx
->cbuf
, (ss
[i
].maxx
| ss
[i
].maxy
<< 16));
1043 void virgl_encoder_set_polygon_stipple(struct virgl_context
*ctx
,
1044 const struct pipe_poly_stipple
*ps
)
1047 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE
, 0, VIRGL_POLYGON_STIPPLE_SIZE
));
1048 for (i
= 0; i
< VIRGL_POLYGON_STIPPLE_SIZE
; i
++) {
1049 virgl_encoder_write_dword(ctx
->cbuf
, ps
->stipple
[i
]);
1053 void virgl_encoder_set_sample_mask(struct virgl_context
*ctx
,
1054 unsigned sample_mask
)
1056 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK
, 0, VIRGL_SET_SAMPLE_MASK_SIZE
));
1057 virgl_encoder_write_dword(ctx
->cbuf
, sample_mask
);
1060 void virgl_encoder_set_min_samples(struct virgl_context
*ctx
,
1061 unsigned min_samples
)
1063 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES
, 0, VIRGL_SET_MIN_SAMPLES_SIZE
));
1064 virgl_encoder_write_dword(ctx
->cbuf
, min_samples
);
1067 void virgl_encoder_set_clip_state(struct virgl_context
*ctx
,
1068 const struct pipe_clip_state
*clip
)
1071 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE
, 0, VIRGL_SET_CLIP_STATE_SIZE
));
1072 for (i
= 0; i
< VIRGL_MAX_CLIP_PLANES
; i
++) {
1073 for (j
= 0; j
< 4; j
++) {
1074 virgl_encoder_write_dword(ctx
->cbuf
, fui(clip
->ucp
[i
][j
]));
1079 int virgl_encode_resource_copy_region(struct virgl_context
*ctx
,
1080 struct virgl_resource
*dst_res
,
1082 unsigned dstx
, unsigned dsty
, unsigned dstz
,
1083 struct virgl_resource
*src_res
,
1085 const struct pipe_box
*src_box
)
1087 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION
, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE
));
1088 virgl_encoder_write_res(ctx
, dst_res
);
1089 virgl_encoder_write_dword(ctx
->cbuf
, dst_level
);
1090 virgl_encoder_write_dword(ctx
->cbuf
, dstx
);
1091 virgl_encoder_write_dword(ctx
->cbuf
, dsty
);
1092 virgl_encoder_write_dword(ctx
->cbuf
, dstz
);
1093 virgl_encoder_write_res(ctx
, src_res
);
1094 virgl_encoder_write_dword(ctx
->cbuf
, src_level
);
1095 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->x
);
1096 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->y
);
1097 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->z
);
1098 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->width
);
1099 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->height
);
1100 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->depth
);
1104 int virgl_encode_blit(struct virgl_context
*ctx
,
1105 struct virgl_resource
*dst_res
,
1106 struct virgl_resource
*src_res
,
1107 const struct pipe_blit_info
*blit
)
1110 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BLIT
, 0, VIRGL_CMD_BLIT_SIZE
));
1111 tmp
= VIRGL_CMD_BLIT_S0_MASK(blit
->mask
) |
1112 VIRGL_CMD_BLIT_S0_FILTER(blit
->filter
) |
1113 VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit
->scissor_enable
) |
1114 VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit
->render_condition_enable
) |
1115 VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit
->alpha_blend
);
1116 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
1117 virgl_encoder_write_dword(ctx
->cbuf
, (blit
->scissor
.minx
| blit
->scissor
.miny
<< 16));
1118 virgl_encoder_write_dword(ctx
->cbuf
, (blit
->scissor
.maxx
| blit
->scissor
.maxy
<< 16));
1120 virgl_encoder_write_res(ctx
, dst_res
);
1121 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.level
);
1122 virgl_encoder_write_dword(ctx
->cbuf
, pipe_to_virgl_format(blit
->dst
.format
));
1123 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.x
);
1124 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.y
);
1125 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.z
);
1126 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.width
);
1127 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.height
);
1128 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.depth
);
1130 virgl_encoder_write_res(ctx
, src_res
);
1131 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.level
);
1132 virgl_encoder_write_dword(ctx
->cbuf
, pipe_to_virgl_format(blit
->src
.format
));
1133 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.x
);
1134 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.y
);
1135 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.z
);
1136 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.width
);
1137 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.height
);
1138 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.depth
);
1142 int virgl_encoder_create_query(struct virgl_context
*ctx
,
1146 struct virgl_resource
*res
,
1149 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_QUERY
, VIRGL_OBJ_QUERY_SIZE
));
1150 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1151 virgl_encoder_write_dword(ctx
->cbuf
, ((query_type
& 0xffff) | (query_index
<< 16)));
1152 virgl_encoder_write_dword(ctx
->cbuf
, offset
);
1153 virgl_encoder_write_res(ctx
, res
);
1157 int virgl_encoder_begin_query(struct virgl_context
*ctx
,
1160 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY
, 0, 1));
1161 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1165 int virgl_encoder_end_query(struct virgl_context
*ctx
,
1168 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_END_QUERY
, 0, 1));
1169 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1173 int virgl_encoder_get_query_result(struct virgl_context
*ctx
,
1174 uint32_t handle
, boolean wait
)
1176 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT
, 0, 2));
1177 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1178 virgl_encoder_write_dword(ctx
->cbuf
, wait
? 1 : 0);
1182 int virgl_encoder_render_condition(struct virgl_context
*ctx
,
1183 uint32_t handle
, boolean condition
,
1184 enum pipe_render_cond_flag mode
)
1186 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION
, 0, VIRGL_RENDER_CONDITION_SIZE
));
1187 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1188 virgl_encoder_write_dword(ctx
->cbuf
, condition
);
1189 virgl_encoder_write_dword(ctx
->cbuf
, mode
);
1193 int virgl_encoder_set_so_targets(struct virgl_context
*ctx
,
1194 unsigned num_targets
,
1195 struct pipe_stream_output_target
**targets
,
1196 unsigned append_bitmask
)
1200 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS
, 0, num_targets
+ 1));
1201 virgl_encoder_write_dword(ctx
->cbuf
, append_bitmask
);
1202 for (i
= 0; i
< num_targets
; i
++) {
1203 struct virgl_so_target
*tg
= virgl_so_target(targets
[i
]);
1204 virgl_encoder_write_dword(ctx
->cbuf
, tg
? tg
->handle
: 0);
1210 int virgl_encoder_set_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
1212 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX
, 0, 1));
1213 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
1217 int virgl_encoder_create_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
1219 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX
, 0, 1));
1220 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
1224 int virgl_encoder_destroy_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
1226 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX
, 0, 1));
1227 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
1231 int virgl_encode_bind_shader(struct virgl_context
*ctx
,
1232 uint32_t handle
, uint32_t type
)
1234 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER
, 0, 2));
1235 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1236 virgl_encoder_write_dword(ctx
->cbuf
, type
);
1240 int virgl_encode_set_tess_state(struct virgl_context
*ctx
,
1241 const float outer
[4],
1242 const float inner
[2])
1245 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE
, 0, 6));
1246 for (i
= 0; i
< 4; i
++)
1247 virgl_encoder_write_dword(ctx
->cbuf
, fui(outer
[i
]));
1248 for (i
= 0; i
< 2; i
++)
1249 virgl_encoder_write_dword(ctx
->cbuf
, fui(inner
[i
]));
1253 int virgl_encode_set_shader_buffers(struct virgl_context
*ctx
,
1254 enum pipe_shader_type shader
,
1255 unsigned start_slot
, unsigned count
,
1256 const struct pipe_shader_buffer
*buffers
)
1259 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS
, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count
)));
1261 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
1262 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
1263 for (i
= 0; i
< count
; i
++) {
1264 if (buffers
&& buffers
[i
].buffer
) {
1265 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
);
1266 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
1267 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_size
);
1268 virgl_encoder_write_res(ctx
, res
);
1270 util_range_add(&res
->u
.b
, &res
->valid_buffer_range
, buffers
[i
].buffer_offset
,
1271 buffers
[i
].buffer_offset
+ buffers
[i
].buffer_size
);
1272 virgl_resource_dirty(res
, 0);
1274 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1275 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1276 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1282 int virgl_encode_set_hw_atomic_buffers(struct virgl_context
*ctx
,
1283 unsigned start_slot
, unsigned count
,
1284 const struct pipe_shader_buffer
*buffers
)
1287 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS
, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count
)));
1289 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
1290 for (i
= 0; i
< count
; i
++) {
1291 if (buffers
&& buffers
[i
].buffer
) {
1292 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
);
1293 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
1294 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_size
);
1295 virgl_encoder_write_res(ctx
, res
);
1297 util_range_add(&res
->u
.b
, &res
->valid_buffer_range
, buffers
[i
].buffer_offset
,
1298 buffers
[i
].buffer_offset
+ buffers
[i
].buffer_size
);
1299 virgl_resource_dirty(res
, 0);
1301 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1302 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1303 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1309 int virgl_encode_set_shader_images(struct virgl_context
*ctx
,
1310 enum pipe_shader_type shader
,
1311 unsigned start_slot
, unsigned count
,
1312 const struct pipe_image_view
*images
)
1315 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES
, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count
)));
1317 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
1318 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
1319 for (i
= 0; i
< count
; i
++) {
1320 if (images
&& images
[i
].resource
) {
1321 struct virgl_resource
*res
= virgl_resource(images
[i
].resource
);
1322 virgl_encoder_write_dword(ctx
->cbuf
, pipe_to_virgl_format(images
[i
].format
));
1323 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].access
);
1324 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].u
.buf
.offset
);
1325 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].u
.buf
.size
);
1326 virgl_encoder_write_res(ctx
, res
);
1328 if (res
->u
.b
.target
== PIPE_BUFFER
) {
1329 util_range_add(&res
->u
.b
, &res
->valid_buffer_range
, images
[i
].u
.buf
.offset
,
1330 images
[i
].u
.buf
.offset
+ images
[i
].u
.buf
.size
);
1332 virgl_resource_dirty(res
, images
[i
].u
.tex
.level
);
1334 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1335 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1336 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1337 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1338 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1344 int virgl_encode_memory_barrier(struct virgl_context
*ctx
,
1347 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER
, 0, 1));
1348 virgl_encoder_write_dword(ctx
->cbuf
, flags
);
1352 int virgl_encode_launch_grid(struct virgl_context
*ctx
,
1353 const struct pipe_grid_info
*grid_info
)
1355 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID
, 0, VIRGL_LAUNCH_GRID_SIZE
));
1356 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[0]);
1357 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[1]);
1358 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[2]);
1359 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[0]);
1360 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[1]);
1361 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[2]);
1362 if (grid_info
->indirect
) {
1363 struct virgl_resource
*res
= virgl_resource(grid_info
->indirect
);
1364 virgl_encoder_write_res(ctx
, res
);
1366 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1367 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->indirect_offset
);
1371 int virgl_encode_texture_barrier(struct virgl_context
*ctx
,
1374 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER
, 0, 1));
1375 virgl_encoder_write_dword(ctx
->cbuf
, flags
);
1379 int virgl_encode_host_debug_flagstring(struct virgl_context
*ctx
,
1380 const char *flagstring
)
1382 unsigned long slen
= strlen(flagstring
) + 1;
1384 uint32_t string_length
;
1389 if (slen
> 4 * 0xffff) {
1390 debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1394 sslen
= (uint32_t )(slen
+ 3) / 4;
1395 string_length
= (uint32_t)MIN2(sslen
* 4, slen
);
1397 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS
, 0, sslen
));
1398 virgl_encoder_write_block(ctx
->cbuf
, (const uint8_t *)flagstring
, string_length
);
1402 int virgl_encode_tweak(struct virgl_context
*ctx
, enum vrend_tweak_type tweak
, uint32_t value
)
1404 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS
, 0, VIRGL_SET_TWEAKS_SIZE
));
1405 virgl_encoder_write_dword(ctx
->cbuf
, tweak
);
1406 virgl_encoder_write_dword(ctx
->cbuf
, value
);
1411 int virgl_encode_get_query_result_qbo(struct virgl_context
*ctx
,
1413 struct virgl_resource
*res
, boolean wait
,
1414 uint32_t result_type
,
1418 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO
, 0, VIRGL_QUERY_RESULT_QBO_SIZE
));
1419 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
1420 virgl_encoder_write_res(ctx
, res
);
1421 virgl_encoder_write_dword(ctx
->cbuf
, wait
? 1 : 0);
1422 virgl_encoder_write_dword(ctx
->cbuf
, result_type
);
1423 virgl_encoder_write_dword(ctx
->cbuf
, offset
);
1424 virgl_encoder_write_dword(ctx
->cbuf
, index
);
1428 void virgl_encode_transfer(struct virgl_screen
*vs
, struct virgl_cmd_buf
*buf
,
1429 struct virgl_transfer
*trans
, uint32_t direction
)
1432 command
= VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D
, 0, VIRGL_TRANSFER3D_SIZE
);
1433 virgl_encoder_write_dword(buf
, command
);
1434 virgl_encoder_transfer3d_common(vs
, buf
, trans
,
1435 virgl_transfer3d_host_inferred_stride
);
1436 virgl_encoder_write_dword(buf
, trans
->offset
);
1437 virgl_encoder_write_dword(buf
, direction
);
1440 void virgl_encode_copy_transfer(struct virgl_context
*ctx
,
1441 struct virgl_transfer
*trans
)
1444 struct virgl_screen
*vs
= virgl_screen(ctx
->base
.screen
);
1446 assert(trans
->copy_src_hw_res
);
1448 command
= VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D
, 0, VIRGL_COPY_TRANSFER3D_SIZE
);
1449 virgl_encoder_write_cmd_dword(ctx
, command
);
1450 /* Copy transfers need to explicitly specify the stride, since it may differ
1451 * from the image stride.
1453 virgl_encoder_transfer3d_common(vs
, ctx
->cbuf
, trans
, virgl_transfer3d_explicit_stride
);
1454 vs
->vws
->emit_res(vs
->vws
, ctx
->cbuf
, trans
->copy_src_hw_res
, TRUE
);
1455 virgl_encoder_write_dword(ctx
->cbuf
, trans
->copy_src_offset
);
1456 /* At the moment all copy transfers are synchronized. */
1457 virgl_encoder_write_dword(ctx
->cbuf
, 1);
1460 void virgl_encode_end_transfers(struct virgl_cmd_buf
*buf
)
1462 uint32_t command
, diff
;
1463 diff
= VIRGL_MAX_TBUF_DWORDS
- buf
->cdw
;
1465 command
= VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS
, 0, diff
- 1);
1466 virgl_encoder_write_dword(buf
, command
);