2 * Copyright 2014, 2015 Red Hat.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 #include "util/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
40 static int virgl_encoder_write_cmd_dword(struct virgl_context
*ctx
,
43 int len
= (dword
>> 16);
45 if ((ctx
->cbuf
->cdw
+ len
+ 1) > VIRGL_MAX_CMDBUF_DWORDS
)
46 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
48 virgl_encoder_write_dword(ctx
->cbuf
, dword
);
52 static void virgl_encoder_write_res(struct virgl_context
*ctx
,
53 struct virgl_resource
*res
)
55 struct virgl_winsys
*vws
= virgl_screen(ctx
->base
.screen
)->vws
;
57 if (res
&& res
->hw_res
)
58 vws
->emit_res(vws
, ctx
->cbuf
, res
->hw_res
, TRUE
);
60 virgl_encoder_write_dword(ctx
->cbuf
, 0);
64 int virgl_encode_bind_object(struct virgl_context
*ctx
,
65 uint32_t handle
, uint32_t object
)
67 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT
, object
, 1));
68 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
72 int virgl_encode_delete_object(struct virgl_context
*ctx
,
73 uint32_t handle
, uint32_t object
)
75 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT
, object
, 1));
76 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
80 int virgl_encode_blend_state(struct virgl_context
*ctx
,
82 const struct pipe_blend_state
*blend_state
)
87 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_BLEND
, VIRGL_OBJ_BLEND_SIZE
));
88 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
91 VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state
->independent_blend_enable
) |
92 VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state
->logicop_enable
) |
93 VIRGL_OBJ_BLEND_S0_DITHER(blend_state
->dither
) |
94 VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state
->alpha_to_coverage
) |
95 VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state
->alpha_to_one
);
97 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
99 tmp
= VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state
->logicop_func
);
100 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
102 for (i
= 0; i
< VIRGL_MAX_COLOR_BUFS
; i
++) {
104 VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state
->rt
[i
].blend_enable
) |
105 VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state
->rt
[i
].rgb_func
) |
106 VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state
->rt
[i
].rgb_src_factor
) |
107 VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state
->rt
[i
].rgb_dst_factor
)|
108 VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state
->rt
[i
].alpha_func
) |
109 VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(blend_state
->rt
[i
].alpha_src_factor
) |
110 VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state
->rt
[i
].alpha_dst_factor
) |
111 VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state
->rt
[i
].colormask
);
112 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
117 int virgl_encode_dsa_state(struct virgl_context
*ctx
,
119 const struct pipe_depth_stencil_alpha_state
*dsa_state
)
123 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_DSA
, VIRGL_OBJ_DSA_SIZE
));
124 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
126 tmp
= VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state
->depth
.enabled
) |
127 VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state
->depth
.writemask
) |
128 VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state
->depth
.func
) |
129 VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state
->alpha
.enabled
) |
130 VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state
->alpha
.func
);
131 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
133 for (i
= 0; i
< 2; i
++) {
134 tmp
= VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state
->stencil
[i
].enabled
) |
135 VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state
->stencil
[i
].func
) |
136 VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state
->stencil
[i
].fail_op
) |
137 VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state
->stencil
[i
].zpass_op
) |
138 VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state
->stencil
[i
].zfail_op
) |
139 VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state
->stencil
[i
].valuemask
) |
140 VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state
->stencil
[i
].writemask
);
141 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
144 virgl_encoder_write_dword(ctx
->cbuf
, fui(dsa_state
->alpha
.ref_value
));
147 int virgl_encode_rasterizer_state(struct virgl_context
*ctx
,
149 const struct pipe_rasterizer_state
*state
)
153 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_RASTERIZER
, VIRGL_OBJ_RS_SIZE
));
154 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
156 tmp
= VIRGL_OBJ_RS_S0_FLATSHADE(state
->flatshade
) |
157 VIRGL_OBJ_RS_S0_DEPTH_CLIP(state
->depth_clip_near
) |
158 VIRGL_OBJ_RS_S0_CLIP_HALFZ(state
->clip_halfz
) |
159 VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state
->rasterizer_discard
) |
160 VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state
->flatshade_first
) |
161 VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state
->light_twoside
) |
162 VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state
->sprite_coord_mode
) |
163 VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state
->point_quad_rasterization
) |
164 VIRGL_OBJ_RS_S0_CULL_FACE(state
->cull_face
) |
165 VIRGL_OBJ_RS_S0_FILL_FRONT(state
->fill_front
) |
166 VIRGL_OBJ_RS_S0_FILL_BACK(state
->fill_back
) |
167 VIRGL_OBJ_RS_S0_SCISSOR(state
->scissor
) |
168 VIRGL_OBJ_RS_S0_FRONT_CCW(state
->front_ccw
) |
169 VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state
->clamp_vertex_color
) |
170 VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state
->clamp_fragment_color
) |
171 VIRGL_OBJ_RS_S0_OFFSET_LINE(state
->offset_line
) |
172 VIRGL_OBJ_RS_S0_OFFSET_POINT(state
->offset_point
) |
173 VIRGL_OBJ_RS_S0_OFFSET_TRI(state
->offset_tri
) |
174 VIRGL_OBJ_RS_S0_POLY_SMOOTH(state
->poly_smooth
) |
175 VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state
->poly_stipple_enable
) |
176 VIRGL_OBJ_RS_S0_POINT_SMOOTH(state
->point_smooth
) |
177 VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state
->point_size_per_vertex
) |
178 VIRGL_OBJ_RS_S0_MULTISAMPLE(state
->multisample
) |
179 VIRGL_OBJ_RS_S0_LINE_SMOOTH(state
->line_smooth
) |
180 VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state
->line_stipple_enable
) |
181 VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state
->line_last_pixel
) |
182 VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state
->half_pixel_center
) |
183 VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state
->bottom_edge_rule
) |
184 VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state
->force_persample_interp
);
186 virgl_encoder_write_dword(ctx
->cbuf
, tmp
); /* S0 */
187 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->point_size
)); /* S1 */
188 virgl_encoder_write_dword(ctx
->cbuf
, state
->sprite_coord_enable
); /* S2 */
189 tmp
= VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state
->line_stipple_pattern
) |
190 VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state
->line_stipple_factor
) |
191 VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state
->clip_plane_enable
);
192 virgl_encoder_write_dword(ctx
->cbuf
, tmp
); /* S3 */
193 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->line_width
)); /* S4 */
194 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_units
)); /* S5 */
195 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_scale
)); /* S6 */
196 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_clamp
)); /* S7 */
200 static void virgl_emit_shader_header(struct virgl_context
*ctx
,
201 uint32_t handle
, uint32_t len
,
202 uint32_t type
, uint32_t offlen
,
205 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SHADER
, len
));
206 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
207 virgl_encoder_write_dword(ctx
->cbuf
, type
);
208 virgl_encoder_write_dword(ctx
->cbuf
, offlen
);
209 virgl_encoder_write_dword(ctx
->cbuf
, num_tokens
);
212 static void virgl_emit_shader_streamout(struct virgl_context
*ctx
,
213 const struct pipe_stream_output_info
*so_info
)
220 num_outputs
= so_info
->num_outputs
;
222 virgl_encoder_write_dword(ctx
->cbuf
, num_outputs
);
224 for (i
= 0; i
< 4; i
++)
225 virgl_encoder_write_dword(ctx
->cbuf
, so_info
->stride
[i
]);
227 for (i
= 0; i
< so_info
->num_outputs
; i
++) {
229 VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info
->output
[i
].register_index
) |
230 VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info
->output
[i
].start_component
) |
231 VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info
->output
[i
].num_components
) |
232 VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info
->output
[i
].output_buffer
) |
233 VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info
->output
[i
].dst_offset
);
234 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
235 virgl_encoder_write_dword(ctx
->cbuf
, so_info
->output
[i
].stream
);
240 int virgl_encode_shader_state(struct virgl_context
*ctx
,
243 const struct pipe_stream_output_info
*so_info
,
244 uint32_t cs_req_local_mem
,
245 const struct tgsi_token
*tokens
)
248 uint32_t shader_len
, len
;
250 int num_tokens
= tgsi_num_tokens(tokens
);
251 int str_total_size
= 65536;
253 uint32_t left_bytes
, base_hdr_size
, strm_hdr_size
, thispass
;
255 str
= CALLOC(1, str_total_size
);
262 bret
= tgsi_dump_str(tokens
, TGSI_DUMP_FLOAT_AS_HEX
, str
, str_total_size
);
264 if (virgl_debug
& VIRGL_DEBUG_VERBOSE
)
265 debug_printf("Failed to translate shader in available space - trying again\n");
266 old_size
= str_total_size
;
267 str_total_size
= 65536 * ++retry_size
;
268 str
= REALLOC(str
, old_size
, str_total_size
);
272 } while (bret
== false && retry_size
< 10);
277 if (virgl_debug
& VIRGL_DEBUG_TGSI
)
278 debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str
);
280 shader_len
= strlen(str
) + 1;
282 left_bytes
= shader_len
;
285 strm_hdr_size
= so_info
->num_outputs
? so_info
->num_outputs
* 2 + 4 : 0;
289 uint32_t length
, offlen
;
290 int hdr_len
= base_hdr_size
+ (first_pass
? strm_hdr_size
: 0);
291 if (ctx
->cbuf
->cdw
+ hdr_len
+ 1 >= VIRGL_MAX_CMDBUF_DWORDS
)
292 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
294 thispass
= (VIRGL_MAX_CMDBUF_DWORDS
- ctx
->cbuf
->cdw
- hdr_len
- 1) * 4;
296 length
= MIN2(thispass
, left_bytes
);
297 len
= ((length
+ 3) / 4) + hdr_len
;
300 offlen
= VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len
);
302 offlen
= VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr
- (uintptr_t)str
) | VIRGL_OBJ_SHADER_OFFSET_CONT
;
304 virgl_emit_shader_header(ctx
, handle
, len
, type
, offlen
, num_tokens
);
306 if (type
== PIPE_SHADER_COMPUTE
)
307 virgl_encoder_write_dword(ctx
->cbuf
, cs_req_local_mem
);
309 virgl_emit_shader_streamout(ctx
, first_pass
? so_info
: NULL
);
311 virgl_encoder_write_block(ctx
->cbuf
, (uint8_t *)sptr
, length
);
315 left_bytes
-= length
;
323 int virgl_encode_clear(struct virgl_context
*ctx
,
325 const union pipe_color_union
*color
,
326 double depth
, unsigned stencil
)
331 STATIC_ASSERT(sizeof(qword
) == sizeof(depth
));
332 memcpy(&qword
, &depth
, sizeof(qword
));
334 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CLEAR
, 0, VIRGL_OBJ_CLEAR_SIZE
));
335 virgl_encoder_write_dword(ctx
->cbuf
, buffers
);
336 for (i
= 0; i
< 4; i
++)
337 virgl_encoder_write_dword(ctx
->cbuf
, color
->ui
[i
]);
338 virgl_encoder_write_qword(ctx
->cbuf
, qword
);
339 virgl_encoder_write_dword(ctx
->cbuf
, stencil
);
343 int virgl_encoder_set_framebuffer_state(struct virgl_context
*ctx
,
344 const struct pipe_framebuffer_state
*state
)
346 struct virgl_surface
*zsurf
= virgl_surface(state
->zsbuf
);
349 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE
, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state
->nr_cbufs
)));
350 virgl_encoder_write_dword(ctx
->cbuf
, state
->nr_cbufs
);
351 virgl_encoder_write_dword(ctx
->cbuf
, zsurf
? zsurf
->handle
: 0);
352 for (i
= 0; i
< state
->nr_cbufs
; i
++) {
353 struct virgl_surface
*surf
= virgl_surface(state
->cbufs
[i
]);
354 virgl_encoder_write_dword(ctx
->cbuf
, surf
? surf
->handle
: 0);
357 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
358 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_FB_NO_ATTACH
) {
359 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH
, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE
));
360 virgl_encoder_write_dword(ctx
->cbuf
, state
->width
| (state
->height
<< 16));
361 virgl_encoder_write_dword(ctx
->cbuf
, state
->layers
| (state
->samples
<< 16));
366 int virgl_encoder_set_viewport_states(struct virgl_context
*ctx
,
369 const struct pipe_viewport_state
*states
)
372 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE
, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports
)));
373 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
374 for (v
= 0; v
< num_viewports
; v
++) {
375 for (i
= 0; i
< 3; i
++)
376 virgl_encoder_write_dword(ctx
->cbuf
, fui(states
[v
].scale
[i
]));
377 for (i
= 0; i
< 3; i
++)
378 virgl_encoder_write_dword(ctx
->cbuf
, fui(states
[v
].translate
[i
]));
383 int virgl_encoder_create_vertex_elements(struct virgl_context
*ctx
,
385 unsigned num_elements
,
386 const struct pipe_vertex_element
*element
)
389 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_VERTEX_ELEMENTS
, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements
)));
390 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
391 for (i
= 0; i
< num_elements
; i
++) {
392 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].src_offset
);
393 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].instance_divisor
);
394 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].vertex_buffer_index
);
395 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].src_format
);
400 int virgl_encoder_set_vertex_buffers(struct virgl_context
*ctx
,
401 unsigned num_buffers
,
402 const struct pipe_vertex_buffer
*buffers
)
405 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS
, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers
)));
406 for (i
= 0; i
< num_buffers
; i
++) {
407 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
.resource
);
408 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].stride
);
409 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
410 virgl_encoder_write_res(ctx
, res
);
415 int virgl_encoder_set_index_buffer(struct virgl_context
*ctx
,
416 const struct virgl_indexbuf
*ib
)
418 int length
= VIRGL_SET_INDEX_BUFFER_SIZE(ib
);
419 struct virgl_resource
*res
= NULL
;
421 res
= virgl_resource(ib
->buffer
);
423 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER
, 0, length
));
424 virgl_encoder_write_res(ctx
, res
);
426 virgl_encoder_write_dword(ctx
->cbuf
, ib
->index_size
);
427 virgl_encoder_write_dword(ctx
->cbuf
, ib
->offset
);
432 int virgl_encoder_draw_vbo(struct virgl_context
*ctx
,
433 const struct pipe_draw_info
*info
)
435 uint32_t length
= VIRGL_DRAW_VBO_SIZE
;
436 if (info
->mode
== PIPE_PRIM_PATCHES
)
437 length
= VIRGL_DRAW_VBO_SIZE_TESS
;
439 length
= VIRGL_DRAW_VBO_SIZE_INDIRECT
;
440 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO
, 0, length
));
441 virgl_encoder_write_dword(ctx
->cbuf
, info
->start
);
442 virgl_encoder_write_dword(ctx
->cbuf
, info
->count
);
443 virgl_encoder_write_dword(ctx
->cbuf
, info
->mode
);
444 virgl_encoder_write_dword(ctx
->cbuf
, !!info
->index_size
);
445 virgl_encoder_write_dword(ctx
->cbuf
, info
->instance_count
);
446 virgl_encoder_write_dword(ctx
->cbuf
, info
->index_bias
);
447 virgl_encoder_write_dword(ctx
->cbuf
, info
->start_instance
);
448 virgl_encoder_write_dword(ctx
->cbuf
, info
->primitive_restart
);
449 virgl_encoder_write_dword(ctx
->cbuf
, info
->restart_index
);
450 virgl_encoder_write_dword(ctx
->cbuf
, info
->min_index
);
451 virgl_encoder_write_dword(ctx
->cbuf
, info
->max_index
);
452 if (info
->count_from_stream_output
)
453 virgl_encoder_write_dword(ctx
->cbuf
, info
->count_from_stream_output
->buffer_size
);
455 virgl_encoder_write_dword(ctx
->cbuf
, 0);
456 if (length
>= VIRGL_DRAW_VBO_SIZE_TESS
) {
457 virgl_encoder_write_dword(ctx
->cbuf
, info
->vertices_per_patch
); /* vertices per patch */
458 virgl_encoder_write_dword(ctx
->cbuf
, info
->drawid
); /* drawid */
460 if (length
== VIRGL_DRAW_VBO_SIZE_INDIRECT
) {
461 virgl_encoder_write_res(ctx
, virgl_resource(info
->indirect
->buffer
));
462 virgl_encoder_write_dword(ctx
->cbuf
, info
->indirect
->offset
);
463 virgl_encoder_write_dword(ctx
->cbuf
, 0); /* indirect stride */
464 virgl_encoder_write_dword(ctx
->cbuf
, 0); /* indirect draw count */
465 virgl_encoder_write_dword(ctx
->cbuf
, 0); /* indirect draw count offset */
466 virgl_encoder_write_dword(ctx
->cbuf
, 0); /* indirect draw count handle */
471 int virgl_encoder_create_surface(struct virgl_context
*ctx
,
473 struct virgl_resource
*res
,
474 const struct pipe_surface
*templat
)
476 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SURFACE
, VIRGL_OBJ_SURFACE_SIZE
));
477 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
478 virgl_encoder_write_res(ctx
, res
);
479 virgl_encoder_write_dword(ctx
->cbuf
, templat
->format
);
480 if (templat
->texture
->target
== PIPE_BUFFER
) {
481 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.buf
.first_element
);
482 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.buf
.last_element
);
485 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.tex
.level
);
486 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.tex
.first_layer
| (templat
->u
.tex
.last_layer
<< 16));
491 int virgl_encoder_create_so_target(struct virgl_context
*ctx
,
493 struct virgl_resource
*res
,
494 unsigned buffer_offset
,
495 unsigned buffer_size
)
497 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_STREAMOUT_TARGET
, VIRGL_OBJ_STREAMOUT_SIZE
));
498 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
499 virgl_encoder_write_res(ctx
, res
);
500 virgl_encoder_write_dword(ctx
->cbuf
, buffer_offset
);
501 virgl_encoder_write_dword(ctx
->cbuf
, buffer_size
);
505 static void virgl_encoder_iw_emit_header_1d(struct virgl_context
*ctx
,
506 struct virgl_resource
*res
,
507 unsigned level
, unsigned usage
,
508 const struct pipe_box
*box
,
509 unsigned stride
, unsigned layer_stride
)
511 virgl_encoder_write_res(ctx
, res
);
512 virgl_encoder_write_dword(ctx
->cbuf
, level
);
513 virgl_encoder_write_dword(ctx
->cbuf
, usage
);
514 virgl_encoder_write_dword(ctx
->cbuf
, stride
);
515 virgl_encoder_write_dword(ctx
->cbuf
, layer_stride
);
516 virgl_encoder_write_dword(ctx
->cbuf
, box
->x
);
517 virgl_encoder_write_dword(ctx
->cbuf
, box
->y
);
518 virgl_encoder_write_dword(ctx
->cbuf
, box
->z
);
519 virgl_encoder_write_dword(ctx
->cbuf
, box
->width
);
520 virgl_encoder_write_dword(ctx
->cbuf
, box
->height
);
521 virgl_encoder_write_dword(ctx
->cbuf
, box
->depth
);
524 int virgl_encoder_inline_write(struct virgl_context
*ctx
,
525 struct virgl_resource
*res
,
526 unsigned level
, unsigned usage
,
527 const struct pipe_box
*box
,
528 const void *data
, unsigned stride
,
529 unsigned layer_stride
)
531 uint32_t size
= (stride
? stride
: box
->width
) * box
->height
;
532 uint32_t length
, thispass
, left_bytes
;
533 struct pipe_box mybox
= *box
;
535 length
= 11 + (size
+ 3) / 4;
536 if ((ctx
->cbuf
->cdw
+ length
+ 1) > VIRGL_MAX_CMDBUF_DWORDS
) {
537 if (box
->height
> 1 || box
->depth
> 1) {
538 debug_printf("inline transfer failed due to multi dimensions and too large\n");
545 if (ctx
->cbuf
->cdw
+ 12 >= VIRGL_MAX_CMDBUF_DWORDS
)
546 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
548 thispass
= (VIRGL_MAX_CMDBUF_DWORDS
- ctx
->cbuf
->cdw
- 12) * 4;
550 length
= MIN2(thispass
, left_bytes
);
552 mybox
.width
= length
;
553 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE
, 0, ((length
+ 3) / 4) + 11));
554 virgl_encoder_iw_emit_header_1d(ctx
, res
, level
, usage
, &mybox
, stride
, layer_stride
);
555 virgl_encoder_write_block(ctx
->cbuf
, data
, length
);
556 left_bytes
-= length
;
/* Placeholder: front-buffer flush encoding is intentionally disabled (the
 * commented-out FLUSH_FRONTUBFFER command below is preserved as a reference).
 */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}
571 int virgl_encode_sampler_state(struct virgl_context
*ctx
,
573 const struct pipe_sampler_state
*state
)
577 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SAMPLER_STATE
, VIRGL_OBJ_SAMPLER_STATE_SIZE
));
578 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
580 tmp
= VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state
->wrap_s
) |
581 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state
->wrap_t
) |
582 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state
->wrap_r
) |
583 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state
->min_img_filter
) |
584 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state
->min_mip_filter
) |
585 VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state
->mag_img_filter
) |
586 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state
->compare_mode
) |
587 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state
->compare_func
) |
588 VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state
->seamless_cube_map
);
590 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
591 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->lod_bias
));
592 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->min_lod
));
593 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->max_lod
));
594 for (i
= 0; i
< 4; i
++)
595 virgl_encoder_write_dword(ctx
->cbuf
, state
->border_color
.ui
[i
]);
600 int virgl_encode_sampler_view(struct virgl_context
*ctx
,
602 struct virgl_resource
*res
,
603 const struct pipe_sampler_view
*state
)
605 unsigned elem_size
= util_format_get_blocksize(state
->format
);
606 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
608 uint32_t dword_fmt_target
= state
->format
;
609 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SAMPLER_VIEW
, VIRGL_OBJ_SAMPLER_VIEW_SIZE
));
610 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
611 virgl_encoder_write_res(ctx
, res
);
612 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_TEXTURE_VIEW
)
613 dword_fmt_target
|= (state
->target
<< 24);
614 virgl_encoder_write_dword(ctx
->cbuf
, dword_fmt_target
);
615 if (res
->u
.b
.target
== PIPE_BUFFER
) {
616 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.buf
.offset
/ elem_size
);
617 virgl_encoder_write_dword(ctx
->cbuf
, (state
->u
.buf
.offset
+ state
->u
.buf
.size
) / elem_size
- 1);
619 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.tex
.first_layer
| state
->u
.tex
.last_layer
<< 16);
620 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.tex
.first_level
| state
->u
.tex
.last_level
<< 8);
622 tmp
= VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state
->swizzle_r
) |
623 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state
->swizzle_g
) |
624 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state
->swizzle_b
) |
625 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state
->swizzle_a
);
626 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
630 int virgl_encode_set_sampler_views(struct virgl_context
*ctx
,
631 uint32_t shader_type
,
634 struct virgl_sampler_view
**views
)
637 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS
, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views
)));
638 virgl_encoder_write_dword(ctx
->cbuf
, shader_type
);
639 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
640 for (i
= 0; i
< num_views
; i
++) {
641 uint32_t handle
= views
[i
] ? views
[i
]->handle
: 0;
642 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
647 int virgl_encode_bind_sampler_states(struct virgl_context
*ctx
,
648 uint32_t shader_type
,
650 uint32_t num_handles
,
654 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES
, 0, VIRGL_BIND_SAMPLER_STATES(num_handles
)));
655 virgl_encoder_write_dword(ctx
->cbuf
, shader_type
);
656 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
657 for (i
= 0; i
< num_handles
; i
++)
658 virgl_encoder_write_dword(ctx
->cbuf
, handles
[i
]);
662 int virgl_encoder_write_constant_buffer(struct virgl_context
*ctx
,
668 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER
, 0, size
+ 2));
669 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
670 virgl_encoder_write_dword(ctx
->cbuf
, index
);
672 virgl_encoder_write_block(ctx
->cbuf
, data
, size
* 4);
676 int virgl_encoder_set_uniform_buffer(struct virgl_context
*ctx
,
681 struct virgl_resource
*res
)
683 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER
, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE
));
684 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
685 virgl_encoder_write_dword(ctx
->cbuf
, index
);
686 virgl_encoder_write_dword(ctx
->cbuf
, offset
);
687 virgl_encoder_write_dword(ctx
->cbuf
, length
);
688 virgl_encoder_write_res(ctx
, res
);
693 int virgl_encoder_set_stencil_ref(struct virgl_context
*ctx
,
694 const struct pipe_stencil_ref
*ref
)
696 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF
, 0, VIRGL_SET_STENCIL_REF_SIZE
));
697 virgl_encoder_write_dword(ctx
->cbuf
, VIRGL_STENCIL_REF_VAL(ref
->ref_value
[0] , (ref
->ref_value
[1])));
701 int virgl_encoder_set_blend_color(struct virgl_context
*ctx
,
702 const struct pipe_blend_color
*color
)
705 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR
, 0, VIRGL_SET_BLEND_COLOR_SIZE
));
706 for (i
= 0; i
< 4; i
++)
707 virgl_encoder_write_dword(ctx
->cbuf
, fui(color
->color
[i
]));
711 int virgl_encoder_set_scissor_state(struct virgl_context
*ctx
,
714 const struct pipe_scissor_state
*ss
)
717 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE
, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors
)));
718 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
719 for (i
= 0; i
< num_scissors
; i
++) {
720 virgl_encoder_write_dword(ctx
->cbuf
, (ss
[i
].minx
| ss
[i
].miny
<< 16));
721 virgl_encoder_write_dword(ctx
->cbuf
, (ss
[i
].maxx
| ss
[i
].maxy
<< 16));
726 void virgl_encoder_set_polygon_stipple(struct virgl_context
*ctx
,
727 const struct pipe_poly_stipple
*ps
)
730 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE
, 0, VIRGL_POLYGON_STIPPLE_SIZE
));
731 for (i
= 0; i
< VIRGL_POLYGON_STIPPLE_SIZE
; i
++) {
732 virgl_encoder_write_dword(ctx
->cbuf
, ps
->stipple
[i
]);
736 void virgl_encoder_set_sample_mask(struct virgl_context
*ctx
,
737 unsigned sample_mask
)
739 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK
, 0, VIRGL_SET_SAMPLE_MASK_SIZE
));
740 virgl_encoder_write_dword(ctx
->cbuf
, sample_mask
);
743 void virgl_encoder_set_min_samples(struct virgl_context
*ctx
,
744 unsigned min_samples
)
746 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES
, 0, VIRGL_SET_MIN_SAMPLES_SIZE
));
747 virgl_encoder_write_dword(ctx
->cbuf
, min_samples
);
750 void virgl_encoder_set_clip_state(struct virgl_context
*ctx
,
751 const struct pipe_clip_state
*clip
)
754 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE
, 0, VIRGL_SET_CLIP_STATE_SIZE
));
755 for (i
= 0; i
< VIRGL_MAX_CLIP_PLANES
; i
++) {
756 for (j
= 0; j
< 4; j
++) {
757 virgl_encoder_write_dword(ctx
->cbuf
, fui(clip
->ucp
[i
][j
]));
762 int virgl_encode_resource_copy_region(struct virgl_context
*ctx
,
763 struct virgl_resource
*dst_res
,
765 unsigned dstx
, unsigned dsty
, unsigned dstz
,
766 struct virgl_resource
*src_res
,
768 const struct pipe_box
*src_box
)
770 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION
, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE
));
771 virgl_encoder_write_res(ctx
, dst_res
);
772 virgl_encoder_write_dword(ctx
->cbuf
, dst_level
);
773 virgl_encoder_write_dword(ctx
->cbuf
, dstx
);
774 virgl_encoder_write_dword(ctx
->cbuf
, dsty
);
775 virgl_encoder_write_dword(ctx
->cbuf
, dstz
);
776 virgl_encoder_write_res(ctx
, src_res
);
777 virgl_encoder_write_dword(ctx
->cbuf
, src_level
);
778 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->x
);
779 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->y
);
780 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->z
);
781 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->width
);
782 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->height
);
783 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->depth
);
787 int virgl_encode_blit(struct virgl_context
*ctx
,
788 struct virgl_resource
*dst_res
,
789 struct virgl_resource
*src_res
,
790 const struct pipe_blit_info
*blit
)
793 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BLIT
, 0, VIRGL_CMD_BLIT_SIZE
));
794 tmp
= VIRGL_CMD_BLIT_S0_MASK(blit
->mask
) |
795 VIRGL_CMD_BLIT_S0_FILTER(blit
->filter
) |
796 VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit
->scissor_enable
) |
797 VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit
->render_condition_enable
) |
798 VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit
->alpha_blend
);
799 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
800 virgl_encoder_write_dword(ctx
->cbuf
, (blit
->scissor
.minx
| blit
->scissor
.miny
<< 16));
801 virgl_encoder_write_dword(ctx
->cbuf
, (blit
->scissor
.maxx
| blit
->scissor
.maxy
<< 16));
803 virgl_encoder_write_res(ctx
, dst_res
);
804 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.level
);
805 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.format
);
806 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.x
);
807 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.y
);
808 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.z
);
809 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.width
);
810 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.height
);
811 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.depth
);
813 virgl_encoder_write_res(ctx
, src_res
);
814 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.level
);
815 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.format
);
816 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.x
);
817 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.y
);
818 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.z
);
819 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.width
);
820 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.height
);
821 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.depth
);
825 int virgl_encoder_create_query(struct virgl_context
*ctx
,
829 struct virgl_resource
*res
,
832 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_QUERY
, VIRGL_OBJ_QUERY_SIZE
));
833 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
834 virgl_encoder_write_dword(ctx
->cbuf
, ((query_type
& 0xffff) | (query_index
<< 16)));
835 virgl_encoder_write_dword(ctx
->cbuf
, offset
);
836 virgl_encoder_write_res(ctx
, res
);
840 int virgl_encoder_begin_query(struct virgl_context
*ctx
,
843 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY
, 0, 1));
844 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
848 int virgl_encoder_end_query(struct virgl_context
*ctx
,
851 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_END_QUERY
, 0, 1));
852 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
856 int virgl_encoder_get_query_result(struct virgl_context
*ctx
,
857 uint32_t handle
, boolean wait
)
859 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT
, 0, 2));
860 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
861 virgl_encoder_write_dword(ctx
->cbuf
, wait
? 1 : 0);
865 int virgl_encoder_render_condition(struct virgl_context
*ctx
,
866 uint32_t handle
, boolean condition
,
867 enum pipe_render_cond_flag mode
)
869 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION
, 0, VIRGL_RENDER_CONDITION_SIZE
));
870 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
871 virgl_encoder_write_dword(ctx
->cbuf
, condition
);
872 virgl_encoder_write_dword(ctx
->cbuf
, mode
);
876 int virgl_encoder_set_so_targets(struct virgl_context
*ctx
,
877 unsigned num_targets
,
878 struct pipe_stream_output_target
**targets
,
879 unsigned append_bitmask
)
883 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS
, 0, num_targets
+ 1));
884 virgl_encoder_write_dword(ctx
->cbuf
, append_bitmask
);
885 for (i
= 0; i
< num_targets
; i
++) {
886 struct virgl_so_target
*tg
= virgl_so_target(targets
[i
]);
887 virgl_encoder_write_dword(ctx
->cbuf
, tg
? tg
->handle
: 0);
893 int virgl_encoder_set_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
895 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX
, 0, 1));
896 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
900 int virgl_encoder_create_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
902 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX
, 0, 1));
903 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
907 int virgl_encoder_destroy_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
909 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX
, 0, 1));
910 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
914 int virgl_encode_bind_shader(struct virgl_context
*ctx
,
915 uint32_t handle
, uint32_t type
)
917 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER
, 0, 2));
918 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
919 virgl_encoder_write_dword(ctx
->cbuf
, type
);
923 int virgl_encode_set_tess_state(struct virgl_context
*ctx
,
924 const float outer
[4],
925 const float inner
[2])
928 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE
, 0, 6));
929 for (i
= 0; i
< 4; i
++)
930 virgl_encoder_write_dword(ctx
->cbuf
, fui(outer
[i
]));
931 for (i
= 0; i
< 2; i
++)
932 virgl_encoder_write_dword(ctx
->cbuf
, fui(inner
[i
]));
936 int virgl_encode_set_shader_buffers(struct virgl_context
*ctx
,
937 enum pipe_shader_type shader
,
938 unsigned start_slot
, unsigned count
,
939 const struct pipe_shader_buffer
*buffers
)
942 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS
, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count
)));
944 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
945 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
946 for (i
= 0; i
< count
; i
++) {
948 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
);
949 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
950 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_size
);
951 virgl_encoder_write_res(ctx
, res
);
953 virgl_encoder_write_dword(ctx
->cbuf
, 0);
954 virgl_encoder_write_dword(ctx
->cbuf
, 0);
955 virgl_encoder_write_dword(ctx
->cbuf
, 0);
961 int virgl_encode_set_hw_atomic_buffers(struct virgl_context
*ctx
,
962 unsigned start_slot
, unsigned count
,
963 const struct pipe_shader_buffer
*buffers
)
966 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS
, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count
)));
968 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
969 for (i
= 0; i
< count
; i
++) {
971 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
);
972 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
973 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_size
);
974 virgl_encoder_write_res(ctx
, res
);
976 virgl_encoder_write_dword(ctx
->cbuf
, 0);
977 virgl_encoder_write_dword(ctx
->cbuf
, 0);
978 virgl_encoder_write_dword(ctx
->cbuf
, 0);
984 int virgl_encode_set_shader_images(struct virgl_context
*ctx
,
985 enum pipe_shader_type shader
,
986 unsigned start_slot
, unsigned count
,
987 const struct pipe_image_view
*images
)
990 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES
, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count
)));
992 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
993 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
994 for (i
= 0; i
< count
; i
++) {
996 struct virgl_resource
*res
= virgl_resource(images
[i
].resource
);
997 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].format
);
998 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].access
);
999 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].u
.buf
.offset
);
1000 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].u
.buf
.size
);
1001 virgl_encoder_write_res(ctx
, res
);
1003 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1004 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1005 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1006 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1007 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1013 int virgl_encode_memory_barrier(struct virgl_context
*ctx
,
1016 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER
, 0, 1));
1017 virgl_encoder_write_dword(ctx
->cbuf
, flags
);
1021 int virgl_encode_launch_grid(struct virgl_context
*ctx
,
1022 const struct pipe_grid_info
*grid_info
)
1024 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID
, 0, VIRGL_LAUNCH_GRID_SIZE
));
1025 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[0]);
1026 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[1]);
1027 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[2]);
1028 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[0]);
1029 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[1]);
1030 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[2]);
1031 if (grid_info
->indirect
) {
1032 struct virgl_resource
*res
= virgl_resource(grid_info
->indirect
);
1033 virgl_encoder_write_res(ctx
, res
);
1035 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1036 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->indirect_offset
);
1040 int virgl_encode_texture_barrier(struct virgl_context
*ctx
,
1043 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER
, 0, 1));
1044 virgl_encoder_write_dword(ctx
->cbuf
, flags
);