/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include "util/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
40 static int virgl_encoder_write_cmd_dword(struct virgl_context
*ctx
,
43 int len
= (dword
>> 16);
45 if ((ctx
->cbuf
->cdw
+ len
+ 1) > VIRGL_MAX_CMDBUF_DWORDS
)
46 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
48 virgl_encoder_write_dword(ctx
->cbuf
, dword
);
52 static void virgl_encoder_write_res(struct virgl_context
*ctx
,
53 struct virgl_resource
*res
)
55 struct virgl_winsys
*vws
= virgl_screen(ctx
->base
.screen
)->vws
;
57 if (res
&& res
->hw_res
)
58 vws
->emit_res(vws
, ctx
->cbuf
, res
->hw_res
, TRUE
);
60 virgl_encoder_write_dword(ctx
->cbuf
, 0);
64 int virgl_encode_bind_object(struct virgl_context
*ctx
,
65 uint32_t handle
, uint32_t object
)
67 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT
, object
, 1));
68 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
72 int virgl_encode_delete_object(struct virgl_context
*ctx
,
73 uint32_t handle
, uint32_t object
)
75 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT
, object
, 1));
76 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
80 int virgl_encode_blend_state(struct virgl_context
*ctx
,
82 const struct pipe_blend_state
*blend_state
)
87 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_BLEND
, VIRGL_OBJ_BLEND_SIZE
));
88 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
91 VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state
->independent_blend_enable
) |
92 VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state
->logicop_enable
) |
93 VIRGL_OBJ_BLEND_S0_DITHER(blend_state
->dither
) |
94 VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state
->alpha_to_coverage
) |
95 VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state
->alpha_to_one
);
97 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
99 tmp
= VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state
->logicop_func
);
100 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
102 for (i
= 0; i
< VIRGL_MAX_COLOR_BUFS
; i
++) {
104 VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state
->rt
[i
].blend_enable
) |
105 VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state
->rt
[i
].rgb_func
) |
106 VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state
->rt
[i
].rgb_src_factor
) |
107 VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state
->rt
[i
].rgb_dst_factor
)|
108 VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state
->rt
[i
].alpha_func
) |
109 VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(blend_state
->rt
[i
].alpha_src_factor
) |
110 VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state
->rt
[i
].alpha_dst_factor
) |
111 VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state
->rt
[i
].colormask
);
112 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
117 int virgl_encode_dsa_state(struct virgl_context
*ctx
,
119 const struct pipe_depth_stencil_alpha_state
*dsa_state
)
123 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_DSA
, VIRGL_OBJ_DSA_SIZE
));
124 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
126 tmp
= VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state
->depth
.enabled
) |
127 VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state
->depth
.writemask
) |
128 VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state
->depth
.func
) |
129 VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state
->alpha
.enabled
) |
130 VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state
->alpha
.func
);
131 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
133 for (i
= 0; i
< 2; i
++) {
134 tmp
= VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state
->stencil
[i
].enabled
) |
135 VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state
->stencil
[i
].func
) |
136 VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state
->stencil
[i
].fail_op
) |
137 VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state
->stencil
[i
].zpass_op
) |
138 VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state
->stencil
[i
].zfail_op
) |
139 VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state
->stencil
[i
].valuemask
) |
140 VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state
->stencil
[i
].writemask
);
141 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
144 virgl_encoder_write_dword(ctx
->cbuf
, fui(dsa_state
->alpha
.ref_value
));
147 int virgl_encode_rasterizer_state(struct virgl_context
*ctx
,
149 const struct pipe_rasterizer_state
*state
)
153 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_RASTERIZER
, VIRGL_OBJ_RS_SIZE
));
154 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
156 tmp
= VIRGL_OBJ_RS_S0_FLATSHADE(state
->flatshade
) |
157 VIRGL_OBJ_RS_S0_DEPTH_CLIP(state
->depth_clip
) |
158 VIRGL_OBJ_RS_S0_CLIP_HALFZ(state
->clip_halfz
) |
159 VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state
->rasterizer_discard
) |
160 VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state
->flatshade_first
) |
161 VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state
->light_twoside
) |
162 VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state
->sprite_coord_mode
) |
163 VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state
->point_quad_rasterization
) |
164 VIRGL_OBJ_RS_S0_CULL_FACE(state
->cull_face
) |
165 VIRGL_OBJ_RS_S0_FILL_FRONT(state
->fill_front
) |
166 VIRGL_OBJ_RS_S0_FILL_BACK(state
->fill_back
) |
167 VIRGL_OBJ_RS_S0_SCISSOR(state
->scissor
) |
168 VIRGL_OBJ_RS_S0_FRONT_CCW(state
->front_ccw
) |
169 VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state
->clamp_vertex_color
) |
170 VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state
->clamp_fragment_color
) |
171 VIRGL_OBJ_RS_S0_OFFSET_LINE(state
->offset_line
) |
172 VIRGL_OBJ_RS_S0_OFFSET_POINT(state
->offset_point
) |
173 VIRGL_OBJ_RS_S0_OFFSET_TRI(state
->offset_tri
) |
174 VIRGL_OBJ_RS_S0_POLY_SMOOTH(state
->poly_smooth
) |
175 VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state
->poly_stipple_enable
) |
176 VIRGL_OBJ_RS_S0_POINT_SMOOTH(state
->point_smooth
) |
177 VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state
->point_size_per_vertex
) |
178 VIRGL_OBJ_RS_S0_MULTISAMPLE(state
->multisample
) |
179 VIRGL_OBJ_RS_S0_LINE_SMOOTH(state
->line_smooth
) |
180 VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state
->line_stipple_enable
) |
181 VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state
->line_last_pixel
) |
182 VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state
->half_pixel_center
) |
183 VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state
->bottom_edge_rule
) |
184 VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state
->force_persample_interp
);
186 virgl_encoder_write_dword(ctx
->cbuf
, tmp
); /* S0 */
187 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->point_size
)); /* S1 */
188 virgl_encoder_write_dword(ctx
->cbuf
, state
->sprite_coord_enable
); /* S2 */
189 tmp
= VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state
->line_stipple_pattern
) |
190 VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state
->line_stipple_factor
) |
191 VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state
->clip_plane_enable
);
192 virgl_encoder_write_dword(ctx
->cbuf
, tmp
); /* S3 */
193 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->line_width
)); /* S4 */
194 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_units
)); /* S5 */
195 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_scale
)); /* S6 */
196 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->offset_clamp
)); /* S7 */
200 static void virgl_emit_shader_header(struct virgl_context
*ctx
,
201 uint32_t handle
, uint32_t len
,
202 uint32_t type
, uint32_t offlen
,
205 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SHADER
, len
));
206 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
207 virgl_encoder_write_dword(ctx
->cbuf
, type
);
208 virgl_encoder_write_dword(ctx
->cbuf
, offlen
);
209 virgl_encoder_write_dword(ctx
->cbuf
, num_tokens
);
212 static void virgl_emit_shader_streamout(struct virgl_context
*ctx
,
213 const struct pipe_stream_output_info
*so_info
)
220 num_outputs
= so_info
->num_outputs
;
222 virgl_encoder_write_dword(ctx
->cbuf
, num_outputs
);
224 for (i
= 0; i
< 4; i
++)
225 virgl_encoder_write_dword(ctx
->cbuf
, so_info
->stride
[i
]);
227 for (i
= 0; i
< so_info
->num_outputs
; i
++) {
229 VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info
->output
[i
].register_index
) |
230 VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info
->output
[i
].start_component
) |
231 VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info
->output
[i
].num_components
) |
232 VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info
->output
[i
].output_buffer
) |
233 VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info
->output
[i
].dst_offset
);
234 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
235 virgl_encoder_write_dword(ctx
->cbuf
, so_info
->output
[i
].stream
);
/* Encode a CREATE_OBJECT/SHADER command for the given TGSI tokens.  The
 * shader is sent as text produced by tgsi_dump_str(); because the text can
 * exceed the remaining command-buffer space it is emitted in chunks, each
 * with its own header, with the stream-output info only on the first chunk.
 * NOTE(review): several original lines are missing from this extract
 * (some parameter lines, the retry/chunk loop openings and error paths);
 * the comments below describe only what the visible fragments establish. */
240 int virgl_encode_shader_state(struct virgl_context
*ctx
,
243 const struct pipe_stream_output_info
*so_info
,
244 uint32_t cs_req_local_mem
,
245 const struct tgsi_token
*tokens
)
/* shader_len: size of the dumped shader text (bytes, incl. NUL);
 * len: dword length of the current chunk's command. */
248 uint32_t shader_len
, len
;
250 int num_tokens
= tgsi_num_tokens(tokens
);
/* Initial text buffer size; grown below if the dump does not fit. */
251 int str_total_size
= 65536;
253 uint32_t left_bytes
, base_hdr_size
, strm_hdr_size
, thispass
;
255 str
= CALLOC(1, str_total_size
);
/* Dump the TGSI as text; floats as hex so the round trip is lossless. */
262 bret
= tgsi_dump_str(tokens
, TGSI_DUMP_FLOAT_AS_HEX
, str
, str_total_size
);
264 if (virgl_debug
& VIRGL_DEBUG_VERBOSE
)
265 debug_printf("Failed to translate shader in available space - trying again\n");
/* Dump did not fit: grow the buffer by another 64 KiB and retry. */
266 old_size
= str_total_size
;
267 str_total_size
= 65536 * ++retry_size
;
268 str
= REALLOC(str
, old_size
, str_total_size
);
/* Bounded retry: give up after 10 growth attempts. */
272 } while (bret
== false && retry_size
< 10);
277 shader_len
= strlen(str
) + 1;
279 left_bytes
= shader_len
;
/* Stream-out header dwords are only needed on the first chunk. */
282 strm_hdr_size
= so_info
->num_outputs
? so_info
->num_outputs
* 2 + 4 : 0;
286 uint32_t length
, offlen
;
287 int hdr_len
= base_hdr_size
+ (first_pass
? strm_hdr_size
: 0);
/* Flush if even the header would not fit in the command buffer. */
288 if (ctx
->cbuf
->cdw
+ hdr_len
+ 1 > VIRGL_MAX_CMDBUF_DWORDS
)
289 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
/* Bytes of shader text that fit in this pass after the header. */
291 thispass
= (VIRGL_MAX_CMDBUF_DWORDS
- ctx
->cbuf
->cdw
- hdr_len
- 1) * 4;
293 length
= MIN2(thispass
, left_bytes
);
294 len
= ((length
+ 3) / 4) + hdr_len
;
/* First chunk encodes the total length; continuations encode the byte
 * offset into the text plus the CONT flag. */
297 offlen
= VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len
);
299 offlen
= VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr
- (uintptr_t)str
) | VIRGL_OBJ_SHADER_OFFSET_CONT
;
301 virgl_emit_shader_header(ctx
, handle
, len
, type
, offlen
, num_tokens
);
/* Compute shaders carry their required shared-memory size. */
303 if (type
== PIPE_SHADER_COMPUTE
)
304 virgl_encoder_write_dword(ctx
->cbuf
, cs_req_local_mem
);
/* Stream-out descriptors only on the first chunk (NULL afterwards). */
306 virgl_emit_shader_streamout(ctx
, first_pass
? so_info
: NULL
);
308 virgl_encoder_write_block(ctx
->cbuf
, (uint8_t *)sptr
, length
);
312 left_bytes
-= length
;
320 int virgl_encode_clear(struct virgl_context
*ctx
,
322 const union pipe_color_union
*color
,
323 double depth
, unsigned stencil
)
328 STATIC_ASSERT(sizeof(qword
) == sizeof(depth
));
329 memcpy(&qword
, &depth
, sizeof(qword
));
331 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CLEAR
, 0, VIRGL_OBJ_CLEAR_SIZE
));
332 virgl_encoder_write_dword(ctx
->cbuf
, buffers
);
333 for (i
= 0; i
< 4; i
++)
334 virgl_encoder_write_dword(ctx
->cbuf
, color
->ui
[i
]);
335 virgl_encoder_write_qword(ctx
->cbuf
, qword
);
336 virgl_encoder_write_dword(ctx
->cbuf
, stencil
);
340 int virgl_encoder_set_framebuffer_state(struct virgl_context
*ctx
,
341 const struct pipe_framebuffer_state
*state
)
343 struct virgl_surface
*zsurf
= virgl_surface(state
->zsbuf
);
346 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE
, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state
->nr_cbufs
)));
347 virgl_encoder_write_dword(ctx
->cbuf
, state
->nr_cbufs
);
348 virgl_encoder_write_dword(ctx
->cbuf
, zsurf
? zsurf
->handle
: 0);
349 for (i
= 0; i
< state
->nr_cbufs
; i
++) {
350 struct virgl_surface
*surf
= virgl_surface(state
->cbufs
[i
]);
351 virgl_encoder_write_dword(ctx
->cbuf
, surf
? surf
->handle
: 0);
354 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
355 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_FB_NO_ATTACH
) {
356 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH
, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE
));
357 virgl_encoder_write_dword(ctx
->cbuf
, state
->width
| (state
->height
<< 16));
358 virgl_encoder_write_dword(ctx
->cbuf
, state
->layers
| (state
->samples
<< 16));
363 int virgl_encoder_set_viewport_states(struct virgl_context
*ctx
,
366 const struct pipe_viewport_state
*states
)
369 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE
, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports
)));
370 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
371 for (v
= 0; v
< num_viewports
; v
++) {
372 for (i
= 0; i
< 3; i
++)
373 virgl_encoder_write_dword(ctx
->cbuf
, fui(states
[v
].scale
[i
]));
374 for (i
= 0; i
< 3; i
++)
375 virgl_encoder_write_dword(ctx
->cbuf
, fui(states
[v
].translate
[i
]));
380 int virgl_encoder_create_vertex_elements(struct virgl_context
*ctx
,
382 unsigned num_elements
,
383 const struct pipe_vertex_element
*element
)
386 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_VERTEX_ELEMENTS
, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements
)));
387 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
388 for (i
= 0; i
< num_elements
; i
++) {
389 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].src_offset
);
390 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].instance_divisor
);
391 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].vertex_buffer_index
);
392 virgl_encoder_write_dword(ctx
->cbuf
, element
[i
].src_format
);
397 int virgl_encoder_set_vertex_buffers(struct virgl_context
*ctx
,
398 unsigned num_buffers
,
399 const struct pipe_vertex_buffer
*buffers
)
402 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS
, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers
)));
403 for (i
= 0; i
< num_buffers
; i
++) {
404 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
.resource
);
405 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].stride
);
406 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
407 virgl_encoder_write_res(ctx
, res
);
412 int virgl_encoder_set_index_buffer(struct virgl_context
*ctx
,
413 const struct virgl_indexbuf
*ib
)
415 int length
= VIRGL_SET_INDEX_BUFFER_SIZE(ib
);
416 struct virgl_resource
*res
= NULL
;
418 res
= virgl_resource(ib
->buffer
);
420 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER
, 0, length
));
421 virgl_encoder_write_res(ctx
, res
);
423 virgl_encoder_write_dword(ctx
->cbuf
, ib
->index_size
);
424 virgl_encoder_write_dword(ctx
->cbuf
, ib
->offset
);
429 int virgl_encoder_draw_vbo(struct virgl_context
*ctx
,
430 const struct pipe_draw_info
*info
)
432 uint32_t length
= VIRGL_DRAW_VBO_SIZE
;
433 if (info
->mode
== PIPE_PRIM_PATCHES
)
434 length
= VIRGL_DRAW_VBO_SIZE_TESS
;
436 length
= VIRGL_DRAW_VBO_SIZE_INDIRECT
;
437 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO
, 0, length
));
438 virgl_encoder_write_dword(ctx
->cbuf
, info
->start
);
439 virgl_encoder_write_dword(ctx
->cbuf
, info
->count
);
440 virgl_encoder_write_dword(ctx
->cbuf
, info
->mode
);
441 virgl_encoder_write_dword(ctx
->cbuf
, !!info
->index_size
);
442 virgl_encoder_write_dword(ctx
->cbuf
, info
->instance_count
);
443 virgl_encoder_write_dword(ctx
->cbuf
, info
->index_bias
);
444 virgl_encoder_write_dword(ctx
->cbuf
, info
->start_instance
);
445 virgl_encoder_write_dword(ctx
->cbuf
, info
->primitive_restart
);
446 virgl_encoder_write_dword(ctx
->cbuf
, info
->restart_index
);
447 virgl_encoder_write_dword(ctx
->cbuf
, info
->min_index
);
448 virgl_encoder_write_dword(ctx
->cbuf
, info
->max_index
);
449 if (info
->count_from_stream_output
)
450 virgl_encoder_write_dword(ctx
->cbuf
, info
->count_from_stream_output
->buffer_size
);
452 virgl_encoder_write_dword(ctx
->cbuf
, 0);
453 if (length
>= VIRGL_DRAW_VBO_SIZE_TESS
) {
454 virgl_encoder_write_dword(ctx
->cbuf
, info
->vertices_per_patch
); /* vertices per patch */
455 virgl_encoder_write_dword(ctx
->cbuf
, info
->drawid
); /* drawid */
457 if (length
== VIRGL_DRAW_VBO_SIZE_INDIRECT
) {
458 virgl_encoder_write_res(ctx
, virgl_resource(info
->indirect
->buffer
));
459 virgl_encoder_write_dword(ctx
->cbuf
, info
->indirect
->offset
);
460 virgl_encoder_write_dword(ctx
->cbuf
, 0); /* indirect stride */
461 virgl_encoder_write_dword(ctx
->cbuf
, 0); /* indirect draw count */
462 virgl_encoder_write_dword(ctx
->cbuf
, 0); /* indirect draw count offset */
463 virgl_encoder_write_dword(ctx
->cbuf
, 0); /* indirect draw count handle */
468 int virgl_encoder_create_surface(struct virgl_context
*ctx
,
470 struct virgl_resource
*res
,
471 const struct pipe_surface
*templat
)
473 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SURFACE
, VIRGL_OBJ_SURFACE_SIZE
));
474 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
475 virgl_encoder_write_res(ctx
, res
);
476 virgl_encoder_write_dword(ctx
->cbuf
, templat
->format
);
477 if (templat
->texture
->target
== PIPE_BUFFER
) {
478 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.buf
.first_element
);
479 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.buf
.last_element
);
482 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.tex
.level
);
483 virgl_encoder_write_dword(ctx
->cbuf
, templat
->u
.tex
.first_layer
| (templat
->u
.tex
.last_layer
<< 16));
488 int virgl_encoder_create_so_target(struct virgl_context
*ctx
,
490 struct virgl_resource
*res
,
491 unsigned buffer_offset
,
492 unsigned buffer_size
)
494 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_STREAMOUT_TARGET
, VIRGL_OBJ_STREAMOUT_SIZE
));
495 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
496 virgl_encoder_write_res(ctx
, res
);
497 virgl_encoder_write_dword(ctx
->cbuf
, buffer_offset
);
498 virgl_encoder_write_dword(ctx
->cbuf
, buffer_size
);
502 static void virgl_encoder_iw_emit_header_1d(struct virgl_context
*ctx
,
503 struct virgl_resource
*res
,
504 unsigned level
, unsigned usage
,
505 const struct pipe_box
*box
,
506 unsigned stride
, unsigned layer_stride
)
508 virgl_encoder_write_res(ctx
, res
);
509 virgl_encoder_write_dword(ctx
->cbuf
, level
);
510 virgl_encoder_write_dword(ctx
->cbuf
, usage
);
511 virgl_encoder_write_dword(ctx
->cbuf
, stride
);
512 virgl_encoder_write_dword(ctx
->cbuf
, layer_stride
);
513 virgl_encoder_write_dword(ctx
->cbuf
, box
->x
);
514 virgl_encoder_write_dword(ctx
->cbuf
, box
->y
);
515 virgl_encoder_write_dword(ctx
->cbuf
, box
->z
);
516 virgl_encoder_write_dword(ctx
->cbuf
, box
->width
);
517 virgl_encoder_write_dword(ctx
->cbuf
, box
->height
);
518 virgl_encoder_write_dword(ctx
->cbuf
, box
->depth
);
521 int virgl_encoder_inline_write(struct virgl_context
*ctx
,
522 struct virgl_resource
*res
,
523 unsigned level
, unsigned usage
,
524 const struct pipe_box
*box
,
525 const void *data
, unsigned stride
,
526 unsigned layer_stride
)
528 uint32_t size
= (stride
? stride
: box
->width
) * box
->height
;
529 uint32_t length
, thispass
, left_bytes
;
530 struct pipe_box mybox
= *box
;
532 length
= 11 + (size
+ 3) / 4;
533 if ((ctx
->cbuf
->cdw
+ length
+ 1) > VIRGL_MAX_CMDBUF_DWORDS
) {
534 if (box
->height
> 1 || box
->depth
> 1) {
535 debug_printf("inline transfer failed due to multi dimensions and too large\n");
542 if (ctx
->cbuf
->cdw
+ 12 >= VIRGL_MAX_CMDBUF_DWORDS
)
543 ctx
->base
.flush(&ctx
->base
, NULL
, 0);
545 thispass
= (VIRGL_MAX_CMDBUF_DWORDS
- ctx
->cbuf
->cdw
- 12) * 4;
547 length
= MIN2(thispass
, left_bytes
);
549 mybox
.width
= length
;
550 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE
, 0, ((length
+ 3) / 4) + 11));
551 virgl_encoder_iw_emit_header_1d(ctx
, res
, level
, usage
, &mybox
, stride
, layer_stride
);
552 virgl_encoder_write_block(ctx
->cbuf
, data
, length
);
553 left_bytes
-= length
;
/* Intentionally a no-op: the FLUSH_FRONTBUFFER command is not encoded
 * (previous encoding code was removed); kept for interface compatibility. */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
   return 0;
}
568 int virgl_encode_sampler_state(struct virgl_context
*ctx
,
570 const struct pipe_sampler_state
*state
)
574 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SAMPLER_STATE
, VIRGL_OBJ_SAMPLER_STATE_SIZE
));
575 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
577 tmp
= VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state
->wrap_s
) |
578 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state
->wrap_t
) |
579 VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state
->wrap_r
) |
580 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state
->min_img_filter
) |
581 VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state
->min_mip_filter
) |
582 VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state
->mag_img_filter
) |
583 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state
->compare_mode
) |
584 VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state
->compare_func
) |
585 VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state
->seamless_cube_map
);
587 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
588 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->lod_bias
));
589 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->min_lod
));
590 virgl_encoder_write_dword(ctx
->cbuf
, fui(state
->max_lod
));
591 for (i
= 0; i
< 4; i
++)
592 virgl_encoder_write_dword(ctx
->cbuf
, state
->border_color
.ui
[i
]);
597 int virgl_encode_sampler_view(struct virgl_context
*ctx
,
599 struct virgl_resource
*res
,
600 const struct pipe_sampler_view
*state
)
602 unsigned elem_size
= util_format_get_blocksize(state
->format
);
603 struct virgl_screen
*rs
= virgl_screen(ctx
->base
.screen
);
605 uint32_t dword_fmt_target
= state
->format
;
606 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_SAMPLER_VIEW
, VIRGL_OBJ_SAMPLER_VIEW_SIZE
));
607 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
608 virgl_encoder_write_res(ctx
, res
);
609 if (rs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_TEXTURE_VIEW
)
610 dword_fmt_target
|= (state
->target
<< 24);
611 virgl_encoder_write_dword(ctx
->cbuf
, dword_fmt_target
);
612 if (res
->u
.b
.target
== PIPE_BUFFER
) {
613 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.buf
.offset
/ elem_size
);
614 virgl_encoder_write_dword(ctx
->cbuf
, (state
->u
.buf
.offset
+ state
->u
.buf
.size
) / elem_size
- 1);
616 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.tex
.first_layer
| state
->u
.tex
.last_layer
<< 16);
617 virgl_encoder_write_dword(ctx
->cbuf
, state
->u
.tex
.first_level
| state
->u
.tex
.last_level
<< 8);
619 tmp
= VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state
->swizzle_r
) |
620 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state
->swizzle_g
) |
621 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state
->swizzle_b
) |
622 VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state
->swizzle_a
);
623 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
627 int virgl_encode_set_sampler_views(struct virgl_context
*ctx
,
628 uint32_t shader_type
,
631 struct virgl_sampler_view
**views
)
634 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS
, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views
)));
635 virgl_encoder_write_dword(ctx
->cbuf
, shader_type
);
636 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
637 for (i
= 0; i
< num_views
; i
++) {
638 uint32_t handle
= views
[i
] ? views
[i
]->handle
: 0;
639 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
644 int virgl_encode_bind_sampler_states(struct virgl_context
*ctx
,
645 uint32_t shader_type
,
647 uint32_t num_handles
,
651 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES
, 0, VIRGL_BIND_SAMPLER_STATES(num_handles
)));
652 virgl_encoder_write_dword(ctx
->cbuf
, shader_type
);
653 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
654 for (i
= 0; i
< num_handles
; i
++)
655 virgl_encoder_write_dword(ctx
->cbuf
, handles
[i
]);
659 int virgl_encoder_write_constant_buffer(struct virgl_context
*ctx
,
665 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER
, 0, size
+ 2));
666 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
667 virgl_encoder_write_dword(ctx
->cbuf
, index
);
669 virgl_encoder_write_block(ctx
->cbuf
, data
, size
* 4);
673 int virgl_encoder_set_uniform_buffer(struct virgl_context
*ctx
,
678 struct virgl_resource
*res
)
680 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER
, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE
));
681 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
682 virgl_encoder_write_dword(ctx
->cbuf
, index
);
683 virgl_encoder_write_dword(ctx
->cbuf
, offset
);
684 virgl_encoder_write_dword(ctx
->cbuf
, length
);
685 virgl_encoder_write_res(ctx
, res
);
690 int virgl_encoder_set_stencil_ref(struct virgl_context
*ctx
,
691 const struct pipe_stencil_ref
*ref
)
693 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF
, 0, VIRGL_SET_STENCIL_REF_SIZE
));
694 virgl_encoder_write_dword(ctx
->cbuf
, VIRGL_STENCIL_REF_VAL(ref
->ref_value
[0] , (ref
->ref_value
[1])));
698 int virgl_encoder_set_blend_color(struct virgl_context
*ctx
,
699 const struct pipe_blend_color
*color
)
702 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR
, 0, VIRGL_SET_BLEND_COLOR_SIZE
));
703 for (i
= 0; i
< 4; i
++)
704 virgl_encoder_write_dword(ctx
->cbuf
, fui(color
->color
[i
]));
708 int virgl_encoder_set_scissor_state(struct virgl_context
*ctx
,
711 const struct pipe_scissor_state
*ss
)
714 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE
, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors
)));
715 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
716 for (i
= 0; i
< num_scissors
; i
++) {
717 virgl_encoder_write_dword(ctx
->cbuf
, (ss
[i
].minx
| ss
[i
].miny
<< 16));
718 virgl_encoder_write_dword(ctx
->cbuf
, (ss
[i
].maxx
| ss
[i
].maxy
<< 16));
723 void virgl_encoder_set_polygon_stipple(struct virgl_context
*ctx
,
724 const struct pipe_poly_stipple
*ps
)
727 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE
, 0, VIRGL_POLYGON_STIPPLE_SIZE
));
728 for (i
= 0; i
< VIRGL_POLYGON_STIPPLE_SIZE
; i
++) {
729 virgl_encoder_write_dword(ctx
->cbuf
, ps
->stipple
[i
]);
733 void virgl_encoder_set_sample_mask(struct virgl_context
*ctx
,
734 unsigned sample_mask
)
736 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK
, 0, VIRGL_SET_SAMPLE_MASK_SIZE
));
737 virgl_encoder_write_dword(ctx
->cbuf
, sample_mask
);
740 void virgl_encoder_set_min_samples(struct virgl_context
*ctx
,
741 unsigned min_samples
)
743 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES
, 0, VIRGL_SET_MIN_SAMPLES_SIZE
));
744 virgl_encoder_write_dword(ctx
->cbuf
, min_samples
);
747 void virgl_encoder_set_clip_state(struct virgl_context
*ctx
,
748 const struct pipe_clip_state
*clip
)
751 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE
, 0, VIRGL_SET_CLIP_STATE_SIZE
));
752 for (i
= 0; i
< VIRGL_MAX_CLIP_PLANES
; i
++) {
753 for (j
= 0; j
< 4; j
++) {
754 virgl_encoder_write_dword(ctx
->cbuf
, fui(clip
->ucp
[i
][j
]));
759 int virgl_encode_resource_copy_region(struct virgl_context
*ctx
,
760 struct virgl_resource
*dst_res
,
762 unsigned dstx
, unsigned dsty
, unsigned dstz
,
763 struct virgl_resource
*src_res
,
765 const struct pipe_box
*src_box
)
767 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION
, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE
));
768 virgl_encoder_write_res(ctx
, dst_res
);
769 virgl_encoder_write_dword(ctx
->cbuf
, dst_level
);
770 virgl_encoder_write_dword(ctx
->cbuf
, dstx
);
771 virgl_encoder_write_dword(ctx
->cbuf
, dsty
);
772 virgl_encoder_write_dword(ctx
->cbuf
, dstz
);
773 virgl_encoder_write_res(ctx
, src_res
);
774 virgl_encoder_write_dword(ctx
->cbuf
, src_level
);
775 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->x
);
776 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->y
);
777 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->z
);
778 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->width
);
779 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->height
);
780 virgl_encoder_write_dword(ctx
->cbuf
, src_box
->depth
);
784 int virgl_encode_blit(struct virgl_context
*ctx
,
785 struct virgl_resource
*dst_res
,
786 struct virgl_resource
*src_res
,
787 const struct pipe_blit_info
*blit
)
790 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BLIT
, 0, VIRGL_CMD_BLIT_SIZE
));
791 tmp
= VIRGL_CMD_BLIT_S0_MASK(blit
->mask
) |
792 VIRGL_CMD_BLIT_S0_FILTER(blit
->filter
) |
793 VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit
->scissor_enable
) |
794 VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit
->render_condition_enable
) |
795 VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit
->alpha_blend
);
796 virgl_encoder_write_dword(ctx
->cbuf
, tmp
);
797 virgl_encoder_write_dword(ctx
->cbuf
, (blit
->scissor
.minx
| blit
->scissor
.miny
<< 16));
798 virgl_encoder_write_dword(ctx
->cbuf
, (blit
->scissor
.maxx
| blit
->scissor
.maxy
<< 16));
800 virgl_encoder_write_res(ctx
, dst_res
);
801 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.level
);
802 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.format
);
803 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.x
);
804 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.y
);
805 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.z
);
806 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.width
);
807 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.height
);
808 virgl_encoder_write_dword(ctx
->cbuf
, blit
->dst
.box
.depth
);
810 virgl_encoder_write_res(ctx
, src_res
);
811 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.level
);
812 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.format
);
813 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.x
);
814 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.y
);
815 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.z
);
816 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.width
);
817 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.height
);
818 virgl_encoder_write_dword(ctx
->cbuf
, blit
->src
.box
.depth
);
822 int virgl_encoder_create_query(struct virgl_context
*ctx
,
826 struct virgl_resource
*res
,
829 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT
, VIRGL_OBJECT_QUERY
, VIRGL_OBJ_QUERY_SIZE
));
830 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
831 virgl_encoder_write_dword(ctx
->cbuf
, ((query_type
& 0xffff) | (query_index
<< 16)));
832 virgl_encoder_write_dword(ctx
->cbuf
, offset
);
833 virgl_encoder_write_res(ctx
, res
);
837 int virgl_encoder_begin_query(struct virgl_context
*ctx
,
840 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY
, 0, 1));
841 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
845 int virgl_encoder_end_query(struct virgl_context
*ctx
,
848 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_END_QUERY
, 0, 1));
849 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
853 int virgl_encoder_get_query_result(struct virgl_context
*ctx
,
854 uint32_t handle
, boolean wait
)
856 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT
, 0, 2));
857 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
858 virgl_encoder_write_dword(ctx
->cbuf
, wait
? 1 : 0);
862 int virgl_encoder_render_condition(struct virgl_context
*ctx
,
863 uint32_t handle
, boolean condition
,
864 enum pipe_render_cond_flag mode
)
866 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION
, 0, VIRGL_RENDER_CONDITION_SIZE
));
867 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
868 virgl_encoder_write_dword(ctx
->cbuf
, condition
);
869 virgl_encoder_write_dword(ctx
->cbuf
, mode
);
873 int virgl_encoder_set_so_targets(struct virgl_context
*ctx
,
874 unsigned num_targets
,
875 struct pipe_stream_output_target
**targets
,
876 unsigned append_bitmask
)
880 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS
, 0, num_targets
+ 1));
881 virgl_encoder_write_dword(ctx
->cbuf
, append_bitmask
);
882 for (i
= 0; i
< num_targets
; i
++) {
883 struct virgl_so_target
*tg
= virgl_so_target(targets
[i
]);
884 virgl_encoder_write_dword(ctx
->cbuf
, tg
? tg
->handle
: 0);
890 int virgl_encoder_set_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
892 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX
, 0, 1));
893 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
897 int virgl_encoder_create_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
899 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX
, 0, 1));
900 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
904 int virgl_encoder_destroy_sub_ctx(struct virgl_context
*ctx
, uint32_t sub_ctx_id
)
906 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX
, 0, 1));
907 virgl_encoder_write_dword(ctx
->cbuf
, sub_ctx_id
);
911 int virgl_encode_bind_shader(struct virgl_context
*ctx
,
912 uint32_t handle
, uint32_t type
)
914 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER
, 0, 2));
915 virgl_encoder_write_dword(ctx
->cbuf
, handle
);
916 virgl_encoder_write_dword(ctx
->cbuf
, type
);
920 int virgl_encode_set_tess_state(struct virgl_context
*ctx
,
921 const float outer
[4],
922 const float inner
[2])
925 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE
, 0, 6));
926 for (i
= 0; i
< 4; i
++)
927 virgl_encoder_write_dword(ctx
->cbuf
, fui(outer
[i
]));
928 for (i
= 0; i
< 2; i
++)
929 virgl_encoder_write_dword(ctx
->cbuf
, fui(inner
[i
]));
933 int virgl_encode_set_shader_buffers(struct virgl_context
*ctx
,
934 enum pipe_shader_type shader
,
935 unsigned start_slot
, unsigned count
,
936 const struct pipe_shader_buffer
*buffers
)
939 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS
, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count
)));
941 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
942 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
943 for (i
= 0; i
< count
; i
++) {
945 struct virgl_resource
*res
= virgl_resource(buffers
[i
].buffer
);
946 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_offset
);
947 virgl_encoder_write_dword(ctx
->cbuf
, buffers
[i
].buffer_size
);
948 virgl_encoder_write_res(ctx
, res
);
950 virgl_encoder_write_dword(ctx
->cbuf
, 0);
951 virgl_encoder_write_dword(ctx
->cbuf
, 0);
952 virgl_encoder_write_dword(ctx
->cbuf
, 0);
958 int virgl_encode_set_shader_images(struct virgl_context
*ctx
,
959 enum pipe_shader_type shader
,
960 unsigned start_slot
, unsigned count
,
961 const struct pipe_image_view
*images
)
964 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES
, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count
)));
966 virgl_encoder_write_dword(ctx
->cbuf
, shader
);
967 virgl_encoder_write_dword(ctx
->cbuf
, start_slot
);
968 for (i
= 0; i
< count
; i
++) {
970 struct virgl_resource
*res
= virgl_resource(images
[i
].resource
);
971 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].format
);
972 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].access
);
973 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].u
.buf
.offset
);
974 virgl_encoder_write_dword(ctx
->cbuf
, images
[i
].u
.buf
.size
);
975 virgl_encoder_write_res(ctx
, res
);
977 virgl_encoder_write_dword(ctx
->cbuf
, 0);
978 virgl_encoder_write_dword(ctx
->cbuf
, 0);
979 virgl_encoder_write_dword(ctx
->cbuf
, 0);
980 virgl_encoder_write_dword(ctx
->cbuf
, 0);
981 virgl_encoder_write_dword(ctx
->cbuf
, 0);
987 int virgl_encode_memory_barrier(struct virgl_context
*ctx
,
990 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER
, 0, 1));
991 virgl_encoder_write_dword(ctx
->cbuf
, flags
);
995 int virgl_encode_launch_grid(struct virgl_context
*ctx
,
996 const struct pipe_grid_info
*grid_info
)
998 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID
, 0, VIRGL_LAUNCH_GRID_SIZE
));
999 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[0]);
1000 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[1]);
1001 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->block
[2]);
1002 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[0]);
1003 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[1]);
1004 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->grid
[2]);
1005 if (grid_info
->indirect
) {
1006 struct virgl_resource
*res
= virgl_resource(grid_info
->indirect
);
1007 virgl_encoder_write_res(ctx
, res
);
1009 virgl_encoder_write_dword(ctx
->cbuf
, 0);
1010 virgl_encoder_write_dword(ctx
->cbuf
, grid_info
->indirect_offset
);
1014 int virgl_encode_texture_barrier(struct virgl_context
*ctx
,
1017 virgl_encoder_write_cmd_dword(ctx
, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER
, 0, 1));
1018 virgl_encoder_write_dword(ctx
->cbuf
, flags
);