radeonsi: Flesh out support for depth/stencil exports from the pixel shader.
[mesa.git] / src / gallium / drivers / radeonsi / si_state_draw.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_parse.h"
#include "radeonsi_pipe.h"
#include "radeonsi_shader.h"
#include "si_state.h"
#include "sid.h"

/*
 * Shaders
 */

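/* Build the PM4 state object that binds a compiled vertex shader:
 * export configuration, shader code address and GPR resource limits. */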
static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct si_pm4_state *pm4;
        unsigned num_sgprs, num_user_sgprs;
        unsigned nparams, i;
        uint64_t va;

        si_pm4_delete_state(rctx, vs, shader->pm4);
        pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

        si_pm4_inval_shader_cache(pm4);

        /* Certain attributes (position, psize, etc.) don't count as params.
         * VS is required to export at least one param and r600_shader_from_tgsi()
         * takes care of adding a dummy export.
         */
        for (nparams = 0, i = 0; i < shader->shader.noutput; i++) {
                if (shader->shader.output[i].name != TGSI_SEMANTIC_POSITION)
                        nparams++;
        }
        if (nparams < 1)
                nparams = 1;

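        /* VS_EXPORT_COUNT is encoded as "count - 1"; position exports are
         * configured separately through SPI_SHADER_POS_FORMAT below. */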
        si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
                       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

        si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
                       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
                       S_02870C_POS1_EXPORT_FORMAT(V_02870C_SPI_SHADER_NONE) |
                       S_02870C_POS2_EXPORT_FORMAT(V_02870C_SPI_SHADER_NONE) |
                       S_02870C_POS3_EXPORT_FORMAT(V_02870C_SPI_SHADER_NONE));

        va = r600_resource_va(ctx->screen, (void *)shader->bo);
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
        si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
        si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);

        num_user_sgprs = SI_VS_NUM_USER_SGPR;
        num_sgprs = shader->num_sgprs;
        if (num_user_sgprs > num_sgprs)
                num_sgprs = num_user_sgprs;
        /* Last 2 reserved SGPRs are used for VCC */
        num_sgprs += 2;
        assert(num_sgprs <= 104);

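        /* Per the SI documentation, the VGPRS and SGPRS fields count blocks
         * of 4 and 8 registers respectively, encoded as "blocks - 1"; e.g.
         * num_sgprs == 9 gives (9 - 1) / 8 == 1, i.e. 16 SGPRs allocated. */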
        si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
                       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
                       S_00B128_SGPRS((num_sgprs - 1) / 8));
        si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
                       S_00B12C_USER_SGPR(num_user_sgprs));

        si_pm4_bind_state(rctx, vs, shader->pm4);
}

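/* Build the PM4 state object that binds a compiled pixel shader. Besides
 * the interpolation setup, this derives DB_SHADER_CONTROL and
 * SPI_SHADER_Z_FORMAT from whether the shader exports depth and/or
 * stencil. */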
static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct si_pm4_state *pm4;
        unsigned i, exports_ps, num_cout, spi_ps_in_control, db_shader_control;
        unsigned num_sgprs, num_user_sgprs;
        boolean have_linear = FALSE, have_centroid = FALSE, have_perspective = FALSE;
        unsigned fragcoord_interp_mode = 0;
        unsigned spi_baryc_cntl, spi_ps_input_ena, spi_shader_z_format;
        uint64_t va;

        si_pm4_delete_state(rctx, ps, shader->pm4);
        pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

        si_pm4_inval_shader_cache(pm4);

        db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
        for (i = 0; i < shader->shader.ninput; i++) {
                switch (shader->shader.input[i].name) {
                case TGSI_SEMANTIC_POSITION:
                        if (shader->shader.input[i].centroid) {
                                /* fragcoord_interp_mode will be written to
                                 * SPI_BARYC_CNTL.POS_FLOAT_LOCATION
                                 * Possible values:
                                 * 0 -> Position = pixel center (default)
                                 * 1 -> Position = pixel centroid
                                 * 2 -> Position = iterated sample number XXX:
                                 * What does this mean?
                                 */
                                fragcoord_interp_mode = 1;
                        }
                        /* Fall through */
                case TGSI_SEMANTIC_FACE:
                        continue;
                }

                /* XXX: Flat shading hangs the GPU */
                if (shader->shader.input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
                    (shader->shader.input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
                     rctx->queued.named.rasterizer->flatshade))
                        have_linear = TRUE;
                if (shader->shader.input[i].interpolate == TGSI_INTERPOLATE_LINEAR)
                        have_linear = TRUE;
                if (shader->shader.input[i].interpolate == TGSI_INTERPOLATE_PERSPECTIVE)
                        have_perspective = TRUE;
                if (shader->shader.input[i].centroid)
                        have_centroid = TRUE;
        }

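        /* Enable the depth/stencil exports in DB_SHADER_CONTROL when the
         * shader writes TGSI_SEMANTIC_POSITION (gl_FragDepth) or
         * TGSI_SEMANTIC_STENCIL, so the DB reads Z/stencil from the
         * shader's export instead of the interpolated values. */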
        for (i = 0; i < shader->shader.noutput; i++) {
                if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION)
                        db_shader_control |= S_02880C_Z_EXPORT_ENABLE(1);
                if (shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
                        db_shader_control |= S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(1);
        }
        if (shader->shader.uses_kill || shader->key.alpha_func != PIPE_FUNC_ALWAYS)
                db_shader_control |= S_02880C_KILL_ENABLE(1);

        exports_ps = 0;
        num_cout = 0;
        for (i = 0; i < shader->shader.noutput; i++) {
                if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION ||
                    shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
                        exports_ps |= 1;
                else if (shader->shader.output[i].name == TGSI_SEMANTIC_COLOR) {
                        if (shader->shader.fs_write_all)
                                num_cout = shader->shader.nr_cbufs;
                        else
                                num_cout++;
                }
        }
        if (!exports_ps) {
                /* always at least export 1 component per pixel */
                exports_ps = 2;
        }

        spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.ninterp);

        spi_baryc_cntl = 0;
        if (have_perspective)
                spi_baryc_cntl |= have_centroid ?
                        S_0286E0_PERSP_CENTROID_CNTL(1) : S_0286E0_PERSP_CENTER_CNTL(1);
        if (have_linear)
                spi_baryc_cntl |= have_centroid ?
                        S_0286E0_LINEAR_CENTROID_CNTL(1) : S_0286E0_LINEAR_CENTER_CNTL(1);
        spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(fragcoord_interp_mode);

        si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
        spi_ps_input_ena = shader->spi_ps_input_ena;
        /* we need to enable at least one of them, otherwise we hang the GPU */
        assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
               G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
               G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
               G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
               G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
               G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
               G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
               G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

        si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
        si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
        si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

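        /* Select the Z export format to match what is enabled above: per
         * the SI documentation, 32_GR carries depth in R and stencil in G
         * as two 32-bit values, 32_R carries depth only, and 0 disables
         * the export. */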
        if (G_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(db_shader_control))
                spi_shader_z_format = V_028710_SPI_SHADER_32_GR;
        else if (G_02880C_Z_EXPORT_ENABLE(db_shader_control))
                spi_shader_z_format = V_028710_SPI_SHADER_32_R;
        else
                spi_shader_z_format = 0;
        si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, spi_shader_z_format);

        va = r600_resource_va(ctx->screen, (void *)shader->bo);
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
        si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
        si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

        num_user_sgprs = SI_PS_NUM_USER_SGPR;
        num_sgprs = shader->num_sgprs;
        if (num_user_sgprs > num_sgprs)
                num_sgprs = num_user_sgprs;
        /* Last 2 reserved SGPRs are used for VCC */
        num_sgprs += 2;
        assert(num_sgprs <= 104);

        si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
                       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
                       S_00B028_SGPRS((num_sgprs - 1) / 8));
        si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
                       S_00B02C_USER_SGPR(num_user_sgprs));

        si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);

        shader->sprite_coord_enable = rctx->sprite_coord_enable;
        si_pm4_bind_state(rctx, ps, shader->pm4);
}

/*
 * Drawing
 */

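/* Map gallium primitive types to VGT_PRIMITIVE_TYPE values. Adjacency
 * primitives are not supported yet and map to ~0, which the caller has
 * to check before emitting the register value. */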
static unsigned si_conv_pipe_prim(unsigned pprim)
{
        static const unsigned prim_conv[] = {
                [PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
                [PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
                [PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
                [PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
                [PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
                [PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
                [PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
                [PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
                [PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
                [PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
                [PIPE_PRIM_LINES_ADJACENCY] = ~0,
                [PIPE_PRIM_LINE_STRIP_ADJACENCY] = ~0,
                [PIPE_PRIM_TRIANGLES_ADJACENCY] = ~0,
                [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = ~0
        };
        unsigned result = prim_conv[pprim];
        if (result == ~0) {
                R600_ERR("unsupported primitive type %d\n", pprim);
        }
        return result;
}

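/* Emit the per-draw VGT state: primitive type, index offset/bounds,
 * primitive restart, and the line-stipple and provoking-vertex settings
 * that depend on the primitive type. */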
static bool si_update_draw_info_state(struct r600_context *rctx,
                                      const struct pipe_draw_info *info)
{
        struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
        unsigned prim = si_conv_pipe_prim(info->mode);
        unsigned ls_mask = 0;

        if (pm4 == NULL)
                return false;

        if (prim == ~0) {
                FREE(pm4);
                return false;
        }

        si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
        si_pm4_set_reg(pm4, R_028400_VGT_MAX_VTX_INDX, ~0);
        si_pm4_set_reg(pm4, R_028404_VGT_MIN_VTX_INDX, 0);
        si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET,
                       info->indexed ? info->index_bias : info->start);
        si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
        si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
#if 0
        si_pm4_set_reg(pm4, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
        si_pm4_set_reg(pm4, R_03CFF4_SQ_VTX_START_INST_LOC, info->start_instance);
#endif

        if (prim == V_008958_DI_PT_LINELIST)
                ls_mask = 1;
        else if (prim == V_008958_DI_PT_LINESTRIP)
                ls_mask = 2;
        si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
                       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
                       rctx->pa_sc_line_stipple);

        if (info->mode == PIPE_PRIM_QUADS || info->mode == PIPE_PRIM_QUAD_STRIP ||
            info->mode == PIPE_PRIM_POLYGON) {
                si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
                               S_028814_PROVOKING_VTX_LAST(1) | rctx->pa_su_sc_mode_cntl);
        } else {
                si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, rctx->pa_su_sc_mode_cntl);
        }
        si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
                       info->mode == PIPE_PRIM_POINTS ? rctx->pa_cl_vs_out_cntl : 0
                       /*| (rctx->rasterizer->clip_plane_enable &
                            rctx->vs_shader->shader.clip_dist_write)*/);
        si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL, rctx->pa_cl_clip_cntl
                       /*| (rctx->vs_shader->shader.clip_dist_write ||
                            rctx->vs_shader->shader.vs_prohibit_ucps ?
                            0 : rctx->rasterizer->clip_plane_enable & 0x3F)*/);

        si_pm4_set_state(rctx, draw_info, pm4);
        return true;
}

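/* Build the SPI_PS_INPUT_CNTL_* entries that map each pixel shader
 * input to the vertex shader output parameter feeding it; inputs
 * without a matching VS output read the default value (offset 0x20). */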
static void si_update_spi_map(struct r600_context *rctx)
{
        struct si_shader *ps = &rctx->ps_shader->current->shader;
        struct si_shader *vs = &rctx->vs_shader->current->shader;
        struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
        unsigned i, j, tmp;

        for (i = 0; i < ps->ninput; i++) {
                unsigned name = ps->input[i].name;
                unsigned param_offset = ps->input[i].param_offset;

bcolor:
                tmp = 0;

#if 0
                /* XXX: Flat shading hangs the GPU */
                if (name == TGSI_SEMANTIC_POSITION ||
                    ps->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
                    (ps->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
                     rctx->rasterizer && rctx->rasterizer->flatshade)) {
                        tmp |= S_028644_FLAT_SHADE(1);
                }
#endif

                if (name == TGSI_SEMANTIC_GENERIC &&
                    rctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
                        tmp |= S_028644_PT_SPRITE_TEX(1);
                }

                for (j = 0; j < vs->noutput; j++) {
                        if (name == vs->output[j].name &&
                            ps->input[i].sid == vs->output[j].sid) {
                                tmp |= S_028644_OFFSET(vs->output[j].param_offset);
                                break;
                        }
                }

                if (j == vs->noutput) {
                        /* No corresponding output found, load defaults into input */
                        tmp |= S_028644_OFFSET(0x20);
                }

                si_pm4_set_reg(pm4,
                               R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
                               tmp);

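                /* With two-sided lighting the back color lives in the
                 * param slot right after the front color, so emit a second
                 * input control word mapped to the VS BCOLOR output. */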
                if (name == TGSI_SEMANTIC_COLOR &&
                    rctx->ps_shader->current->key.color_two_side) {
                        name = TGSI_SEMANTIC_BCOLOR;
                        param_offset++;
                        goto bcolor;
                }
        }

        si_pm4_set_state(rctx, spi, pm4);
}

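/* Select and, if necessary, (re)build the shader variants for the
 * current state, bind their PM4 state and refresh the SPI input map
 * whenever one of the shaders changed. */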
static void si_update_derived_state(struct r600_context *rctx)
{
        struct pipe_context *ctx = (struct pipe_context *)rctx;
        unsigned ps_dirty = 0;

        if (!rctx->blitter->running) {
                if (rctx->have_depth_fb || rctx->have_depth_texture)
                        si_flush_depth_textures(rctx);
        }

        si_shader_select(ctx, rctx->ps_shader, &ps_dirty);

        if (!rctx->vs_shader->current->pm4) {
                si_pipe_shader_vs(ctx, rctx->vs_shader->current);
        }

        if (!rctx->ps_shader->current->pm4) {
                si_pipe_shader_ps(ctx, rctx->ps_shader->current);
                ps_dirty = 0;
        }
        if (!rctx->ps_shader->current->bo) {
                /* Fall back to the dummy pixel shader if the current one
                 * has no code buffer. */
                if (!rctx->dummy_pixel_shader->pm4)
                        si_pipe_shader_ps(ctx, rctx->dummy_pixel_shader);
                else
                        si_pm4_bind_state(rctx, ps, rctx->dummy_pixel_shader->pm4);

                ps_dirty = 0;
        }

        if (ps_dirty) {
                si_pm4_bind_state(rctx, ps, rctx->ps_shader->current->pm4);
        }

        if (si_pm4_state_changed(rctx, ps) || si_pm4_state_changed(rctx, vs)) {
                si_update_spi_map(rctx);
        }
}

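/* Upload one 128-bit buffer resource descriptor per vertex element
 * through the VS user-data SGPRs, so the vertex shader can fetch its
 * attributes with buffer loads. */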
static void si_vertex_buffer_update(struct r600_context *rctx)
{
        struct pipe_context *ctx = &rctx->context;
        struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
        bool bound[PIPE_MAX_ATTRIBS] = {};
        unsigned i, count;
        uint64_t va;

        si_pm4_inval_vertex_cache(pm4);

        /* bind vertex buffer once */
        count = rctx->vertex_elements->count;
        assert(count <= 256 / 4);

        si_pm4_sh_data_begin(pm4);
        for (i = 0; i < count; i++) {
                struct pipe_vertex_element *ve = &rctx->vertex_elements->elements[i];
                struct pipe_vertex_buffer *vb;
                struct si_resource *rbuffer;
                unsigned offset;

                if (ve->vertex_buffer_index >= rctx->nr_vertex_buffers)
                        continue;

                vb = &rctx->vertex_buffer[ve->vertex_buffer_index];
                rbuffer = (struct si_resource *)vb->buffer;
                if (rbuffer == NULL)
                        continue;

                offset = 0;
                offset += vb->buffer_offset;
                offset += ve->src_offset;

                va = r600_resource_va(ctx->screen, (void *)rbuffer);
                va += offset;

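                /* The four descriptor dwords are, per the SI ISA manual:
                 * base address bits [31:0]; base address bits [47:32] plus
                 * the stride; the number of records; and the data/num
                 * format plus swizzle, precomputed in rsrc_word3. */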
                /* Fill in T# buffer resource description */
                si_pm4_sh_data_add(pm4, va & 0xFFFFFFFF);
                si_pm4_sh_data_add(pm4, (S_008F04_BASE_ADDRESS_HI(va >> 32) |
                                         S_008F04_STRIDE(vb->stride)));
                si_pm4_sh_data_add(pm4, (vb->buffer->width0 - vb->buffer_offset) /
                                        MAX2(vb->stride, 1));
                si_pm4_sh_data_add(pm4, rctx->vertex_elements->rsrc_word3[i]);

                if (!bound[ve->vertex_buffer_index]) {
                        si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
                        bound[ve->vertex_buffer_index] = true;
                }
        }
        si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_VERTEX_BUFFER);
        si_pm4_set_state(rctx, vertex_buffers, pm4);
}

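/* Emit the draw packets themselves: index type, instance count, and
 * either DRAW_INDEX_2 (indices fetched via DMA) or DRAW_INDEX_AUTO
 * (auto-generated indices). */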
static void si_state_draw(struct r600_context *rctx,
                          const struct pipe_draw_info *info,
                          const struct pipe_index_buffer *ib)
{
        struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);

        /* queries need some special values
         * (this is non-zero if any query is active) */
        if (rctx->num_cs_dw_queries_suspend) {
                struct si_state_dsa *dsa = rctx->queued.named.dsa;

                si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
                               S_028004_PERFECT_ZPASS_COUNTS(1));
                si_pm4_set_reg(pm4, R_02800C_DB_RENDER_OVERRIDE,
                               dsa->db_render_override |
                               S_02800C_NOOP_CULL_DISABLE(1));
        }

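        /* INDEX_TYPE selects 16- vs 32-bit indices (and the endian swap
         * mode on big-endian hosts) and has to be emitted before the draw
         * packet itself. */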
        /* draw packet */
        si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
        if (ib->index_size == 4) {
                si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (R600_BIG_ENDIAN ?
                                V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
        } else {
                si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (R600_BIG_ENDIAN ?
                                V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
        }
        si_pm4_cmd_end(pm4, rctx->predicate_drawing);

        si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
        si_pm4_cmd_add(pm4, info->instance_count);
        si_pm4_cmd_end(pm4, rctx->predicate_drawing);

        if (info->indexed) {
                /* Use ib->index_size here: translation may have changed
                 * the size from what the context state holds. */
                uint32_t max_size = (ib->buffer->width0 - ib->offset) /
                                    ib->index_size;
                uint64_t va;
                va = r600_resource_va(&rctx->screen->screen, ib->buffer);
                va += ib->offset;

                si_pm4_add_bo(pm4, (struct si_resource *)ib->buffer, RADEON_USAGE_READ);
                si_cmd_draw_index_2(pm4, max_size, va, info->count,
                                    V_0287F0_DI_SRC_SEL_DMA,
                                    rctx->predicate_drawing);
        } else {
                uint32_t initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
                initiator |= S_0287F0_USE_OPAQUE(!!info->count_from_stream_output);
                si_cmd_draw_index_auto(pm4, info->count, initiator, rctx->predicate_drawing);
        }
        si_pm4_set_state(rctx, draw, pm4);
}

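/* Top-level draw entry point: validate the draw call, update derived
 * and vertex buffer state, set up the index buffer, and emit all dirty
 * PM4 state followed by the draw packets. */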
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct pipe_index_buffer ib = {};
        uint32_t cp_coher_cntl;

        if ((!info->count && (info->indexed || !info->count_from_stream_output)) ||
            (info->indexed && !rctx->index_buffer.buffer)) {
                return;
        }

        if (!rctx->ps_shader || !rctx->vs_shader)
                return;

        si_update_derived_state(rctx);
        si_vertex_buffer_update(rctx);

        if (info->indexed) {
                /* Initialize the index buffer struct. */
                pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
                ib.index_size = rctx->index_buffer.index_size;
                ib.offset = rctx->index_buffer.offset + info->start * ib.index_size;

                /* Translate or upload, if needed. */
                r600_translate_index_buffer(rctx, &ib, info->count);

                if (ib.user_buffer) {
                        r600_upload_index_buffer(rctx, &ib, info->count);
                }
        } else if (info->count_from_stream_output) {
                r600_context_draw_opaque_count(rctx, (struct r600_so_target*)info->count_from_stream_output);
        }

        rctx->vs_shader_so_strides = rctx->vs_shader->current->so_strides;

        if (!si_update_draw_info_state(rctx, info))
                return;

        si_state_draw(rctx, info, &ib);

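        /* Queue a SURFACE_SYNC packet if any of the queued states requires
         * a cache flush; it is emitted with the rest of the dirty state
         * below. */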
        cp_coher_cntl = si_pm4_sync_flags(rctx);
        if (cp_coher_cntl) {
                struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
                si_cmd_surface_sync(pm4, cp_coher_cntl);
                si_pm4_set_state(rctx, sync, pm4);
        }

        /* Emit states. */
        rctx->pm4_dirty_cdwords += si_pm4_dirty_dw(rctx);

        si_need_cs_space(rctx, 0, TRUE);

        si_pm4_emit_dirty(rctx);
        rctx->pm4_dirty_cdwords = 0;

#if 0
        /* Enable stream out if needed. */
        if (rctx->streamout_start) {
                r600_context_streamout_begin(rctx);
                rctx->streamout_start = FALSE;
        }
#endif

        rctx->flags |= R600_CONTEXT_DST_CACHES_DIRTY;

        if (rctx->framebuffer.zsbuf) {
                struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
                ((struct r600_resource_texture *)tex)->dirty_db = TRUE;
        }

        pipe_resource_reference(&ib.buffer, NULL);
}