radeonsi: remove the cache_flush atom
src/gallium/drivers/radeonsi/si_state_draw.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "si_pipe.h"
#include "si_shader.h"
#include "radeon/r600_cs.h"
#include "sid.h"

#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"
#include "util/u_memory.h"

static unsigned si_conv_pipe_prim(unsigned mode)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
	};
	assert(mode < ARRAY_SIZE(prim_conv));
	return prim_conv[mode];
}

static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
static void si_emit_derived_tess_state(struct si_context *sctx,
				       const struct pipe_draw_info *info,
				       unsigned *num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader_ctx_state *ls = &sctx->vs_shader;
	/* The TES pointer will only be used for sctx->last_tcs.
	 * It would be wrong to think that TCS = TES. */
	struct si_shader_selector *tcs =
		sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
	unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
	unsigned num_tcs_input_cp = info->vertices_per_patch;
	unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
	unsigned num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
	unsigned input_patch_size, output_patch_size, output_patch0_offset;
	unsigned perpatch_output_offset, lds_size, ls_rsrc2;
	unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
	unsigned offchip_layout, hardware_lds_size;

	/* This calculates how shader inputs and outputs among VS, TCS, and TES
	 * are laid out in LDS. */
	num_tcs_inputs = util_last_bit64(ls->cso->outputs_written);

	if (sctx->tcs_shader.cso) {
		num_tcs_outputs = util_last_bit64(tcs->outputs_written);
		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
		num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
	} else {
		/* No TCS. Route varyings from LS to TES. */
		num_tcs_outputs = num_tcs_inputs;
		num_tcs_output_cp = num_tcs_input_cp;
		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
	}

	input_vertex_size = num_tcs_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

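	/* Illustrative example (hypothetical numbers, not from a real app):
	 * with 8 vec4 outputs written by the LS and 4 input control points
	 * per patch, input_vertex_size = 8 * 16 = 128 bytes and
	 * input_patch_size = 4 * 128 = 512 bytes. Every slot is one vec4
	 * (16 bytes), which is where the "* 16" factors come from.
	 */
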
	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the numbers of TCS input and
	 * output vertices per threadgroup are at most 256.
	 */
	*num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;

	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = sctx->b.chip_class >= CIK ? 65536 : 32768;
	*num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size +
							       output_patch_size));

	/* Make sure the output data fits in the offchip buffer */
	*num_patches = MIN2(*num_patches,
			    (sctx->screen->tess_offchip_block_dw_size * 4) /
			    output_patch_size);

	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	*num_patches = MIN2(*num_patches, 40);

	output_patch0_offset = input_patch_size * *num_patches;
	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

	lds_size = output_patch0_offset + output_patch_size * *num_patches;
	ls_rsrc2 = ls->current->config.rsrc2;

	if (sctx->b.chip_class >= CIK) {
		assert(lds_size <= 65536);
		ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 512) / 512);
	} else {
		assert(lds_size <= 32768);
		ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 256) / 256);
	}

	if (sctx->last_ls == ls->current &&
	    sctx->last_tcs == tcs &&
	    sctx->last_tes_sh_base == tes_sh_base &&
	    sctx->last_num_tcs_input_cp == num_tcs_input_cp)
		return;

	sctx->last_ls = ls->current;
	sctx->last_tcs = tcs;
	sctx->last_tes_sh_base = tes_sh_base;
	sctx->last_num_tcs_input_cp = num_tcs_input_cp;

	/* Due to a hw bug, RSRC2_LS must be written twice with another
	 * LS register written in between. */
	if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, ls->current->config.rsrc1);
	radeon_emit(cs, ls_rsrc2);

	/* Compute userdata SGPRs. */
	assert(((input_vertex_size / 4) & ~0xff) == 0);
	assert(((output_vertex_size / 4) & ~0xff) == 0);
	assert(((input_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch0_offset / 16) & ~0xffff) == 0);
	assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
	assert(num_tcs_input_cp <= 32);
	assert(num_tcs_output_cp <= 32);

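	/* Bit layout of the packed userdata values below, derived from the
	 * shifts and the asserts above (a reading aid, not authoritative):
	 *   tcs_in_layout:   [12:0] input_patch_size/4,  [20:13] input_vertex_size/4
	 *   tcs_out_layout:  [12:0] output_patch_size/4, [20:13] output_vertex_size/4
	 *   tcs_out_offsets: [15:0] output_patch0_offset/16,
	 *                    [31:16] perpatch_output_offset/16
	 *   offchip_layout:  [8:0] num_patches, [14:9] num_tcs_output_cp,
	 *                    [31:16] pervertex_output_patch_size * num_patches
	 */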
	tcs_in_layout = (input_patch_size / 4) |
			((input_vertex_size / 4) << 13);
	tcs_out_layout = (output_patch_size / 4) |
			 ((output_vertex_size / 4) << 13);
	tcs_out_offsets = (output_patch0_offset / 16) |
			  ((perpatch_output_offset / 16) << 16);
	offchip_layout = (pervertex_output_patch_size * *num_patches << 16) |
			 (num_tcs_output_cp << 9) | *num_patches;

	/* Set them for LS. */
	radeon_set_sh_reg(cs,
		R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
		tcs_in_layout);

	/* Set them for TCS. */
	radeon_set_sh_reg_seq(cs,
		R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
	radeon_emit(cs, offchip_layout);
	radeon_emit(cs, tcs_out_offsets);
	radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
	radeon_emit(cs, tcs_in_layout);

	/* Set them for TES. */
	radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OFFCHIP_LAYOUT * 4, 1);
	radeon_emit(cs, offchip_layout);
}

static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
{
	switch (info->mode) {
	case PIPE_PRIM_PATCHES:
		return info->count / info->vertices_per_patch;
	case R600_PRIM_RECTANGLE_LIST:
		return info->count / 3;
	default:
		return u_prims_for_vertices(info->mode, info->count);
	}
}

static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned num_patches)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	unsigned prim = info->mode;
	unsigned primgroup_size = 128; /* recommended without a GS */
	unsigned max_primgroup_in_wave = 2;

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = false;

	if (sctx->gs_shader.cso)
		primgroup_size = 64; /* recommended with a GS */

	if (sctx->tes_shader.cso) {
		/* primgroup_size must be set to a multiple of NUM_PATCHES */
		primgroup_size = num_patches;

		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if ((sctx->tcs_shader.cso && sctx->tcs_shader.cso->info.uses_primid) ||
		    sctx->tes_shader.cso->info.uses_primid)
			ia_switch_on_eoi = true;

		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((sctx->b.family == CHIP_TAHITI ||
		     sctx->b.family == CHIP_PITCAIRN ||
		     sctx->b.family == CHIP_BONAIRE) &&
		    sctx->gs_shader.cso)
			partial_vs_wave = true;

		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (sctx->screen->has_distributed_tess) {
			if (sctx->gs_shader.cso)
				partial_es_wave = true;
			else
				partial_vs_wave = true;
		}
	}

	/* This is a hardware requirement. */
	if ((rs && rs->line_stipple_enable) ||
	    (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sctx->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements.
		 *
		 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
		 * for points, line strips, and tri strips.
		 */
		if (sctx->b.screen->info.max_se < 4 ||
		    prim == PIPE_PRIM_POLYGON ||
		    prim == PIPE_PRIM_LINE_LOOP ||
		    prim == PIPE_PRIM_TRIANGLE_FAN ||
		    prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    (info->primitive_restart &&
		     (sctx->b.family < CHIP_POLARIS10 ||
		      (prim != PIPE_PRIM_POINTS &&
		       prim != PIPE_PRIM_LINE_STRIP &&
		       prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
		    info->count_from_stream_output)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sctx->b.family == CHIP_HAWAII &&
		    (info->indirect || info->instance_count > 1))
			wd_switch_on_eop = true;

		/* Performance recommendation for 4 SE Gfx7-8 parts if
		 * instances are smaller than a primgroup. Ignore the fact
		 * primgroup_size is a primitive count, not vertex count.
		 * Don't do anything for indirect draws.
		 */
		if (sctx->b.chip_class <= VI &&
		    sctx->b.screen->info.max_se >= 4 &&
		    !info->indirect &&
		    info->instance_count > 1 && info->count < primgroup_size)
			wd_switch_on_eop = true;

		/* Required on CIK and later. */
		if (sctx->b.screen->info.max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by VI. */
		if (ia_switch_on_eoi &&
		    (sctx->b.family == CHIP_HAWAII ||
		     (sctx->b.chip_class == VI &&
		      (sctx->gs_shader.cso || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (sctx->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    (info->indirect || info->instance_count > 1))
			partial_vs_wave = true;

		/* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
		 * The hw doc says all multi-SE chips are affected, but Vulkan
		 * only applies it to Hawaii. Do what Vulkan does.
		 */
		if (sctx->b.family == CHIP_HAWAII &&
		    sctx->gs_shader.cso &&
		    ia_switch_on_eoi &&
		    (info->indirect ||
		     (info->instance_count > 1 &&
		      si_num_prims_for_vertices(info) <= 1)))
			sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (ia_switch_on_eoi)
		partial_es_wave = true;

	/* GS requirement. */
	if (SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
		partial_es_wave = true;

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
		S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
		S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
		S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
		S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
		S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
		S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ?
					     max_primgroup_in_wave : 0);
}

static unsigned si_get_ls_hs_config(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned num_patches)
{
	unsigned num_output_cp;

	if (!sctx->tes_shader.cso)
		return 0;

	num_output_cp = sctx->tcs_shader.cso ?
		sctx->tcs_shader.cso->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
		info->vertices_per_patch;

	return S_028B58_NUM_PATCHES(num_patches) |
		S_028B58_HS_NUM_INPUT_CP(info->vertices_per_patch) |
		S_028B58_HS_NUM_OUTPUT_CP(num_output_cp);
}

static void si_emit_scratch_reloc(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	if (!sctx->emit_scratch_reloc)
		return;

	radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
			       sctx->spi_tmpring_size);

	if (sctx->scratch_buffer) {
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  sctx->scratch_buffer, RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SCRATCH_BUFFER);
	}
	sctx->emit_scratch_reloc = false;
}

/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned rast_prim = sctx->current_rast_prim;
	struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;

	/* Skip this if not rendering lines. */
	if (rast_prim != PIPE_PRIM_LINES &&
	    rast_prim != PIPE_PRIM_LINE_LOOP &&
	    rast_prim != PIPE_PRIM_LINE_STRIP &&
	    rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
	    rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
		return;

	if (rast_prim == sctx->last_rast_prim &&
	    rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
		return;

	/* For lines, reset the stipple pattern at each primitive. Otherwise,
	 * reset the stipple pattern at each packet (line strips, line loops).
	 */
	radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
		rs->pa_sc_line_stipple |
		S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));

	sctx->last_rast_prim = rast_prim;
	sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}

static void si_emit_draw_registers(struct si_context *sctx,
				   const struct pipe_draw_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
	unsigned ia_multi_vgt_param, ls_hs_config, num_patches = 0;

	/* Polaris needs different VTX_REUSE_DEPTH settings depending on
	 * whether the "fractional odd" tessellation spacing is used.
	 */
	if (sctx->b.family >= CHIP_POLARIS10) {
		struct si_shader_selector *tes = sctx->tes_shader.cso;
		unsigned vtx_reuse_depth = 30;

		if (tes &&
		    tes->info.properties[TGSI_PROPERTY_TES_SPACING] ==
		    PIPE_TESS_SPACING_FRACTIONAL_ODD)
			vtx_reuse_depth = 14;

		if (vtx_reuse_depth != sctx->last_vtx_reuse_depth) {
			radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
					       vtx_reuse_depth);
			sctx->last_vtx_reuse_depth = vtx_reuse_depth;
		}
	}

	if (sctx->tes_shader.cso)
		si_emit_derived_tess_state(sctx, info, &num_patches);

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
	ls_hs_config = si_get_ls_hs_config(sctx, info, num_patches);

	/* Draw state. */
	if (prim != sctx->last_prim ||
	    ia_multi_vgt_param != sctx->last_multi_vgt_param ||
	    ls_hs_config != sctx->last_ls_hs_config) {
		if (sctx->b.chip_class >= CIK) {
			radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
			radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2, ls_hs_config);
			radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
		} else {
			radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
			radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
			radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
		}

		sctx->last_prim = prim;
		sctx->last_multi_vgt_param = ia_multi_vgt_param;
		sctx->last_ls_hs_config = ls_hs_config;
	}

	if (gs_out_prim != sctx->last_gs_out_prim) {
		radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
		sctx->last_gs_out_prim = gs_out_prim;
	}

	/* Primitive restart. */
	if (info->primitive_restart != sctx->last_primitive_restart_en) {
		radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
		sctx->last_primitive_restart_en = info->primitive_restart;

		if (info->primitive_restart &&
		    (info->restart_index != sctx->last_restart_index ||
		     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
			radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
					       info->restart_index);
			sctx->last_restart_index = info->restart_index;
		}
	}
}

static void si_emit_draw_packets(struct si_context *sctx,
				 const struct pipe_draw_info *info,
				 const struct pipe_index_buffer *ib)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
	uint32_t index_max_size = 0;
	uint64_t index_va = 0;

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
				       t->stride_in_dw);

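		/* Copy the "buffer filled size" written by streamout into
		 * VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE. With USE_OPAQUE
		 * set on the draw packet below, the VGT derives the vertex
		 * count from this value and the vertex stride set above.
		 */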
		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			    COPY_DATA_DST_SEL(COPY_DATA_REG) |
			    COPY_DATA_WR_CONFIRM);
		radeon_emit(cs, va);		/* src address lo */
		radeon_emit(cs, va >> 32);	/* src address hi */
		radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		radeon_emit(cs, 0); /* unused */

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  t->buf_filled_size, RADEON_USAGE_READ,
					  RADEON_PRIO_SO_FILLED_SIZE);
	}

	/* draw packet */
	if (info->indexed) {
		if (ib->index_size != sctx->last_index_size) {
			radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));

			/* index type */
			switch (ib->index_size) {
			case 1:
				radeon_emit(cs, V_028A7C_VGT_INDEX_8);
				break;
			case 2:
				radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
					    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
						     V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
				break;
			case 4:
				radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
					    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
						     V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
				break;
			default:
				assert(!"unreachable");
				return;
			}

			sctx->last_index_size = ib->index_size;
		}

		index_max_size = (ib->buffer->width0 - ib->offset) /
				  ib->index_size;
		index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      (struct r600_resource *)ib->buffer,
				      RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
	} else {
		/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
		 * so the state must be re-emitted before the next indexed draw.
		 */
		if (sctx->b.chip_class >= CIK)
			sctx->last_index_size = -1;
	}

	if (!info->indirect) {
		int base_vertex;

		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);

		/* Base vertex and start instance. */
		base_vertex = info->indexed ? info->index_bias : info->start;

		if (base_vertex != sctx->last_base_vertex ||
		    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
		    info->start_instance != sctx->last_start_instance ||
		    info->drawid != sctx->last_drawid ||
		    sh_base_reg != sctx->last_sh_base_reg) {
			radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);
			radeon_emit(cs, info->drawid);

			sctx->last_base_vertex = base_vertex;
			sctx->last_start_instance = info->start_instance;
			sctx->last_drawid = info->drawid;
			sctx->last_sh_base_reg = sh_base_reg;
		}
	} else {
		uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

		assert(indirect_va % 8 == 0);

		si_invalidate_draw_sh_constants(sctx);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, 1);
		radeon_emit(cs, indirect_va);
		radeon_emit(cs, indirect_va >> 32);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      (struct r600_resource *)info->indirect,
				      RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
	}

	if (info->indirect) {
		unsigned di_src_sel = info->indexed ? V_0287F0_DI_SRC_SEL_DMA
						    : V_0287F0_DI_SRC_SEL_AUTO_INDEX;

		assert(info->indirect_offset % 4 == 0);

		if (info->indexed) {
			radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
			radeon_emit(cs, index_max_size);
		}

		if (!sctx->screen->has_draw_indirect_multi) {
			radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT
							   : PKT3_DRAW_INDIRECT,
					     3, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, di_src_sel);
		} else {
			uint64_t count_va = 0;

			if (info->indirect_params) {
				struct r600_resource *params_buf =
					(struct r600_resource *)info->indirect_params;

				radeon_add_to_buffer_list(
					&sctx->b, &sctx->b.gfx, params_buf,
					RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

				count_va = params_buf->gpu_address + info->indirect_params_offset;
			}

			radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
							     PKT3_DRAW_INDIRECT_MULTI,
					     8, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
					S_2C3_DRAW_INDEX_ENABLE(1) |
					S_2C3_COUNT_INDIRECT_ENABLE(!!info->indirect_params));
			radeon_emit(cs, info->indirect_count);
			radeon_emit(cs, count_va);
			radeon_emit(cs, count_va >> 32);
			radeon_emit(cs, info->indirect_stride);
			radeon_emit(cs, di_src_sel);
		}
	} else {
		if (info->indexed) {
			index_va += info->start * ib->index_size;

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
			radeon_emit(cs, index_max_size);
			radeon_emit(cs, index_va);
			radeon_emit(cs, (index_va >> 32UL) & 0xFF);
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		} else {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				    S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
		}
	}
}

void si_emit_cache_flush(struct si_context *sctx)
{
	struct r600_common_context *rctx = &sctx->b;
	struct radeon_winsys_cs *cs = rctx->gfx.cs;
	uint32_t cp_coher_cntl = 0;

	/* SI has a bug that it always flushes ICACHE and KCACHE if either
	 * bit is set. An alternative way is to write SQC_CACHES, but that
	 * doesn't seem to work reliably. Since the bug doesn't affect
	 * correctness (it only does more work than necessary) and
	 * the performance impact is likely negligible, there is no plan
	 * to add a workaround for it.
	 */

	if (rctx->flags & SI_CONTEXT_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (rctx->flags & SI_CONTEXT_INV_SMEM_L1)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (rctx->flags & SI_CONTEXT_INV_VMEM_L1)
		cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
	if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);

		if (rctx->chip_class >= VI)
			cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
	}

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);

		/* Necessary for DCC */
		if (rctx->chip_class >= VI) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_DATA_TS) |
					EVENT_INDEX(5));
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
		}
	}
	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
		/* needed for wait for idle in SURFACE_SYNC */
		assert(rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB);
	}
	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
		/* needed for wait for idle in SURFACE_SYNC */
		assert(rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB);
	}

	/* Wait for shader engines to go idle.
	 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
	 * for everything including CB/DB cache flushes.
	 */
	if (!(rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
			     SI_CONTEXT_FLUSH_AND_INV_DB))) {
		if (rctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			/* Only count explicit shader flushes, not implicit ones
			 * done by SURFACE_SYNC.
			 */
			rctx->num_vs_flushes++;
			rctx->num_ps_flushes++;
		} else if (rctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			rctx->num_vs_flushes++;
		}
	}

	if (rctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
	    sctx->compute_is_busy) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
		rctx->num_cs_flushes++;
		sctx->compute_is_busy = false;
	}

	/* VGT state synchronization. */
	if (rctx->flags & SI_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (rctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* Make sure ME is idle (it executes most packets) before continuing.
	 * This prevents read-after-write hazards between PFP and ME.
	 */
	if (cp_coher_cntl || (rctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH)) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}

	/* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
	 * Therefore, it should be last. Done in PFP.
	 */
	if (cp_coher_cntl) {
		/* ACQUIRE_MEM is only required on a compute ring. */
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	}

	if (rctx->flags & R600_CONTEXT_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
			    EVENT_INDEX(0));
	} else if (rctx->flags & R600_CONTEXT_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
			    EVENT_INDEX(0));
	}

	rctx->flags = 0;
}

static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	if (info->indirect) {
		struct r600_resource *indirect =
			(struct r600_resource*)info->indirect;
		int *data = r600_buffer_map_sync_with_rings(&sctx->b,
					indirect, PIPE_TRANSFER_READ);
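		/* The indirect arguments are consecutive 32-bit words laid
		 * out as { count, instance_count, start, ... }, hence the
		 * reads of words 0 and 2 below.
		 */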
		data += info->indirect_offset/sizeof(int);
		*start = data[2];
		*count = data[0];
	} else {
		*start = info->start;
		*count = info->count;
	}
}

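/* CE/DE synchronization: the constant engine (CE) runs ahead of the draw
 * engine (DE) to upload descriptors. Before a draw that uses them, the DE
 * waits until the CE counter shows that the upload has completed; after the
 * draw, the DE counter is bumped so the CE knows it may reuse that memory.
 */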
void si_ce_pre_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->ce_ib, PKT3(PKT3_INCREMENT_CE_COUNTER, 0, 0));
		radeon_emit(sctx->ce_ib, 1);

		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_WAIT_ON_CE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 1);
	}
}

void si_ce_post_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_INCREMENT_DE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 0);

		sctx->ce_need_synchronization = false;
	}
}

void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct pipe_index_buffer ib = {};
	unsigned mask, dirty_fb_counter, dirty_tex_counter, rast_prim;

	if (likely(!info->indirect)) {
		/* SI-CI treat instance_count==0 as instance_count==1. There is
		 * no workaround for indirect draws, but we can at least skip
		 * direct draws.
		 */
		if (unlikely(!info->instance_count))
			return;

		/* Handle count == 0. */
		if (unlikely(!info->count &&
			     (info->indexed || !info->count_from_stream_output)))
			return;
	}

	if (unlikely(!sctx->vs_shader.cso)) {
		assert(0);
		return;
	}
	if (unlikely(!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard))) {
		assert(0);
		return;
	}
	if (unlikely(!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES))) {
		assert(0);
		return;
	}

	/* Re-emit the framebuffer state if needed. */
	dirty_fb_counter = p_atomic_read(&sctx->b.screen->dirty_fb_counter);
	if (unlikely(dirty_fb_counter != sctx->b.last_dirty_fb_counter)) {
		sctx->b.last_dirty_fb_counter = dirty_fb_counter;
		sctx->framebuffer.dirty_cbufs |=
			((1 << sctx->framebuffer.state.nr_cbufs) - 1);
		sctx->framebuffer.dirty_zsbuf = true;
		si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
	}

	/* Invalidate & recompute texture descriptors if needed. */
	dirty_tex_counter = p_atomic_read(&sctx->b.screen->dirty_tex_descriptor_counter);
	if (unlikely(dirty_tex_counter != sctx->b.last_dirty_tex_descriptor_counter)) {
		sctx->b.last_dirty_tex_descriptor_counter = dirty_tex_counter;
		si_update_all_texture_descriptors(sctx);
	}

	si_decompress_graphics_textures(sctx);

	/* Set the rasterization primitive type.
	 *
	 * This must be done after si_decompress_textures, which can call
	 * draw_vbo recursively, and before si_update_shaders, which uses
	 * current_rast_prim for this draw_vbo call. */
	if (sctx->gs_shader.cso)
		rast_prim = sctx->gs_shader.cso->gs_output_prim;
	else if (sctx->tes_shader.cso)
		rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
	else
		rast_prim = info->mode;

	if (rast_prim != sctx->current_rast_prim) {
		sctx->current_rast_prim = rast_prim;
		sctx->do_update_shaders = true;
	}

	if (sctx->do_update_shaders && !si_update_shaders(sctx))
		return;

	if (!si_upload_graphics_shader_descriptors(sctx))
		return;

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
		ib.user_buffer = sctx->index_buffer.user_buffer;
		ib.index_size = sctx->index_buffer.index_size;
		ib.offset = sctx->index_buffer.offset;

		/* Translate or upload, if needed. */
		/* 8-bit indices are natively supported only on VI and later,
		 * so translate them to 16-bit on older chips. */
		if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset, start, count, start_offset;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_alloc(sctx->b.uploader, start_offset, count * 2, 256,
				       &out_offset, &out_buffer, &ptr);
			if (!out_buffer) {
				pipe_resource_reference(&ib.buffer, NULL);
				return;
			}

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
							   ib.offset + start_offset,
							   count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			/* info->start will be added by the drawing code */
			ib.offset = out_offset - start_offset;
			ib.index_size = 2;
		} else if (ib.user_buffer && !ib.buffer) {
			unsigned start, count, start_offset;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
				      256, (char*)ib.user_buffer + start_offset,
				      &ib.offset, &ib.buffer);
			if (!ib.buffer)
				return;
			/* info->start will be added by the drawing code */
			ib.offset -= start_offset;
		}
	}

	/* VI reads index buffers through TC L2. */
	if (info->indexed && sctx->b.chip_class <= CIK &&
	    r600_resource(ib.buffer)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
		r600_resource(ib.buffer)->TC_L2_dirty = false;
	}

	if (info->indirect && r600_resource(info->indirect)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
		r600_resource(info->indirect)->TC_L2_dirty = false;
	}

	if (info->indirect_params &&
	    r600_resource(info->indirect_params)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
		r600_resource(info->indirect_params)->TC_L2_dirty = false;
	}

	/* Add buffer sizes for memory checking in need_cs_space. */
	if (sctx->emit_scratch_reloc && sctx->scratch_buffer)
		r600_context_add_resource_size(ctx, &sctx->scratch_buffer->b.b);
	if (info->indirect)
		r600_context_add_resource_size(ctx, info->indirect);

	si_need_cs_space(sctx);

	/* Since we've called r600_context_add_resource_size for vertex buffers,
	 * this must be called after si_need_cs_space, because we must let
	 * need_cs_space flush before we add buffers to the buffer list.
	 */
	if (!si_upload_vertex_buffer_descriptors(sctx))
		return;

	/* Flush caches before emitting states. */
	if (sctx->b.flags)
		si_emit_cache_flush(sctx);

	/* Emit states. */
	mask = sctx->dirty_atoms;
	while (mask) {
		struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];

		atom->emit(&sctx->b, atom);
	}
	sctx->dirty_atoms = 0;

	si_pm4_emit_dirty(sctx);
	si_emit_scratch_reloc(sctx);
	si_emit_rasterizer_prim_state(sctx);
	si_emit_draw_registers(sctx, info);

	si_ce_pre_draw_synchronization(sctx);

	si_emit_draw_packets(sctx, info, &ib);

	si_ce_post_draw_synchronization(sctx);

	if (sctx->trace_buf)
		si_trace_emit(sctx);

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if ((sctx->b.family == CHIP_HAWAII ||
	     sctx->b.family == CHIP_TONGA ||
	     sctx->b.family == CHIP_FIJI) &&
	    r600_get_strmout_en(&sctx->b)) {
		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	/* Set the depth buffer as dirty. */
	if (sctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;

		if (rtex->surface.flags & RADEON_SURF_SBUFFER)
			rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (sctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = sctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = sctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture*)surf->texture;

			if (rtex->fmask.size)
				rtex->dirty_level_mask |= 1 << surf->u.tex.level;
			if (rtex->dcc_gather_statistics)
				rtex->separate_dcc_dirty = true;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	sctx->b.num_draw_calls++;
	if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
		sctx->b.num_spill_draw_calls++;
}

void si_trace_emit(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	sctx->trace_id++;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, sctx->trace_buf,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, sctx->trace_buf->gpu_address);
	radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
	radeon_emit(cs, sctx->trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, SI_ENCODE_TRACE_POINT(sctx->trace_id));
}