radeonsi: add one more SWITCH_ON_EOI requirement for Hawaii and VI
src/gallium/drivers/radeonsi/si_state_draw.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "si_pipe.h"
#include "si_shader.h"
#include "radeon/r600_cs.h"
#include "sid.h"

#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"
static void si_decompress_textures(struct si_context *sctx)
{
        if (!sctx->blitter->running) {
                /* Flush depth textures which need to be flushed. */
                for (int i = 0; i < SI_NUM_SHADERS; i++) {
                        if (sctx->samplers[i].depth_texture_mask) {
                                si_flush_depth_textures(sctx, &sctx->samplers[i]);
                        }
                        if (sctx->samplers[i].compressed_colortex_mask) {
                                si_decompress_color_textures(sctx, &sctx->samplers[i]);
                        }
                }
        }
}

static unsigned si_conv_pipe_prim(unsigned mode)
{
        static const unsigned prim_conv[] = {
                [PIPE_PRIM_POINTS]                      = V_008958_DI_PT_POINTLIST,
                [PIPE_PRIM_LINES]                       = V_008958_DI_PT_LINELIST,
                [PIPE_PRIM_LINE_LOOP]                   = V_008958_DI_PT_LINELOOP,
                [PIPE_PRIM_LINE_STRIP]                  = V_008958_DI_PT_LINESTRIP,
                [PIPE_PRIM_TRIANGLES]                   = V_008958_DI_PT_TRILIST,
                [PIPE_PRIM_TRIANGLE_STRIP]              = V_008958_DI_PT_TRISTRIP,
                [PIPE_PRIM_TRIANGLE_FAN]                = V_008958_DI_PT_TRIFAN,
                [PIPE_PRIM_QUADS]                       = V_008958_DI_PT_QUADLIST,
                [PIPE_PRIM_QUAD_STRIP]                  = V_008958_DI_PT_QUADSTRIP,
                [PIPE_PRIM_POLYGON]                     = V_008958_DI_PT_POLYGON,
                [PIPE_PRIM_LINES_ADJACENCY]             = V_008958_DI_PT_LINELIST_ADJ,
                [PIPE_PRIM_LINE_STRIP_ADJACENCY]        = V_008958_DI_PT_LINESTRIP_ADJ,
                [PIPE_PRIM_TRIANGLES_ADJACENCY]         = V_008958_DI_PT_TRILIST_ADJ,
                [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]    = V_008958_DI_PT_TRISTRIP_ADJ,
                [PIPE_PRIM_PATCHES]                     = V_008958_DI_PT_PATCH,
                [R600_PRIM_RECTANGLE_LIST]              = V_008958_DI_PT_RECTLIST
        };
        assert(mode < Elements(prim_conv));
        return prim_conv[mode];
}

static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
        static const int prim_conv[] = {
                [PIPE_PRIM_POINTS]                      = V_028A6C_OUTPRIM_TYPE_POINTLIST,
                [PIPE_PRIM_LINES]                       = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
                [PIPE_PRIM_LINE_LOOP]                   = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
                [PIPE_PRIM_LINE_STRIP]                  = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
                [PIPE_PRIM_TRIANGLES]                   = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                [PIPE_PRIM_TRIANGLE_STRIP]              = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                [PIPE_PRIM_TRIANGLE_FAN]                = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                [PIPE_PRIM_QUADS]                       = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                [PIPE_PRIM_QUAD_STRIP]                  = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                [PIPE_PRIM_POLYGON]                     = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                [PIPE_PRIM_LINES_ADJACENCY]             = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
                [PIPE_PRIM_LINE_STRIP_ADJACENCY]        = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
                [PIPE_PRIM_TRIANGLES_ADJACENCY]         = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]    = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                [PIPE_PRIM_PATCHES]                     = V_028A6C_OUTPRIM_TYPE_POINTLIST,
                [R600_PRIM_RECTANGLE_LIST]              = V_028A6C_OUTPRIM_TYPE_TRISTRIP
        };
        assert(mode < Elements(prim_conv));

        return prim_conv[mode];
}

/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
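/* A sketch of the LDS layout implied by the offset math below (added
 * commentary, not from the original code): num_patches input patches are
 * laid out back to back, followed by num_patches output patches; within
 * each output patch, the per-vertex outputs come first and that patch's
 * per-patch outputs follow. Every input/output slot is one vec4 (16 bytes).
 */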
static void si_emit_derived_tess_state(struct si_context *sctx,
                                       const struct pipe_draw_info *info,
                                       unsigned *num_patches)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        struct si_shader_ctx_state *ls = &sctx->vs_shader;
        /* The TES pointer will only be used for sctx->last_tcs.
         * It would be wrong to think that TCS = TES. */
        struct si_shader_selector *tcs =
                sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
        unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
        unsigned num_tcs_input_cp = info->vertices_per_patch;
        unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
        unsigned num_tcs_patch_outputs;
        unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
        unsigned input_patch_size, output_patch_size, output_patch0_offset;
        unsigned perpatch_output_offset, lds_size, ls_rsrc2;
        unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;

        *num_patches = 1; /* TODO: calculate this */

        if (sctx->last_ls == ls->current &&
            sctx->last_tcs == tcs &&
            sctx->last_tes_sh_base == tes_sh_base &&
            sctx->last_num_tcs_input_cp == num_tcs_input_cp)
                return;

        sctx->last_ls = ls->current;
        sctx->last_tcs = tcs;
        sctx->last_tes_sh_base = tes_sh_base;
        sctx->last_num_tcs_input_cp = num_tcs_input_cp;

        /* This calculates how shader inputs and outputs among VS, TCS, and TES
         * are laid out in LDS. */
        num_tcs_inputs = util_last_bit64(ls->cso->outputs_written);

        if (sctx->tcs_shader.cso) {
                num_tcs_outputs = util_last_bit64(tcs->outputs_written);
                num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
                num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
        } else {
                /* No TCS. Route varyings from LS to TES. */
                num_tcs_outputs = num_tcs_inputs;
                num_tcs_output_cp = num_tcs_input_cp;
                num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
        }

        input_vertex_size = num_tcs_inputs * 16;
        output_vertex_size = num_tcs_outputs * 16;

        input_patch_size = num_tcs_input_cp * input_vertex_size;

        pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
        output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
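        /* Worked example with hypothetical numbers (for illustration only):
         * 3 input CPs with 2 vec4 outputs each give input_vertex_size = 32
         * and input_patch_size = 96 bytes; 4 output CPs with 3 vec4 outputs
         * plus 2 patch outputs give pervertex_output_patch_size = 192 and
         * output_patch_size = 192 + 32 = 224 bytes. */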

        output_patch0_offset = sctx->tcs_shader.cso ? input_patch_size * *num_patches : 0;
        perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

        lds_size = output_patch0_offset + output_patch_size * *num_patches;
        ls_rsrc2 = ls->current->ls_rsrc2;

        if (sctx->b.chip_class >= CIK) {
                assert(lds_size <= 65536);
                ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 512) / 512);
        } else {
                assert(lds_size <= 32768);
                ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 256) / 256);
        }
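        /* The divisors above imply the LDS_SIZE granularity: 512 bytes on
         * CIK and later, 256 bytes on SI. E.g. (hypothetical numbers)
         * lds_size = 2240 bytes rounds up to 2560 and encodes as LDS_SIZE = 5
         * on CIK, but rounds to 2304 and encodes as 9 on SI. */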

        /* Due to a hw bug, RSRC2_LS must be written twice with another
         * LS register written in between. */
        if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
                radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
        radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
        radeon_emit(cs, ls->current->ls_rsrc1);
        radeon_emit(cs, ls_rsrc2);

        /* Compute userdata SGPRs. */
        assert(((input_vertex_size / 4) & ~0xff) == 0);
        assert(((output_vertex_size / 4) & ~0xff) == 0);
        assert(((input_patch_size / 4) & ~0x1fff) == 0);
        assert(((output_patch_size / 4) & ~0x1fff) == 0);
        assert(((output_patch0_offset / 16) & ~0xffff) == 0);
        assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
        assert(num_tcs_input_cp <= 32);
        assert(num_tcs_output_cp <= 32);

        tcs_in_layout = (input_patch_size / 4) |
                        ((input_vertex_size / 4) << 13);
        tcs_out_layout = (output_patch_size / 4) |
                         ((output_vertex_size / 4) << 13);
        tcs_out_offsets = (output_patch0_offset / 16) |
                          ((perpatch_output_offset / 16) << 16);
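        /* Bit layout implied by the shifts and asserts above:
         *   tcs_in_layout/tcs_out_layout: [12:0] patch size in dwords,
         *                                 [20:13] vertex size in dwords;
         *   tcs_out_offsets: [15:0] patch0 output offset / 16,
         *                    [31:16] per-patch output offset / 16.
         * Bits [31:26] of the out_layout word carry the control-point
         * counts merged in when the words are emitted below. */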

        /* Set them for LS. */
        radeon_set_sh_reg(cs,
                R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
                tcs_in_layout);

        /* Set them for TCS. */
        radeon_set_sh_reg_seq(cs,
                R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OUT_OFFSETS * 4, 3);
        radeon_emit(cs, tcs_out_offsets);
        radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
        radeon_emit(cs, tcs_in_layout);

        /* Set them for TES. */
        radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OUT_OFFSETS * 4, 2);
        radeon_emit(cs, tcs_out_offsets);
        radeon_emit(cs, tcs_out_layout | (num_tcs_output_cp << 26));
}

static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
                                          const struct pipe_draw_info *info,
                                          unsigned num_patches)
{
        struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
        unsigned prim = info->mode;
        unsigned primgroup_size = 128; /* recommended without a GS */
        unsigned max_primgroup_in_wave = 2;

        /* SWITCH_ON_EOP(0) is always preferable. */
        bool wd_switch_on_eop = false;
        bool ia_switch_on_eop = false;
        bool ia_switch_on_eoi = false;
        bool partial_vs_wave = false;
        bool partial_es_wave = false;

        if (sctx->gs_shader.cso)
                primgroup_size = 64; /* recommended with a GS */

        if (sctx->tes_shader.cso) {
                unsigned num_cp_out =
                        sctx->tcs_shader.cso ?
                        sctx->tcs_shader.cso->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
                        info->vertices_per_patch;
                unsigned max_size = 256 / MAX2(info->vertices_per_patch, num_cp_out);

                primgroup_size = MIN2(primgroup_size, max_size);

                /* primgroup_size must be set to a multiple of NUM_PATCHES */
                primgroup_size = (primgroup_size / num_patches) * num_patches;
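                /* E.g. (hypothetical numbers) with num_patches = 3, a
                 * primgroup_size of 64 is rounded down to 63 by the
                 * integer division above. */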

                /* SWITCH_ON_EOI must be set if PrimID is used.
                 * If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
                if ((sctx->tcs_shader.cso && sctx->tcs_shader.cso->info.uses_primid) ||
                    sctx->tes_shader.cso->info.uses_primid) {
                        ia_switch_on_eoi = true;
                        partial_es_wave = true;
                }

                /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
                if ((sctx->b.family == CHIP_TAHITI ||
                     sctx->b.family == CHIP_PITCAIRN ||
                     sctx->b.family == CHIP_BONAIRE) &&
                    sctx->gs_shader.cso)
                        partial_vs_wave = true;
        }

        /* This is a hardware requirement. */
        if ((rs && rs->line_stipple_enable) ||
            (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
                ia_switch_on_eop = true;
                wd_switch_on_eop = true;
        }

        if (sctx->b.chip_class >= CIK) {
                /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
                 * 4 shader engines. Set 1 to pass the assertion below.
                 * The other cases are hardware requirements. */
                if (sctx->b.screen->info.max_se < 4 ||
                    prim == PIPE_PRIM_POLYGON ||
                    prim == PIPE_PRIM_LINE_LOOP ||
                    prim == PIPE_PRIM_TRIANGLE_FAN ||
                    prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
                    info->primitive_restart)
                        wd_switch_on_eop = true;

                /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
                 * We don't know that for indirect drawing, so treat it as
                 * always problematic. */
                if (sctx->b.family == CHIP_HAWAII &&
                    (info->indirect || info->instance_count > 1))
                        wd_switch_on_eop = true;

                /* USE_OPAQUE doesn't work when WD_SWITCH_ON_EOP is 0. */
                if (info->count_from_stream_output)
                        wd_switch_on_eop = true;

                /* Required on CIK and later. */
                if (sctx->b.screen->info.max_se > 2 && !wd_switch_on_eop)
                        ia_switch_on_eoi = true;

                /* Required by Hawaii and, for some special cases, by VI. */
                if (ia_switch_on_eoi &&
                    (sctx->b.family == CHIP_HAWAII ||
                     (sctx->b.chip_class == VI &&
                      (sctx->gs_shader.cso || max_primgroup_in_wave != 2))))
                        partial_vs_wave = true;

                /* Instancing bug on Bonaire. */
                if (sctx->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
                    (info->indirect || info->instance_count > 1))
                        partial_vs_wave = true;

                /* If the WD switch is false, the IA switch must be false too. */
                assert(wd_switch_on_eop || !ia_switch_on_eop);
        }

        /* Hw bug with single-primitive instances and SWITCH_ON_EOI
         * on multi-SE chips. */
        if (sctx->b.screen->info.max_se >= 2 && ia_switch_on_eoi &&
            (info->indirect ||
             (info->instance_count > 1 &&
              u_prims_for_vertices(info->mode, info->count) <= 1)))
                sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
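        /* Example of the case above (added for illustration): an instanced
         * draw of a single triangle (count = 3 with PIPE_PRIM_TRIANGLES)
         * produces exactly one primitive per instance, which is what
         * triggers the VGT_FLUSH workaround. */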

        return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
               S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
               S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
               S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
               S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
               S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
               S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ?
                                            max_primgroup_in_wave : 0);
}

static unsigned si_get_ls_hs_config(struct si_context *sctx,
                                    const struct pipe_draw_info *info,
                                    unsigned num_patches)
{
        unsigned num_output_cp;

        if (!sctx->tes_shader.cso)
                return 0;

        num_output_cp = sctx->tcs_shader.cso ?
                sctx->tcs_shader.cso->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
                info->vertices_per_patch;

        return S_028B58_NUM_PATCHES(num_patches) |
               S_028B58_HS_NUM_INPUT_CP(info->vertices_per_patch) |
               S_028B58_HS_NUM_OUTPUT_CP(num_output_cp);
}

static void si_emit_scratch_reloc(struct si_context *sctx)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;

        if (!sctx->emit_scratch_reloc)
                return;

        radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
                               sctx->spi_tmpring_size);

        if (sctx->scratch_buffer) {
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                          sctx->scratch_buffer, RADEON_USAGE_READWRITE,
                                          RADEON_PRIO_SCRATCH_BUFFER);
        }
        sctx->emit_scratch_reloc = false;
}

/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        unsigned rast_prim = sctx->current_rast_prim;
        struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;

        /* Skip this if not rendering lines. */
        if (rast_prim != PIPE_PRIM_LINES &&
            rast_prim != PIPE_PRIM_LINE_LOOP &&
            rast_prim != PIPE_PRIM_LINE_STRIP &&
            rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
            rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
                return;

        if (rast_prim == sctx->last_rast_prim &&
            rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
                return;

        radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
                rs->pa_sc_line_stipple |
                S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 :
                                         rast_prim == PIPE_PRIM_LINE_STRIP ? 2 : 0));

        sctx->last_rast_prim = rast_prim;
        sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}

static void si_emit_draw_registers(struct si_context *sctx,
                                   const struct pipe_draw_info *info)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        unsigned prim = si_conv_pipe_prim(info->mode);
        unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
        unsigned ia_multi_vgt_param, ls_hs_config, num_patches = 0;

        if (sctx->tes_shader.cso)
                si_emit_derived_tess_state(sctx, info, &num_patches);

        ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
        ls_hs_config = si_get_ls_hs_config(sctx, info, num_patches);

        /* Draw state. */
        if (prim != sctx->last_prim ||
            ia_multi_vgt_param != sctx->last_multi_vgt_param ||
            ls_hs_config != sctx->last_ls_hs_config) {
                if (sctx->b.chip_class >= CIK) {
                        radeon_emit(cs, PKT3(PKT3_DRAW_PREAMBLE, 2, 0));
                        radeon_emit(cs, prim);               /* VGT_PRIMITIVE_TYPE */
                        radeon_emit(cs, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
                        radeon_emit(cs, ls_hs_config);       /* VGT_LS_HS_CONFIG */
                } else {
                        radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
                        radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
                        radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
                }
                sctx->last_prim = prim;
                sctx->last_multi_vgt_param = ia_multi_vgt_param;
                sctx->last_ls_hs_config = ls_hs_config;
        }

        if (gs_out_prim != sctx->last_gs_out_prim) {
                radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
                sctx->last_gs_out_prim = gs_out_prim;
        }

        /* Primitive restart. */
        if (info->primitive_restart != sctx->last_primitive_restart_en) {
                radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
                sctx->last_primitive_restart_en = info->primitive_restart;

                if (info->primitive_restart &&
                    (info->restart_index != sctx->last_restart_index ||
                     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
                        radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
                                               info->restart_index);
                        sctx->last_restart_index = info->restart_index;
                }
        }
}

static void si_emit_draw_packets(struct si_context *sctx,
                                 const struct pipe_draw_info *info,
                                 const struct pipe_index_buffer *ib)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];

        if (info->count_from_stream_output) {
                struct r600_so_target *t =
                        (struct r600_so_target*)info->count_from_stream_output;
                uint64_t va = t->buf_filled_size->gpu_address +
                              t->buf_filled_size_offset;

                radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
                                       t->stride_in_dw);

                radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
                radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
                            COPY_DATA_DST_SEL(COPY_DATA_REG) |
                            COPY_DATA_WR_CONFIRM);
                radeon_emit(cs, va);       /* src address lo */
                radeon_emit(cs, va >> 32); /* src address hi */
                radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
                radeon_emit(cs, 0); /* unused */

                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                          t->buf_filled_size, RADEON_USAGE_READ,
                                          RADEON_PRIO_SO_FILLED_SIZE);
        }

        /* draw packet */
        if (info->indexed) {
                radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));

                /* index type */
                switch (ib->index_size) {
                case 1:
                        radeon_emit(cs, V_028A7C_VGT_INDEX_8);
                        break;
                case 2:
                        radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
                                    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
                                     V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
                        break;
                case 4:
                        radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
                                    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
                                     V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
                        break;
                default:
                        assert(!"unreachable");
                        return;
                }
        }

        if (!info->indirect) {
                int base_vertex;

                radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
                radeon_emit(cs, info->instance_count);

                /* Base vertex and start instance. */
                base_vertex = info->indexed ? info->index_bias : info->start;

                if (base_vertex != sctx->last_base_vertex ||
                    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
                    info->start_instance != sctx->last_start_instance ||
                    sh_base_reg != sctx->last_sh_base_reg) {
                        radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
                        radeon_emit(cs, base_vertex);
                        radeon_emit(cs, info->start_instance);

                        sctx->last_base_vertex = base_vertex;
                        sctx->last_start_instance = info->start_instance;
                        sctx->last_sh_base_reg = sh_base_reg;
                }
        } else {
                si_invalidate_draw_sh_constants(sctx);

                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                          (struct r600_resource *)info->indirect,
                                          RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
        }

        if (info->indexed) {
                uint32_t index_max_size = (ib->buffer->width0 - ib->offset) /
                                          ib->index_size;
                uint64_t index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;

                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                          (struct r600_resource *)ib->buffer,
                                          RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);

                if (info->indirect) {
                        uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

                        assert(indirect_va % 8 == 0);
                        assert(index_va % 2 == 0);
                        assert(info->indirect_offset % 4 == 0);

                        radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
                        radeon_emit(cs, 1);
                        radeon_emit(cs, indirect_va);
                        radeon_emit(cs, indirect_va >> 32);

                        radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
                        radeon_emit(cs, index_va);
                        radeon_emit(cs, index_va >> 32);

                        radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
                        radeon_emit(cs, index_max_size);

                        radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_INDIRECT, 3, sctx->b.predicate_drawing));
                        radeon_emit(cs, info->indirect_offset);
                        radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
                        radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
                        radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
                } else {
                        index_va += info->start * ib->index_size;

                        radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, sctx->b.predicate_drawing));
                        radeon_emit(cs, index_max_size);
                        radeon_emit(cs, index_va);
                        radeon_emit(cs, (index_va >> 32UL) & 0xFF); /* only bits [39:32] of the VA */
                        radeon_emit(cs, info->count);
                        radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
                }
        } else {
                if (info->indirect) {
                        uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

                        assert(indirect_va % 8 == 0);
                        assert(info->indirect_offset % 4 == 0);

                        radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
                        radeon_emit(cs, 1);
                        radeon_emit(cs, indirect_va);
                        radeon_emit(cs, indirect_va >> 32);

                        radeon_emit(cs, PKT3(PKT3_DRAW_INDIRECT, 3, sctx->b.predicate_drawing));
                        radeon_emit(cs, info->indirect_offset);
                        radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
                        radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
                        radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX);
                } else {
                        radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, sctx->b.predicate_drawing));
                        radeon_emit(cs, info->count);
                        radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
                                    S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
                }
        }
}

#define BOTH_ICACHE_KCACHE (SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_KCACHE)

void si_emit_cache_flush(struct si_context *si_ctx, struct r600_atom *atom)
{
        struct r600_common_context *sctx = &si_ctx->b;
        struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
        uint32_t cp_coher_cntl = 0;
        uint32_t compute =
                PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));

        /* SI has a bug that it always flushes ICACHE and KCACHE if either
         * bit is set. An alternative way is to write SQC_CACHES, but that
         * doesn't seem to work reliably. Since the bug doesn't affect
         * correctness (it only does more work than necessary) and
         * the performance impact is likely negligible, there is no plan
         * to fix it.
         */

        if (sctx->flags & SI_CONTEXT_INV_ICACHE)
                cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
        if (sctx->flags & SI_CONTEXT_INV_KCACHE)
                cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

        if (sctx->flags & SI_CONTEXT_INV_TC_L1)
                cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
        if (sctx->flags & SI_CONTEXT_INV_TC_L2) {
                cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);

                /* TODO: this might not be needed. */
                if (sctx->chip_class >= VI)
                        cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
        }

        if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
                cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
                                 S_0085F0_CB0_DEST_BASE_ENA(1) |
                                 S_0085F0_CB1_DEST_BASE_ENA(1) |
                                 S_0085F0_CB2_DEST_BASE_ENA(1) |
                                 S_0085F0_CB3_DEST_BASE_ENA(1) |
                                 S_0085F0_CB4_DEST_BASE_ENA(1) |
                                 S_0085F0_CB5_DEST_BASE_ENA(1) |
                                 S_0085F0_CB6_DEST_BASE_ENA(1) |
                                 S_0085F0_CB7_DEST_BASE_ENA(1);
        }
        if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
                cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
                                 S_0085F0_DB_DEST_BASE_ENA(1);
        }

        if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
                radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
        }
        if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
                radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
        }
        if (sctx->flags & SI_CONTEXT_FLUSH_WITH_INV_L2) {
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
                            EVENT_WRITE_INV_L2);
        }

        /* FLUSH_AND_INV events must be emitted before PS_PARTIAL_FLUSH.
         * Otherwise, clearing CMASK (CB meta) with CP DMA isn't reliable.
         *
         * I think the reason is that FLUSH_AND_INV is only added to a queue
         * and it is PS_PARTIAL_FLUSH that waits for it to complete.
         */
        if (sctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
                radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
        } else if (sctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
                radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
        }
        if (sctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH) {
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
                /* Fixed misplaced parenthesis: EVENT_INDEX(4) belongs outside
                 * EVENT_TYPE(), matching the PS/VS partial flushes above. */
                radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
        }
        if (sctx->flags & SI_CONTEXT_VGT_FLUSH) {
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
                radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
        }
        if (sctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
                radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
        }

        /* SURFACE_SYNC must be emitted after partial flushes.
         * It looks like SURFACE_SYNC flushes caches immediately and doesn't
         * wait for any engines. This should be last.
         */
        if (cp_coher_cntl) {
                if (sctx->chip_class >= CIK) {
                        /* The all-ones size below makes the sync cover the
                         * entire address range. */
                        radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
                        radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
                        radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
                        radeon_emit(cs, 0xff);          /* CP_COHER_SIZE_HI */
                        radeon_emit(cs, 0);             /* CP_COHER_BASE */
                        radeon_emit(cs, 0);             /* CP_COHER_BASE_HI */
                        radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
                } else {
                        radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
                        radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
                        radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
                        radeon_emit(cs, 0);             /* CP_COHER_BASE */
                        radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
                }
        }

        sctx->flags = 0;
}

static void si_get_draw_start_count(struct si_context *sctx,
                                    const struct pipe_draw_info *info,
                                    unsigned *start, unsigned *count)
{
        if (info->indirect) {
                struct r600_resource *indirect =
                        (struct r600_resource*)info->indirect;
                int *data = r600_buffer_map_sync_with_rings(&sctx->b,
                                indirect, PIPE_TRANSFER_READ);
                data += info->indirect_offset/sizeof(int);
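                /* Added note: the indirect buffer follows the
                 * ARB_draw_indirect argument layout
                 * { count, instance_count, start, ... }, which is why
                 * count is read from dword 0 and start from dword 2. */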
                *start = data[2];
                *count = data[0];
        } else {
                *start = info->start;
                *count = info->count;
        }
}

void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
        struct pipe_index_buffer ib = {};
        unsigned mask;

        if (!info->count && !info->indirect &&
            (info->indexed || !info->count_from_stream_output))
                return;

        if (!sctx->vs_shader.cso) {
                assert(0);
                return;
        }
        if (!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard)) {
                assert(0);
                return;
        }
        if (!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES)) {
                assert(0);
                return;
        }

        si_decompress_textures(sctx);

        /* Set the rasterization primitive type.
         *
         * This must be done after si_decompress_textures, which can call
         * draw_vbo recursively, and before si_update_shaders, which uses
         * current_rast_prim for this draw_vbo call. */
        if (sctx->gs_shader.cso)
                sctx->current_rast_prim = sctx->gs_shader.cso->gs_output_prim;
        else if (sctx->tes_shader.cso)
                sctx->current_rast_prim =
                        sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
        else
                sctx->current_rast_prim = info->mode;

        if (!si_update_shaders(sctx) ||
            !si_upload_shader_descriptors(sctx))
                return;

        if (info->indexed) {
                /* Initialize the index buffer struct. */
                pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
                ib.user_buffer = sctx->index_buffer.user_buffer;
                ib.index_size = sctx->index_buffer.index_size;
                ib.offset = sctx->index_buffer.offset;

                /* Translate or upload, if needed. */
                /* 8-bit indices are supported on VI. */
                if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
                        struct pipe_resource *out_buffer = NULL;
                        unsigned out_offset, start, count, start_offset;
                        void *ptr;

                        si_get_draw_start_count(sctx, info, &start, &count);
                        start_offset = start * ib.index_size;

                        u_upload_alloc(sctx->b.uploader, start_offset, count * 2,
                                       &out_offset, &out_buffer, &ptr);
                        if (!out_buffer) {
                                pipe_resource_reference(&ib.buffer, NULL);
                                return;
                        }

                        util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
                                                           ib.offset + start_offset,
                                                           count, ptr);

                        pipe_resource_reference(&ib.buffer, NULL);
                        ib.user_buffer = NULL;
                        ib.buffer = out_buffer;
                        /* info->start will be added by the drawing code */
                        ib.offset = out_offset - start_offset;
                        ib.index_size = 2;
                } else if (ib.user_buffer && !ib.buffer) {
                        unsigned start, count, start_offset;

                        si_get_draw_start_count(sctx, info, &start, &count);
                        start_offset = start * ib.index_size;

                        u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
                                      (char*)ib.user_buffer + start_offset,
                                      &ib.offset, &ib.buffer);
                        if (!ib.buffer)
                                return;
                        /* info->start will be added by the drawing code */
                        ib.offset -= start_offset;
                }
        }

        /* VI reads index buffers through TC L2. */
        if (info->indexed && sctx->b.chip_class <= CIK &&
            r600_resource(ib.buffer)->TC_L2_dirty) {
                sctx->b.flags |= SI_CONTEXT_INV_TC_L2;
                r600_resource(ib.buffer)->TC_L2_dirty = false;
        }

        /* Check flush flags. */
        if (sctx->b.flags)
                si_mark_atom_dirty(sctx, sctx->atoms.s.cache_flush);

        si_need_cs_space(sctx);

        /* Emit states. */
        mask = sctx->dirty_atoms;
        while (mask) {
                struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];

                atom->emit(&sctx->b, atom);
        }
        sctx->dirty_atoms = 0;

        si_pm4_emit_dirty(sctx);
        si_emit_scratch_reloc(sctx);
        si_emit_rasterizer_prim_state(sctx);
        si_emit_draw_registers(sctx, info);
        si_emit_draw_packets(sctx, info, &ib);

        if (sctx->trace_buf)
                si_trace_emit(sctx);

        /* Workaround for a VGT hang when streamout is enabled.
         * It must be done after drawing. */
        if ((sctx->b.family == CHIP_HAWAII || sctx->b.family == CHIP_TONGA) &&
            (sctx->b.streamout.streamout_enabled ||
             sctx->b.streamout.prims_gen_query_enabled)) {
                sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
        }

        /* Set the depth buffer as dirty. */
        if (sctx->framebuffer.state.zsbuf) {
                struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
                struct r600_texture *rtex = (struct r600_texture *)surf->texture;

                rtex->dirty_level_mask |= 1 << surf->u.tex.level;

                if (rtex->surface.flags & RADEON_SURF_SBUFFER)
                        rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
        }
        if (sctx->framebuffer.compressed_cb_mask) {
                struct pipe_surface *surf;
                struct r600_texture *rtex;
                unsigned mask = sctx->framebuffer.compressed_cb_mask;

                do {
                        unsigned i = u_bit_scan(&mask);
                        surf = sctx->framebuffer.state.cbufs[i];
                        rtex = (struct r600_texture*)surf->texture;

                        rtex->dirty_level_mask |= 1 << surf->u.tex.level;
                } while (mask);
        }

        pipe_resource_reference(&ib.buffer, NULL);
        sctx->b.num_draw_calls++;
}

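/* A note on the mechanism below (added commentary, not original): the same
 * trace id is written both to the trace buffer via WRITE_DATA and into the
 * command stream itself as a NOP payload, so after a hang the two can be
 * compared to find the last packet the CP actually processed. */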
void si_trace_emit(struct si_context *sctx)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;

        sctx->trace_id++;
        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, sctx->trace_buf,
                                  RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
        radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
        radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
                    S_370_WR_CONFIRM(1) |
                    S_370_ENGINE_SEL(V_370_ME));
        radeon_emit(cs, sctx->trace_buf->gpu_address);
        radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
        radeon_emit(cs, sctx->trace_id);
        radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
        radeon_emit(cs, SI_ENCODE_TRACE_POINT(sctx->trace_id));
}
913 }