/* src/gallium/drivers/radeonsi/si_state_draw.c */
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "si_pipe.h"
#include "si_shader.h"
#include "radeon/r600_cs.h"
#include "sid.h"

#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"

static void si_decompress_textures(struct si_context *sctx)
{
	if (!sctx->blitter->running) {
		/* Flush depth textures which need to be flushed. */
		for (int i = 0; i < SI_NUM_SHADERS; i++) {
			if (sctx->samplers[i].depth_texture_mask) {
				si_flush_depth_textures(sctx, &sctx->samplers[i]);
			}
			if (sctx->samplers[i].compressed_colortex_mask) {
				si_decompress_color_textures(sctx, &sctx->samplers[i]);
			}
		}
	}
}

static unsigned si_conv_pipe_prim(unsigned mode)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST
	};
	assert(mode < Elements(prim_conv));
	return prim_conv[mode];
}

static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}

/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
static void si_emit_derived_tess_state(struct si_context *sctx,
				       const struct pipe_draw_info *info,
				       unsigned *num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader_ctx_state *ls = &sctx->vs_shader;
	/* The TES pointer will only be used for sctx->last_tcs.
	 * It would be wrong to think that TCS = TES. */
	struct si_shader_selector *tcs =
		sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
	unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
	unsigned num_tcs_input_cp = info->vertices_per_patch;
	unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
	unsigned num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
	unsigned input_patch_size, output_patch_size, output_patch0_offset;
	unsigned perpatch_output_offset, lds_size, ls_rsrc2;
	unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;

	*num_patches = 1; /* TODO: calculate this */

	if (sctx->last_ls == ls->current &&
	    sctx->last_tcs == tcs &&
	    sctx->last_tes_sh_base == tes_sh_base &&
	    sctx->last_num_tcs_input_cp == num_tcs_input_cp)
		return;

	sctx->last_ls = ls->current;
	sctx->last_tcs = tcs;
	sctx->last_tes_sh_base = tes_sh_base;
	sctx->last_num_tcs_input_cp = num_tcs_input_cp;

	/* This calculates how shader inputs and outputs among VS, TCS, and TES
	 * are laid out in LDS. */
	num_tcs_inputs = util_last_bit64(ls->cso->outputs_written);

	if (sctx->tcs_shader.cso) {
		num_tcs_outputs = util_last_bit64(tcs->outputs_written);
		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
		num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
	} else {
		/* No TCS. Route varyings from LS to TES. */
		num_tcs_outputs = num_tcs_inputs;
		num_tcs_output_cp = num_tcs_input_cp;
		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
	}

	input_vertex_size = num_tcs_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	output_patch0_offset = sctx->tcs_shader.cso ? input_patch_size * *num_patches : 0;
	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

	lds_size = output_patch0_offset + output_patch_size * *num_patches;
	ls_rsrc2 = ls->current->rsrc2;

	if (sctx->b.chip_class >= CIK) {
		assert(lds_size <= 65536);
		ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 512) / 512);
	} else {
		assert(lds_size <= 32768);
		ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 256) / 256);
	}

	/* Due to a hw bug, RSRC2_LS must be written twice with another
	 * LS register written in between. */
	if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, ls->current->rsrc1);
	radeon_emit(cs, ls_rsrc2);

	/* Compute userdata SGPRs. */
	assert(((input_vertex_size / 4) & ~0xff) == 0);
	assert(((output_vertex_size / 4) & ~0xff) == 0);
	assert(((input_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch0_offset / 16) & ~0xffff) == 0);
	assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
	assert(num_tcs_input_cp <= 32);
	assert(num_tcs_output_cp <= 32);

	tcs_in_layout = (input_patch_size / 4) |
			((input_vertex_size / 4) << 13);
	tcs_out_layout = (output_patch_size / 4) |
			 ((output_vertex_size / 4) << 13);
	tcs_out_offsets = (output_patch0_offset / 16) |
			  ((perpatch_output_offset / 16) << 16);

	/* Set them for LS. */
	radeon_set_sh_reg(cs,
		R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
		tcs_in_layout);

	/* Set them for TCS. */
	radeon_set_sh_reg_seq(cs,
		R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OUT_OFFSETS * 4, 3);
	radeon_emit(cs, tcs_out_offsets);
	radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
	radeon_emit(cs, tcs_in_layout);

	/* Set them for TES. */
	radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OUT_OFFSETS * 4, 2);
	radeon_emit(cs, tcs_out_offsets);
	radeon_emit(cs, tcs_out_layout | (num_tcs_output_cp << 26));
}
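
/* Worked example (illustrative only, not part of the driver): take a
 * hypothetical patch with 4 input control points, 3 output control points,
 * 2 per-vertex outputs on both sides, and 2 per-patch outputs. The math
 * above then gives:
 *
 *   input_vertex_size           = 2 * 16      = 32 bytes
 *   input_patch_size            = 4 * 32      = 128 bytes
 *   output_vertex_size          = 2 * 16      = 32 bytes
 *   pervertex_output_patch_size = 3 * 32      = 96 bytes
 *   output_patch_size           = 96 + 2 * 16 = 128 bytes
 *
 * With num_patches = 1 and a TCS bound, output patch 0 starts at offset 128
 * (right after the input patch), the per-patch outputs start at
 * 128 + 96 = 224, and lds_size = 128 + 128 = 256 bytes, which is rounded up
 * to the hardware LDS allocation granularity (512 bytes on CIK+, 256 on SI)
 * before being packed into RSRC2_LS.
 */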

static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned num_patches)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	unsigned prim = info->mode;
	unsigned primgroup_size = 128; /* recommended without a GS */
	unsigned max_primgroup_in_wave = 2;

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = false;

	if (sctx->gs_shader.cso)
		primgroup_size = 64; /* recommended with a GS */

	if (sctx->tes_shader.cso) {
		unsigned num_cp_out =
			sctx->tcs_shader.cso ?
			sctx->tcs_shader.cso->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
			info->vertices_per_patch;
		unsigned max_size = 256 / MAX2(info->vertices_per_patch, num_cp_out);

		primgroup_size = MIN2(primgroup_size, max_size);

		/* primgroup_size must be set to a multiple of NUM_PATCHES */
		primgroup_size = (primgroup_size / num_patches) * num_patches;

		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if ((sctx->tcs_shader.cso && sctx->tcs_shader.cso->info.uses_primid) ||
		    sctx->tes_shader.cso->info.uses_primid)
			ia_switch_on_eoi = true;

		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((sctx->b.family == CHIP_TAHITI ||
		     sctx->b.family == CHIP_PITCAIRN ||
		     sctx->b.family == CHIP_BONAIRE) &&
		    sctx->gs_shader.cso)
			partial_vs_wave = true;
	}

	/* This is a hardware requirement. */
	if ((rs && rs->line_stipple_enable) ||
	    (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sctx->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (sctx->b.screen->info.max_se < 4 ||
		    prim == PIPE_PRIM_POLYGON ||
		    prim == PIPE_PRIM_LINE_LOOP ||
		    prim == PIPE_PRIM_TRIANGLE_FAN ||
		    prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    info->primitive_restart ||
		    info->count_from_stream_output)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sctx->b.family == CHIP_HAWAII &&
		    (info->indirect || info->instance_count > 1))
			wd_switch_on_eop = true;

		/* Required on CIK and later. */
		if (sctx->b.screen->info.max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by VI. */
		if (ia_switch_on_eoi &&
		    (sctx->b.family == CHIP_HAWAII ||
		     (sctx->b.chip_class == VI &&
		      (sctx->gs_shader.cso || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (sctx->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    (info->indirect || info->instance_count > 1))
			partial_vs_wave = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (ia_switch_on_eoi)
		partial_es_wave = true;

	/* GS requirement. */
	if (SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
		partial_es_wave = true;

	/* Hw bug with single-primitive instances and SWITCH_ON_EOI
	 * on multi-SE chips. */
	if (sctx->b.screen->info.max_se >= 2 && ia_switch_on_eoi &&
	    (info->indirect ||
	     (info->instance_count > 1 &&
	      u_prims_for_vertices(info->mode, info->count) <= 1)))
		sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
		S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
		S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
		S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
		S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
		S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
		S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ?
					     max_primgroup_in_wave : 0);
}
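
/* Illustrative decode of the return value (hypothetical configuration):
 * without GS or tessellation, on a single-SE SI part drawing TRIANGLES with
 * no primitive restart and no stippling, every workaround above stays false,
 * so this returns PRIMGROUP_SIZE = 127 (i.e. groups of 128 primitives) and
 * zeros elsewhere; WD_SWITCH_ON_EOP and MAX_PRIMGRP_IN_WAVE are masked out
 * because those fields only exist on CIK+ and VI+ respectively. Enabling
 * line stippling alone would additionally set SWITCH_ON_EOP = 1.
 */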

static unsigned si_get_ls_hs_config(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned num_patches)
{
	unsigned num_output_cp;

	if (!sctx->tes_shader.cso)
		return 0;

	num_output_cp = sctx->tcs_shader.cso ?
		sctx->tcs_shader.cso->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
		info->vertices_per_patch;

	return S_028B58_NUM_PATCHES(num_patches) |
		S_028B58_HS_NUM_INPUT_CP(info->vertices_per_patch) |
		S_028B58_HS_NUM_OUTPUT_CP(num_output_cp);
}
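
/* For example (hypothetical draw): vertices_per_patch = 3 with a TCS that
 * declares TGSI_PROPERTY_TCS_VERTICES_OUT = 4 and num_patches = 1 yields
 * NUM_PATCHES = 1, HS_NUM_INPUT_CP = 3, HS_NUM_OUTPUT_CP = 4.
 */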

static void si_emit_scratch_reloc(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	if (!sctx->emit_scratch_reloc)
		return;

	radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
			       sctx->spi_tmpring_size);

	if (sctx->scratch_buffer) {
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      sctx->scratch_buffer, RADEON_USAGE_READWRITE,
				      RADEON_PRIO_SCRATCH_BUFFER);
	}
	sctx->emit_scratch_reloc = false;
}

/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned rast_prim = sctx->current_rast_prim;
	struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;

	/* Skip this if not rendering lines. */
	if (rast_prim != PIPE_PRIM_LINES &&
	    rast_prim != PIPE_PRIM_LINE_LOOP &&
	    rast_prim != PIPE_PRIM_LINE_STRIP &&
	    rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
	    rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
		return;

	if (rast_prim == sctx->last_rast_prim &&
	    rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
		return;

	radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
		rs->pa_sc_line_stipple |
		S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 :
					 rast_prim == PIPE_PRIM_LINE_STRIP ? 2 : 0));

	sctx->last_rast_prim = rast_prim;
	sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}
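
/* Note on AUTO_RESET_CNTL above (my reading of the field, not stated in this
 * file): 0 keeps the stipple pattern running across primitives, 1 restarts
 * it for every line (LINES), and 2 restarts it per packet (LINE_STRIP), so a
 * strip keeps a continuous pattern along its length but resets between
 * strips. The remaining line topologies fall back to 0.
 */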

static void si_emit_draw_registers(struct si_context *sctx,
				   const struct pipe_draw_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
	unsigned ia_multi_vgt_param, ls_hs_config, num_patches = 0;

	if (sctx->tes_shader.cso)
		si_emit_derived_tess_state(sctx, info, &num_patches);

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
	ls_hs_config = si_get_ls_hs_config(sctx, info, num_patches);

	/* Draw state. */
	if (prim != sctx->last_prim ||
	    ia_multi_vgt_param != sctx->last_multi_vgt_param ||
	    ls_hs_config != sctx->last_ls_hs_config) {
		if (sctx->b.chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_DRAW_PREAMBLE, 2, 0));
			radeon_emit(cs, prim); /* VGT_PRIMITIVE_TYPE */
			radeon_emit(cs, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
			radeon_emit(cs, ls_hs_config); /* VGT_LS_HS_CONFIG */
		} else {
			radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
			radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
			radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
		}
		sctx->last_prim = prim;
		sctx->last_multi_vgt_param = ia_multi_vgt_param;
		sctx->last_ls_hs_config = ls_hs_config;
	}

	if (gs_out_prim != sctx->last_gs_out_prim) {
		radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
		sctx->last_gs_out_prim = gs_out_prim;
	}

	/* Primitive restart. */
	if (info->primitive_restart != sctx->last_primitive_restart_en) {
		radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
		sctx->last_primitive_restart_en = info->primitive_restart;

		if (info->primitive_restart &&
		    (info->restart_index != sctx->last_restart_index ||
		     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
			radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
					       info->restart_index);
			sctx->last_restart_index = info->restart_index;
		}
	}
}
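
/* On CIK+, the DRAW_PREAMBLE packet above lets the CP update all three
 * registers in one shot. On SI they must be written individually because
 * VGT_PRIMITIVE_TYPE is a config register while IA_MULTI_VGT_PARAM and
 * VGT_LS_HS_CONFIG are context registers, as the pre-CIK path shows.
 */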

static void si_emit_draw_packets(struct si_context *sctx,
				 const struct pipe_draw_info *info,
				 const struct pipe_index_buffer *ib)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
				       t->stride_in_dw);

		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			    COPY_DATA_DST_SEL(COPY_DATA_REG) |
			    COPY_DATA_WR_CONFIRM);
		radeon_emit(cs, va);		/* src address lo */
		radeon_emit(cs, va >> 32);	/* src address hi */
		radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		radeon_emit(cs, 0);		/* unused */

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      t->buf_filled_size, RADEON_USAGE_READ,
				      RADEON_PRIO_SO_FILLED_SIZE);
	}

	/* draw packet */
	if (info->indexed) {
		radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));

		/* index type */
		switch (ib->index_size) {
		case 1:
			radeon_emit(cs, V_028A7C_VGT_INDEX_8);
			break;
		case 2:
			radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
				    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
					     V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
			break;
		case 4:
			radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
				    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
					     V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
			break;
		default:
			assert(!"unreachable");
			return;
		}
	}

	if (!info->indirect) {
		int base_vertex;

		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);

		/* Base vertex and start instance. */
		base_vertex = info->indexed ? info->index_bias : info->start;

		if (base_vertex != sctx->last_base_vertex ||
		    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
		    info->start_instance != sctx->last_start_instance ||
		    sh_base_reg != sctx->last_sh_base_reg) {
			radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);

			sctx->last_base_vertex = base_vertex;
			sctx->last_start_instance = info->start_instance;
			sctx->last_sh_base_reg = sh_base_reg;
		}
	} else {
		si_invalidate_draw_sh_constants(sctx);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      (struct r600_resource *)info->indirect,
				      RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
	}

	if (info->indexed) {
		uint32_t index_max_size = (ib->buffer->width0 - ib->offset) /
					  ib->index_size;
		uint64_t index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      (struct r600_resource *)ib->buffer,
				      RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);

		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

			assert(indirect_va % 8 == 0);
			assert(index_va % 2 == 0);
			assert(info->indirect_offset % 4 == 0);

			radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
			radeon_emit(cs, 1);
			radeon_emit(cs, indirect_va);
			radeon_emit(cs, indirect_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
			radeon_emit(cs, index_max_size);

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_INDIRECT, 3, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		} else {
			index_va += info->start * ib->index_size;

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
			radeon_emit(cs, index_max_size);
			radeon_emit(cs, index_va);
			radeon_emit(cs, (index_va >> 32UL) & 0xFF);
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		}
	} else {
		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

			assert(indirect_va % 8 == 0);
			assert(info->indirect_offset % 4 == 0);

			radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
			radeon_emit(cs, 1);
			radeon_emit(cs, indirect_va);
			radeon_emit(cs, indirect_va >> 32);

			radeon_emit(cs, PKT3(PKT3_DRAW_INDIRECT, 3, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX);
		} else {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				    S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
		}
	}
}
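
/* Illustrative packet stream (hypothetical direct, non-indexed draw of
 * 3 vertices, 1 instance, render condition off) emitted by the function
 * above:
 *
 *   PKT3(PKT3_NUM_INSTANCES, 0, 0)
 *   1                                  ; instance count
 *   SET_SH_REG(SI_SGPR_BASE_VERTEX):
 *     0, 0                             ; base vertex, start instance
 *   PKT3(PKT3_DRAW_INDEX_AUTO, 1, 0)
 *   3                                  ; vertex count
 *   V_0287F0_DI_SRC_SEL_AUTO_INDEX     ; indices are auto-generated
 *
 * The SET_SH_REG pair is skipped when the base vertex, start instance, and
 * userdata base register all match the previously emitted values.
 */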

void si_emit_cache_flush(struct si_context *si_ctx, struct r600_atom *atom)
{
	struct r600_common_context *sctx = &si_ctx->b;
	struct radeon_winsys_cs *cs = sctx->gfx.cs;
	uint32_t cp_coher_cntl = 0;
	uint32_t compute =
		PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));

	/* SI has a bug that it always flushes ICACHE and KCACHE if either
	 * bit is set. An alternative way is to write SQC_CACHES, but that
	 * doesn't seem to work reliably. Since the bug doesn't affect
	 * correctness (it only does more work than necessary) and
	 * the performance impact is likely negligible, there is no plan
	 * to fix it.
	 */

	if (sctx->flags & SI_CONTEXT_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (sctx->flags & SI_CONTEXT_INV_SMEM_L1)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (sctx->flags & SI_CONTEXT_INV_VMEM_L1)
		cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
	if (sctx->flags & SI_CONTEXT_INV_GLOBAL_L2) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);

		/* TODO: this might not be needed. */
		if (sctx->chip_class >= VI)
			cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
	}

	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);

		/* Necessary for DCC */
		if (sctx->chip_class >= VI) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0) | compute);
			radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_DATA_TS) |
					EVENT_INDEX(5));
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
		}
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_WITH_INV_L2) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
				EVENT_WRITE_INV_L2);
	}

	/* FLUSH_AND_INV events must be emitted before PS_PARTIAL_FLUSH.
	 * Otherwise, clearing CMASK (CB meta) with CP DMA isn't reliable.
	 *
	 * I think the reason is that FLUSH_AND_INV is only added to a queue
	 * and it is PS_PARTIAL_FLUSH that waits for it to complete.
	 */
	if (sctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (sctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}
	if (sctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}
	if (sctx->flags & SI_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (sctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* SURFACE_SYNC must be emitted after partial flushes.
	 * It looks like SURFACE_SYNC flushes caches immediately and doesn't
	 * wait for any engines. This should be last.
	 */
	if (cp_coher_cntl) {
		if (sctx->chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0xff);		/* CP_COHER_SIZE_HI */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		} else {
			radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		}
	}

	sctx->flags = 0;
}
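
/* Usage sketch (the assumed caller pattern; see si_draw_vbo below): nothing
 * emits these packets directly. Callers OR the needed flags into
 * sctx->b.flags and dirty the cache_flush atom, e.g.:
 *
 *   sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
 *   si_mark_atom_dirty(sctx, sctx->atoms.s.cache_flush);
 *
 * The atom's emit callback (this function) then turns the accumulated flags
 * into EVENT_WRITE / SURFACE_SYNC packets and clears sctx->flags.
 */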

static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	if (info->indirect) {
		struct r600_resource *indirect =
			(struct r600_resource*)info->indirect;
		int *data = r600_buffer_map_sync_with_rings(&sctx->b,
					indirect, PIPE_TRANSFER_READ);
		data += info->indirect_offset/sizeof(int);
		*start = data[2];
		*count = data[0];
	} else {
		*start = info->start;
		*count = info->count;
	}
}
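
/* The reads above assume the standard indirect draw argument layout of
 * 32-bit words: word 0 = vertex/index count, word 1 = instance count,
 * word 2 = start vertex / first index. Mapping the buffer for a CPU read
 * synchronizes with the rings, so this helper is only used on the slow
 * paths that translate or upload index buffers, not on every draw.
 */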

void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct pipe_index_buffer ib = {};
	unsigned mask;

	if (!info->count && !info->indirect &&
	    (info->indexed || !info->count_from_stream_output))
		return;

	if (!sctx->vs_shader.cso) {
		assert(0);
		return;
	}
	if (!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard)) {
		assert(0);
		return;
	}
	if (!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES)) {
		assert(0);
		return;
	}

	si_decompress_textures(sctx);

	/* Set the rasterization primitive type.
	 *
	 * This must be done after si_decompress_textures, which can call
	 * draw_vbo recursively, and before si_update_shaders, which uses
	 * current_rast_prim for this draw_vbo call. */
	if (sctx->gs_shader.cso)
		sctx->current_rast_prim = sctx->gs_shader.cso->gs_output_prim;
	else if (sctx->tes_shader.cso)
		sctx->current_rast_prim =
			sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
	else
		sctx->current_rast_prim = info->mode;

	if (!si_update_shaders(sctx) ||
	    !si_upload_shader_descriptors(sctx))
		return;

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
		ib.user_buffer = sctx->index_buffer.user_buffer;
		ib.index_size = sctx->index_buffer.index_size;
		ib.offset = sctx->index_buffer.offset;

		/* Translate or upload, if needed. */
		/* 8-bit indices are supported on VI. */
		if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset, start, count, start_offset;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_alloc(sctx->b.uploader, start_offset, count * 2,
				       &out_offset, &out_buffer, &ptr);
			if (!out_buffer) {
				pipe_resource_reference(&ib.buffer, NULL);
				return;
			}

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
							   ib.offset + start_offset,
							   count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			/* info->start will be added by the drawing code */
			ib.offset = out_offset - start_offset;
			ib.index_size = 2;
		} else if (ib.user_buffer && !ib.buffer) {
			unsigned start, count, start_offset;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
				      (char*)ib.user_buffer + start_offset,
				      &ib.offset, &ib.buffer);
			if (!ib.buffer)
				return;
			/* info->start will be added by the drawing code */
			ib.offset -= start_offset;
		}
	}

	/* VI reads index buffers through TC L2. */
	if (info->indexed && sctx->b.chip_class <= CIK &&
	    r600_resource(ib.buffer)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
		r600_resource(ib.buffer)->TC_L2_dirty = false;
	}

	/* Check flush flags. */
	if (sctx->b.flags)
		si_mark_atom_dirty(sctx, sctx->atoms.s.cache_flush);

	si_need_cs_space(sctx);

	/* Emit states. */
	mask = sctx->dirty_atoms;
	while (mask) {
		struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];

		atom->emit(&sctx->b, atom);
	}
	sctx->dirty_atoms = 0;

	si_pm4_emit_dirty(sctx);
	si_emit_scratch_reloc(sctx);
	si_emit_rasterizer_prim_state(sctx);
	si_emit_draw_registers(sctx, info);
	si_emit_draw_packets(sctx, info, &ib);

	if (sctx->trace_buf)
		si_trace_emit(sctx);

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if ((sctx->b.family == CHIP_HAWAII || sctx->b.family == CHIP_TONGA) &&
	    (sctx->b.streamout.streamout_enabled ||
	     sctx->b.streamout.prims_gen_query_enabled)) {
		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	/* Set the depth buffer as dirty. */
	if (sctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;

		if (rtex->surface.flags & RADEON_SURF_SBUFFER)
			rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (sctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = sctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = sctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture*)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	sctx->b.num_draw_calls++;
}

void si_trace_emit(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	sctx->trace_id++;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, sctx->trace_buf,
			      RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, sctx->trace_buf->gpu_address);
	radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
	radeon_emit(cs, sctx->trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, SI_ENCODE_TRACE_POINT(sctx->trace_id));
}