radeonsi: make pm4 state generation for shaders independent of the context
src/gallium/drivers/radeonsi/si_state_draw.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "si_pipe.h"
#include "si_shader.h"
#include "radeon/r600_cs.h"
#include "sid.h"

#include "util/u_format.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_upload_mgr.h"

/*
 * Shaders
 */

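/* Build the per-shader PM4 state objects. These helpers intentionally take
 * only the shader itself (no context pointer), so the PM4 state of a
 * compiled shader can be generated once and reused by any context.
 */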
static void si_shader_es(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;

	num_user_sgprs = SI_VS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

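	/* RSRC1 encodes register counts as allocation blocks minus one:
	 * VGPRs in units of 4 and SGPRs in units of 8, e.g. num_sgprs = 24
	 * is encoded as (24 - 1) / 8 = 2. */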
	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs));
}

static void si_shader_gs(struct si_shader *shader)
{
	unsigned gs_vert_itemsize = shader->selector->info.num_outputs * (16 >> 2);
	unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
	unsigned gsvs_itemsize = gs_vert_itemsize * gs_max_vert_out;
	unsigned cut_mode;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(gsvs_itemsize < (1 << 15));

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

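	/* Pick the smallest cut mode that can hold the declared maximum
	 * number of GS output vertices; the hardware supports at most 1024. */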
	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
		       S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		       S_028A40_CUT_MODE(cut_mode) |
		       S_028A40_ES_WRITE_OPTIMIZE(1) |
		       S_028A40_GS_WRITE_OPTIMIZE(1));

	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       util_bitcount64(shader->selector->gs_used_inputs) * (16 >> 2));
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
	si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

	num_user_sgprs = SI_GS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
	if ((num_user_sgprs + 2) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
		       S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B228_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
		       S_00B22C_USER_SGPR(num_user_sgprs));
}

static void si_shader_vs(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, i, vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;

	if (shader->is_gs_copy_shader)
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	else
		num_user_sgprs = SI_VS_NUM_USER_SGPR;

	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	for (nparams = 0, i = 0; i < info->num_outputs; i++) {
		switch (info->output_semantic_name[i]) {
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_POSITION:
		case TGSI_SEMANTIC_PSIZE:
			break;
		default:
			nparams++;
		}
	}
	if (nparams < 1)
		nparams = 1;

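	/* VS_EXPORT_COUNT is biased by one and cannot encode zero exports,
	 * hence nparams is clamped to at least 1 above. */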
	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs));
}

static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned i, spi_ps_in_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0, spi_ps_input_ena;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	for (i = 0; i < info->num_inputs; i++) {
		switch (info->input_semantic_name[i]) {
		case TGSI_SEMANTIC_POSITION:
			if (info->input_interpolate_loc[i] ==
			    TGSI_INTERPOLATE_LOC_CENTROID) {
				/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
				 * Possible values:
				 * 0 -> Position = pixel center (default)
				 * 1 -> Position = pixel centroid
				 * 2 -> Position = iterated sample number XXX:
				 *                 What does this mean?
				 */
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
			}
			/* Fall through */
		case TGSI_SEMANTIC_FACE:
			continue;
		}
	}

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->nparam) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(1);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	spi_ps_input_ena = shader->spi_ps_input_ena;
	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

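	/* SPI_PS_INPUT_ADDR defines the PS input VGPR layout; programming it
	 * identically to SPI_PS_INPUT_ENA is the simple, always-valid choice. */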
	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs));
}

/*
 * Drawing
 */

static unsigned si_conv_pipe_prim(unsigned pprim)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
		[R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
	};
	unsigned result = prim_conv[pprim];
	if (result == ~0) {
		R600_ERR("unsupported primitive type %d\n", pprim);
	}
	return result;
}

static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}

static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	unsigned prim = info->mode;
	unsigned primgroup_size = 128; /* recommended without a GS */

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool partial_vs_wave = false;

	if (sctx->gs_shader)
		primgroup_size = 64; /* recommended with a GS */

	/* This is a hardware requirement. */
	if ((rs && rs->line_stipple_enable) ||
	    (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sctx->b.streamout.streamout_enabled ||
	    sctx->b.streamout.prims_gen_query_enabled)
		partial_vs_wave = true;

	if (sctx->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (sctx->b.screen->info.max_se < 4 ||
		    prim == PIPE_PRIM_POLYGON ||
		    prim == PIPE_PRIM_LINE_LOOP ||
		    prim == PIPE_PRIM_TRIANGLE_FAN ||
		    prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    info->primitive_restart)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sctx->b.family == CHIP_HAWAII &&
		    (info->indirect || info->instance_count > 1))
			wd_switch_on_eop = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
	       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
	       S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
	       S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0);
}

static bool si_update_draw_info_state(struct si_context *sctx,
				      const struct pipe_draw_info *info,
				      const struct pipe_index_buffer *ib)
{
	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
	struct si_shader *vs = si_get_vs_state(sctx);
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim =
		si_conv_prim_to_gs_out(sctx->gs_shader ?
				       sctx->gs_shader->gs_output_prim :
				       info->mode);
	unsigned ls_mask = 0;
	unsigned ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info);

	if (pm4 == NULL)
		return false;

	if (prim == ~0) {
		FREE(pm4);
		return false;
	}

	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(pm4, R_028B74_VGT_DISPATCH_DRAW_INDEX,
			       ib->index_size == 4 ? 0xFC000000 : 0xFC00);

		si_pm4_cmd_begin(pm4, PKT3_DRAW_PREAMBLE);
		si_pm4_cmd_add(pm4, prim); /* VGT_PRIMITIVE_TYPE */
		si_pm4_cmd_add(pm4, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
		si_pm4_cmd_add(pm4, 0); /* VGT_LS_HS_CONFIG */
		si_pm4_cmd_end(pm4, false);
	} else {
		si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
		si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
	}

	si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
	si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
	si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);

	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP)
		ls_mask = 2;
	si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
		       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
		       sctx->pa_sc_line_stipple);

	if (info->mode == PIPE_PRIM_QUADS ||
	    info->mode == PIPE_PRIM_QUAD_STRIP ||
	    info->mode == PIPE_PRIM_POLYGON) {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_PROVOKING_VTX_LAST(1) | sctx->pa_su_sc_mode_cntl);
	} else {
		si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, sctx->pa_su_sc_mode_cntl);
	}
	si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
		       S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
		       S_02881C_USE_VTX_EDGE_FLAG(vs->vs_out_edgeflag) |
		       S_02881C_USE_VTX_RENDER_TARGET_INDX(vs->vs_out_layer) |
		       S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
		       S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
		       S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
		       (sctx->queued.named.rasterizer->clip_plane_enable &
			vs->clip_dist_write));
	si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
		       sctx->queued.named.rasterizer->pa_cl_clip_cntl |
		       (vs->clip_dist_write ? 0 :
			sctx->queued.named.rasterizer->clip_plane_enable & 0x3F));

	si_pm4_set_state(sctx, draw_info, pm4);
	return true;
}

static void si_update_spi_map(struct si_context *sctx)
{
	struct si_shader *ps = sctx->ps_shader->current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo = &ps->selector->info;
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
	unsigned i, j, tmp;

	if (pm4 == NULL)
		return;

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];
		unsigned param_offset = ps->ps_input_param_offset[i];

		if (name == TGSI_SEMANTIC_POSITION)
			/* Read from preloaded VGPRs, not parameters */
			continue;

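		/* With two-sided colors, each COLOR input also programs the
		 * matching BCOLOR at the next parameter slot; the goto at the
		 * bottom of the loop jumps back here for that second entry. */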
bcolor:
		tmp = 0;

		if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (interpolate == TGSI_INTERPOLATE_COLOR &&
		     ps->key.ps.flatshade)) {
			tmp |= S_028644_FLAT_SHADE(1);
		}

		if (name == TGSI_SEMANTIC_GENERIC &&
		    sctx->sprite_coord_enable & (1 << index)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vsinfo->num_outputs; j++) {
			if (name == vsinfo->output_semantic_name[j] &&
			    index == vsinfo->output_semantic_index[j]) {
				tmp |= S_028644_OFFSET(vs->vs_output_param_offset[j]);
				break;
			}
		}

		if (j == vsinfo->num_outputs) {
			/* No corresponding output found, load defaults into input */
			tmp |= S_028644_OFFSET(0x20);
		}

		si_pm4_set_reg(pm4,
			       R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
			       tmp);

		if (name == TGSI_SEMANTIC_COLOR &&
		    ps->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}

	si_pm4_set_state(sctx, spi, pm4);
}

/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
	unsigned esgs_ring_size = 128 * 1024;
	unsigned gsvs_ring_size = 64 * 1024 * 1024;

	assert(!sctx->gs_rings);
	sctx->gs_rings = CALLOC_STRUCT(si_pm4_state);

	sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, esgs_ring_size);

	sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, gsvs_ring_size);

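	/* The VGT ring size registers live at different offsets on CIK and
	 * later, hence the two paths below. Sizes are in units of 256 bytes. */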
	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(sctx->gs_rings, R_030900_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_030904_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	} else {
		si_pm4_set_reg(sctx->gs_rings, R_0088C8_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_0088CC_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	}

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   true, true, 4, 64);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   false, false, 0, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
			   sctx->gsvs_ring, 0, gsvs_ring_size,
			   false, false, 0, 0);
}

static void si_update_derived_state(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context*)sctx;

	if (!sctx->blitter->running) {
		/* Flush depth textures which need to be flushed. */
		for (int i = 0; i < SI_NUM_SHADERS; i++) {
			if (sctx->samplers[i].depth_texture_mask) {
				si_flush_depth_textures(sctx, &sctx->samplers[i]);
			}
			if (sctx->samplers[i].compressed_colortex_mask) {
				si_decompress_color_textures(sctx, &sctx->samplers[i]);
			}
		}
	}

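	/* With a GS bound, the API vertex shader runs as the hardware ES
	 * stage feeding the ESGS ring, and the GS copy shader is bound as
	 * the hardware VS. Without a GS, the vertex shader is the hardware
	 * VS directly. */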
	if (sctx->gs_shader) {
		si_shader_select(ctx, sctx->gs_shader);

		if (!sctx->gs_shader->current->pm4) {
			si_shader_gs(sctx->gs_shader->current);
			si_shader_vs(sctx->gs_shader->current->gs_copy_shader);
		}

		si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader->current->gs_copy_shader->pm4);

		sctx->b.streamout.stride_in_dw = sctx->gs_shader->so.stride;

		si_shader_select(ctx, sctx->vs_shader);

		if (!sctx->vs_shader->current->pm4)
			si_shader_es(sctx->vs_shader->current);

		si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);

		if (!sctx->gs_rings)
			si_init_gs_rings(sctx);
		if (sctx->emitted.named.gs_rings != sctx->gs_rings)
			sctx->b.flags |= R600_CONTEXT_VGT_FLUSH;
		si_pm4_bind_state(sctx, gs_rings, sctx->gs_rings);

		si_set_ring_buffer(ctx, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
				   sctx->gsvs_ring,
				   sctx->gs_shader->gs_max_out_vertices *
				   sctx->gs_shader->info.num_outputs * 16,
				   64, true, true, 4, 16);

		if (!sctx->gs_on) {
			sctx->gs_on = CALLOC_STRUCT(si_pm4_state);

			si_pm4_set_reg(sctx->gs_on, R_028B54_VGT_SHADER_STAGES_EN,
				       S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				       S_028B54_GS_EN(1) |
				       S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER));
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_on);
	} else {
		si_shader_select(ctx, sctx->vs_shader);

		if (!sctx->vs_shader->current->pm4)
			si_shader_vs(sctx->vs_shader->current);

		si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);

		sctx->b.streamout.stride_in_dw = sctx->vs_shader->so.stride;

		if (!sctx->gs_off) {
			sctx->gs_off = CALLOC_STRUCT(si_pm4_state);

			si_pm4_set_reg(sctx->gs_off, R_028A40_VGT_GS_MODE, 0);
			si_pm4_set_reg(sctx->gs_off, R_028B54_VGT_SHADER_STAGES_EN, 0);
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_off);
		si_pm4_bind_state(sctx, gs_rings, NULL);
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_shader_select(ctx, sctx->ps_shader);

	if (!sctx->ps_shader->current) {
		struct si_shader_selector *sel;

		/* use a dummy shader if compiling the shader (variant) failed */
		si_make_dummy_ps(sctx);
		sel = sctx->dummy_pixel_shader;
		si_shader_select(ctx, sel);
		sctx->ps_shader->current = sel->current;
	}

	if (!sctx->ps_shader->current->pm4)
		si_shader_ps(sctx->ps_shader->current);

	si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);

	if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs)) {
		/* XXX: Emitting the PS state even when only the VS changed
		 * fixes random failures with piglit glsl-max-varyings.
		 * Not sure why...
		 */
		sctx->emitted.named.ps = NULL;
		si_update_spi_map(sctx);
	}

	if (sctx->ps_db_shader_control != sctx->ps_shader->current->db_shader_control) {
		sctx->ps_db_shader_control = sctx->ps_shader->current->db_shader_control;
		sctx->db_render_state.dirty = true;
	}
}

static void si_state_draw(struct si_context *sctx,
			  const struct pipe_draw_info *info,
			  const struct pipe_index_buffer *ib)
{
	unsigned sh_base_reg = (sctx->gs_shader ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
				R_00B130_SPI_SHADER_USER_DATA_VS_0);
	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		si_pm4_set_reg(pm4, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
			       t->stride_in_dw);

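		/* Copy the buffer-filled size written by streamout into the
		 * VGT register, so a draw with USE_OPAQUE set can derive its
		 * vertex count from it. */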
		si_pm4_cmd_begin(pm4, PKT3_COPY_DATA);
		si_pm4_cmd_add(pm4,
			       COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			       COPY_DATA_DST_SEL(COPY_DATA_REG) |
			       COPY_DATA_WR_CONFIRM);
		si_pm4_cmd_add(pm4, va); /* src address lo */
		si_pm4_cmd_add(pm4, va >> 32UL); /* src address hi */
		si_pm4_cmd_add(pm4, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		si_pm4_cmd_add(pm4, 0); /* unused */
		si_pm4_add_bo(pm4, t->buf_filled_size, RADEON_USAGE_READ,
			      RADEON_PRIO_MIN);
		si_pm4_cmd_end(pm4, true);
	}

	/* draw packet */
	si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
	if (ib->index_size == 4) {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
				    V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
	} else {
		si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
				    V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
	}
	si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);

	if (!info->indirect) {
		si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
		si_pm4_cmd_add(pm4, info->instance_count);
		si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);

		si_pm4_set_reg(pm4, sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
			       info->indexed ? info->index_bias : info->start);
		si_pm4_set_reg(pm4, sh_base_reg + SI_SGPR_START_INSTANCE * 4,
			       info->start_instance);
	} else {
		si_pm4_add_bo(pm4, (struct r600_resource *)info->indirect,
			      RADEON_USAGE_READ, RADEON_PRIO_MIN);
	}

	if (info->indexed) {
		uint32_t max_size = (ib->buffer->width0 - ib->offset) /
				    ib->index_size;
		uint64_t va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ,
			      RADEON_PRIO_MIN);

		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
			si_cmd_draw_index_indirect(pm4, indirect_va, va, max_size,
						   info->indirect_offset,
						   sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
						   sh_base_reg + SI_SGPR_START_INSTANCE * 4,
						   sctx->b.predicate_drawing);
		} else {
			va += info->start * ib->index_size;
			si_cmd_draw_index_2(pm4, max_size, va, info->count,
					    V_0287F0_DI_SRC_SEL_DMA,
					    sctx->b.predicate_drawing);
		}
	} else {
		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
			si_cmd_draw_indirect(pm4, indirect_va, info->indirect_offset,
					     sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
					     sh_base_reg + SI_SGPR_START_INSTANCE * 4,
					     sctx->b.predicate_drawing);
		} else {
			si_cmd_draw_index_auto(pm4, info->count,
					       V_0287F0_DI_SRC_SEL_AUTO_INDEX |
					       S_0287F0_USE_OPAQUE(!!info->count_from_stream_output),
					       sctx->b.predicate_drawing);
		}
	}

	si_pm4_set_state(sctx, draw, pm4);
}

void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
	uint32_t cp_coher_cntl = 0;
	uint32_t compute =
		PKT3_SHADER_TYPE_S(!!(sctx->flags & R600_CONTEXT_FLAG_COMPUTE));

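	/* OR'ing "compute" into each PKT3 header sets the shader type bit,
	 * which makes these packets apply to the compute queue state when
	 * the flush is requested from a compute context. */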
	/* XXX SI flushes both ICACHE and KCACHE if either flag is set.
	 * XXX CIK shouldn't have this issue. Test CIK before separating the flags
	 * XXX to ensure there is no regression. Also find out if there is another
	 * XXX way to flush either ICACHE or KCACHE but not both for SI. */
	if (sctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
			   R600_CONTEXT_INV_CONST_CACHE)) {
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
				 S_0085F0_SH_KCACHE_ACTION_ENA(1);
	}
	if (sctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
			   R600_CONTEXT_STREAMOUT_FLUSH)) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
				 S_0085F0_TCL1_ACTION_ENA(1);
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (cp_coher_cntl) {
		if (sctx->chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
			radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
			radeon_emit(cs, 0xff);          /* CP_COHER_SIZE_HI */
			radeon_emit(cs, 0);             /* CP_COHER_BASE */
			radeon_emit(cs, 0);             /* CP_COHER_BASE_HI */
			radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
		} else {
			radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
			radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
			radeon_emit(cs, 0);             /* CP_COHER_BASE */
			radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
		}
	}

	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_WITH_INV_L2) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
				EVENT_WRITE_INV_L2);
	}

	if (sctx->flags & (R600_CONTEXT_WAIT_3D_IDLE |
			   R600_CONTEXT_PS_PARTIAL_FLUSH)) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		/* Needed if streamout buffers are going to be used as a source. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (sctx->flags & R600_CONTEXT_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (sctx->flags & R600_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (sctx->flags & R600_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	sctx->flags = 0;
}

const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 21 }; /* number of CS dwords */

static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
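	/* For indirect draws, read the argument buffer back on the CPU:
	 * word 0 holds the count and word 2 the start index. */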
	if (info->indirect) {
		struct r600_resource *indirect =
			(struct r600_resource*)info->indirect;
		int *data = r600_buffer_map_sync_with_rings(&sctx->b,
					indirect, PIPE_TRANSFER_READ);
		data += info->indirect_offset / sizeof(int);
		*start = data[2];
		*count = data[0];
	} else {
		*start = info->start;
		*count = info->count;
	}
}

void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_index_buffer ib = {};
	uint32_t i;

	if (!info->count && !info->indirect &&
	    (info->indexed || !info->count_from_stream_output))
		return;

	if (!sctx->ps_shader || !sctx->vs_shader)
		return;

	si_update_derived_state(sctx);

	if (sctx->vertex_buffers_dirty) {
		si_update_vertex_buffers(sctx);
		sctx->vertex_buffers_dirty = false;
	}

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
		ib.user_buffer = sctx->index_buffer.user_buffer;
		ib.index_size = sctx->index_buffer.index_size;
		ib.offset = sctx->index_buffer.offset;

		/* Translate or upload, if needed. */
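		/* The hardware index types are only 16- and 32-bit, so 8-bit
		 * indices have to be converted to 16-bit before drawing. */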
		if (ib.index_size == 1) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset, start, count, start_offset;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_alloc(sctx->b.uploader, start_offset, count * 2,
				       &out_offset, &out_buffer, &ptr);

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
							   ib.offset + start_offset,
							   count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			/* info->start will be added by the drawing code */
			ib.offset = out_offset - start_offset;
			ib.index_size = 2;
		} else if (ib.user_buffer && !ib.buffer) {
			unsigned start, count, start_offset;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
				      (char*)ib.user_buffer + start_offset,
				      &ib.offset, &ib.buffer);
			/* info->start will be added by the drawing code */
			ib.offset -= start_offset;
		}
	}

	if (!si_update_draw_info_state(sctx, info, &ib))
		return;

	si_state_draw(sctx, info, &ib);

	sctx->pm4_dirty_cdwords += si_pm4_dirty_dw(sctx);

	/* Check flush flags. */
	if (sctx->b.flags)
		sctx->atoms.s.cache_flush->dirty = true;

	si_need_cs_space(sctx, 0, TRUE);

	/* Emit states. */
	for (i = 0; i < SI_NUM_ATOMS(sctx); i++) {
		if (sctx->atoms.array[i]->dirty) {
			sctx->atoms.array[i]->emit(&sctx->b, sctx->atoms.array[i]);
			sctx->atoms.array[i]->dirty = false;
		}
	}

	si_pm4_emit_dirty(sctx);
	sctx->pm4_dirty_cdwords = 0;

#if SI_TRACE_CS
	if (sctx->screen->b.trace_bo) {
		si_trace_emit(sctx);
	}
#endif

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if (sctx->b.family == CHIP_HAWAII &&
	    (sctx->b.streamout.streamout_enabled ||
	     sctx->b.streamout.prims_gen_query_enabled)) {
		sctx->b.flags |= R600_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	/* Set the depth buffer as dirty. */
	if (sctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (sctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = sctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = sctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture*)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	sctx->b.num_draw_calls++;
}

#if SI_TRACE_CS
void si_trace_emit(struct si_context *sctx)
{
	struct si_screen *sscreen = sctx->screen;
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint64_t va;

	va = sscreen->b.trace_bo->gpu_address;
	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sscreen->b.trace_bo,
			      RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
			PKT3_WRITE_DATA_WR_CONFIRM |
			PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
	radeon_emit(cs, va & 0xFFFFFFFFUL);
	radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
	radeon_emit(cs, cs->cdw);
	radeon_emit(cs, sscreen->b.cs_count);
}
#endif