radeonsi: compile geometry shaders immediately
[mesa.git] / src / gallium / drivers / radeonsi / si_state_shaders.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"
#include "radeon/r600_cs.h"

#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_simple_shaders.h"

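/* Translate the TES primitive mode, spacing, winding, and point-mode
 * properties into a VGT_TF_PARAM value (domain type, partitioning,
 * output topology). */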
static void si_set_tesseval_regs(struct si_shader *shader,
				 struct si_pm4_state *pm4)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
	unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
	bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
	bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
	unsigned type, partitioning, topology;

	switch (tes_prim_mode) {
	case PIPE_PRIM_LINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	case PIPE_PRIM_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case PIPE_PRIM_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	default:
		assert(0);
		return;
	}

	switch (tes_spacing) {
	case PIPE_TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	case PIPE_TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	default:
		assert(0);
		return;
	}

	if (tes_point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes_prim_mode == PIPE_PRIM_LINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (tes_vertex_order_cw)
		/* for some reason, this must be the other way around */
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
		       S_028B6C_TYPE(type) |
		       S_028B6C_PARTITIONING(partitioning) |
		       S_028B6C_TOPOLOGY(topology));
}

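/* Build the PM4 state for a vertex shader compiled as LS, the first
 * hardware stage of the tessellation pipeline. */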
static void si_shader_ls(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	/* We need at least 2 components for LS.
	 * VGPR0-3: (VertexID, RelAutoindex, ???, InstanceID). */
	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 1;

	num_user_sgprs = SI_LS_NUM_USER_SGPR;
	num_sgprs = shader->config.num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
	si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);

	shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
			       S_00B528_SGPRS((num_sgprs - 1) / 8) |
			       S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
			       S_00B528_DX10_CLAMP(1);
	shader->config.rsrc2 = S_00B52C_USER_SGPR(num_user_sgprs) |
			       S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
}

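/* Build the PM4 state for a tessellation control shader running as HS. */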
static void si_shader_hs(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	num_user_sgprs = SI_TCS_NUM_USER_SGPR;
	num_sgprs = shader->config.num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with tessellation factor
	 * buffer offset. */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
	si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
	si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
		       S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B428_SGPRS((num_sgprs - 1) / 8) |
		       S_00B428_DX10_CLAMP(1));
	si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
		       S_00B42C_USER_SGPR(num_user_sgprs) |
		       S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}

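/* Build the PM4 state for a VS or TES compiled as ES, the stage that
 * feeds a geometry shader through the ESGS ring. */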
static void si_shader_es(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	if (shader->selector->type == PIPE_SHADER_VERTEX) {
		vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;
		num_user_sgprs = SI_ES_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = 3; /* all components are needed for TES */
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	num_sgprs = shader->config.num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       shader->selector->esgs_itemsize / 4);
	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B328_DX10_CLAMP(1));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs) |
		       S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(shader, pm4);
}

/**
 * Calculate the appropriate setting of VGT_GS_MODE when \p shader is a
 * geometry shader.
 */
static uint32_t si_vgt_gs_mode(struct si_shader *shader)
{
	unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
	unsigned cut_mode;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
	       S_028A40_CUT_MODE(cut_mode) |
	       S_028A40_ES_WRITE_OPTIMIZE(1) |
	       S_028A40_GS_WRITE_OPTIMIZE(1);
}

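/* Build the PM4 state for a geometry shader: GSVS ring offsets and item
 * sizes for all vertex streams, the GS instancing count, and the program
 * registers. */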
static void si_shader_gs(struct si_shader *shader)
{
	unsigned gs_vert_itemsize = shader->selector->gsvs_vertex_size;
	unsigned gsvs_itemsize = shader->selector->max_gsvs_emit_size >> 2;
	unsigned gs_num_invocations = shader->selector->gs_num_invocations;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;
	unsigned max_stream = shader->selector->max_gs_stream;

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(gsvs_itemsize < (1 << 15));

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (!pm4)
		return;

	si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(shader));

	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize * ((max_stream >= 2) ? 2 : 1));
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize * ((max_stream >= 3) ? 3 : 1));

	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize * (max_stream + 1));

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, shader->selector->gs_max_out_vertices);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize >> 2);
	si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? gs_vert_itemsize >> 2 : 0);
	si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? gs_vert_itemsize >> 2 : 0);
	si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? gs_vert_itemsize >> 2 : 0);

	si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
		       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
		       S_028B90_ENABLE(gs_num_invocations > 0));

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
	si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
	si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

	num_user_sgprs = SI_GS_NUM_USER_SGPR;
	num_sgprs = shader->config.num_sgprs;
	/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
	if ((num_user_sgprs + 2) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
		       S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B228_SGPRS((num_sgprs - 1) / 8) |
		       S_00B228_DX10_CLAMP(1));
	si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
		       S_00B22C_USER_SGPR(num_user_sgprs) |
		       S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}

/**
 * Compute the state for \p shader, which will run as a vertex shader on the
 * hardware.
 *
 * If \p gs is non-NULL, it points to the geometry shader for which this shader
 * is the copy shader.
 */
static void si_shader_vs(struct si_shader *shader, struct si_shader *gs)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, vgpr_comp_cnt;
	uint64_t va;
	unsigned window_space =
		shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
	bool enable_prim_id = si_vs_exports_prim_id(shader);

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (!pm4)
		return;

	/* We always write VGT_GS_MODE in the VS state, because every switch
	 * between different shader pipelines involving a different GS or no
	 * GS at all involves a switch of the VS (different GS use different
	 * copy shaders). On the other hand, when the API switches from a GS to
	 * no GS and then back to the same GS used originally, the GS state is
	 * not sent again.
	 */
	if (!gs) {
		si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
			       S_028A40_MODE(enable_prim_id ? V_028A40_GS_SCENARIO_A : 0));
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
	} else {
		si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(gs));
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
	}

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	if (gs) {
		vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
		vgpr_comp_cnt = shader->uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
		num_user_sgprs = SI_VS_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = 3; /* all components are needed for TES */
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	num_sgprs = shader->config.num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	/* VS is required to export at least one param. */
	nparams = MAX2(shader->nr_param_exports, 1);
	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B128_DX10_CLAMP(1));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
		       S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
	if (window_space)
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	else
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(shader, pm4);
}

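/* Return how many PS input interpolants need SPI_PS_INPUT_CNTL entries;
 * two-sided color adds one extra interpolant per color read. */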
static unsigned si_get_ps_num_interp(struct si_shader *ps)
{
	struct tgsi_shader_info *info = &ps->selector->info;
	unsigned num_colors = !!(info->colors_read & 0x0f) +
			      !!(info->colors_read & 0xf0);
	unsigned num_interp = ps->selector->info.num_inputs +
			      (ps->key.ps.color_two_side ? num_colors : 0);

	assert(num_interp <= 32);
	return MIN2(num_interp, 32);
}

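/* Return the shader key's SPI_SHADER_COL_FORMAT with gaps below the last
 * used color target filled with a dummy format. */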
static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
{
	unsigned value = shader->key.ps.spi_shader_col_format;
	unsigned i, num_targets = (util_last_bit(value) + 3) / 4;

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	for (i = 0; i < num_targets; i++)
		if (!(value & (0xf << (i * 4))))
			value |= V_028714_SPI_SHADER_32_R << (i * 4);

	return value;
}

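/* Derive CB_SHADER_MASK from SPI_SHADER_COL_FORMAT by setting the channel
 * bits that each target's export format actually writes. */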
static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
{
	unsigned i, cb_shader_mask = 0;

	for (i = 0; i < 8; i++) {
		switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
		case V_028714_SPI_SHADER_ZERO:
			break;
		case V_028714_SPI_SHADER_32_R:
			cb_shader_mask |= 0x1 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_GR:
			cb_shader_mask |= 0x3 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_AR:
			cb_shader_mask |= 0x9 << (i * 4);
			break;
		case V_028714_SPI_SHADER_FP16_ABGR:
		case V_028714_SPI_SHADER_UNORM16_ABGR:
		case V_028714_SPI_SHADER_SNORM16_ABGR:
		case V_028714_SPI_SHADER_UINT16_ABGR:
		case V_028714_SPI_SHADER_SINT16_ABGR:
		case V_028714_SPI_SHADER_32_ABGR:
			cb_shader_mask |= 0xf << (i * 4);
			break;
		default:
			assert(0);
		}
	}
	return cb_shader_mask;
}

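/* Build the PM4 state for a pixel shader: input enables, barycentric
 * controls, Z/color export formats, and the program registers. */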
static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned spi_ps_in_control, spi_shader_col_format, cb_shader_mask;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	uint64_t va;
	bool has_centroid;
	unsigned input_ena = shader->config.spi_ps_input_ena;

	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (!pm4)
		return;

	/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
	 * Possible values:
	 * 0 -> Position = pixel center
	 * 1 -> Position = pixel centroid
	 * 2 -> Position = at sample position
	 *
	 * From GLSL 4.5 specification, section 7.1:
	 * "The variable gl_FragCoord is available as an input variable from
	 * within fragment shaders and it holds the window relative coordinates
	 * (x, y, z, 1/w) values for the fragment. If multi-sampling, this
	 * value can be for any location within the pixel, or one of the
	 * fragment samples. The use of centroid does not further restrict
	 * this value to be inside the current primitive."
	 *
	 * Meaning that centroid has no effect and we can return anything within
	 * the pixel. Thus, return the value at sample position, because that's
	 * the most accurate one shaders can get.
	 */
	spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);

	if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
	    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
		spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);

	spi_shader_col_format = si_get_spi_shader_col_format(shader);
	cb_shader_mask = si_get_cb_shader_mask(spi_shader_col_format);

	/* This must be non-zero for alpha-test/kill to work.
	 * The hardware ignores the EXEC mask if no export memory is allocated.
	 * Don't add this to CB_SHADER_MASK.
	 */
	if (!spi_shader_col_format &&
	    !info->writes_z && !info->writes_stencil && !info->writes_samplemask &&
	    (shader->selector->info.uses_kill ||
	     shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS))
		spi_shader_col_format = V_028714_SPI_SHADER_32_R;

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR,
		       shader->config.spi_ps_input_addr);

	/* Set interpolation controls. */
	has_centroid = G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena) ||
		       G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena);

	spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader)) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(has_centroid);

	/* Set registers. */
	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
		       info->writes_samplemask ? V_028710_SPI_SHADER_32_ABGR :
		       info->writes_stencil ? V_028710_SPI_SHADER_32_GR :
		       info->writes_z ? V_028710_SPI_SHADER_32_R :
		       V_028710_SPI_SHADER_ZERO);

	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT, spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->config.num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8) |
		       S_00B028_DX10_CLAMP(1));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs) |
		       S_00B02C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}

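/* (Re)build the PM4 state of a shader variant for the hardware stage it
 * will run as, which is determined by the selector type and shader key. */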
static void si_shader_init_pm4_state(struct si_shader *shader)
{
	if (shader->pm4)
		si_pm4_free_state_simple(shader->pm4);

	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.vs.as_ls)
			si_shader_ls(shader);
		else if (shader->key.vs.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader, NULL);
		break;
	case PIPE_SHADER_TESS_CTRL:
		si_shader_hs(shader);
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (shader->key.tes.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader, NULL);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(shader);
		si_shader_vs(shader->gs_copy_shader, shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}

static unsigned si_get_alpha_test_func(struct si_context *sctx)
{
	/* Alpha-test should be disabled if colorbuffer 0 is integer. */
	if (sctx->queued.named.dsa &&
	    !sctx->framebuffer.cb0_is_integer)
		return sctx->queued.named.dsa->alpha_func;

	return PIPE_FUNC_ALWAYS;
}

/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
					  struct si_shader_selector *sel,
					  union si_shader_key *key)
{
	struct si_context *sctx = (struct si_context *)ctx;
	unsigned i;

	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX:
		if (sctx->vertex_elements)
			for (i = 0; i < sctx->vertex_elements->count; ++i)
				key->vs.instance_divisors[i] =
					sctx->vertex_elements->elements[i].instance_divisor;

		if (sctx->tes_shader.cso)
			key->vs.as_ls = 1;
		else if (sctx->gs_shader.cso)
			key->vs.as_es = 1;

		if (!sctx->gs_shader.cso && sctx->ps_shader.cso &&
		    sctx->ps_shader.cso->info.uses_primid)
			key->vs.export_prim_id = 1;
		break;
	case PIPE_SHADER_TESS_CTRL:
		key->tcs.prim_mode =
			sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (sctx->gs_shader.cso)
			key->tes.as_es = 1;
		else if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
			key->tes.export_prim_id = 1;
		break;
	case PIPE_SHADER_GEOMETRY:
		break;
	case PIPE_SHADER_FRAGMENT: {
		struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
		struct si_state_blend *blend = sctx->queued.named.blend;

		if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
		    sel->info.colors_written == 0x1)
			key->ps.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;

		if (blend) {
			/* Select the shader color format based on whether
			 * blending or alpha are needed.
			 */
			key->ps.spi_shader_col_format =
				(blend->blend_enable_4bit & blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format_blend_alpha) |
				(blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format_blend) |
				(~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format_alpha) |
				(~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format);
		} else
			key->ps.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format;

		/* If alpha-to-coverage is enabled, we have to export alpha
		 * even if there is no color buffer.
		 */
		if (!(key->ps.spi_shader_col_format & 0xf) &&
		    blend && blend->alpha_to_coverage)
			key->ps.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;

		/* On SI and CIK except Hawaii, the CB doesn't clamp outputs
		 * to the range supported by the type if a channel has less
		 * than 16 bits and the export format is 16_ABGR.
		 */
		if (sctx->b.chip_class <= CIK && sctx->b.family != CHIP_HAWAII)
			key->ps.color_is_int8 = sctx->framebuffer.color_is_int8;

		/* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
		if (!key->ps.last_cbuf) {
			key->ps.spi_shader_col_format &= sel->colors_written_4bit;
			key->ps.color_is_int8 &= sel->info.colors_written;
		}

		if (rs) {
			bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
					sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
				       sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
			bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;

			key->ps.color_two_side = rs->two_side && sel->info.colors_read;

			if (sctx->queued.named.blend) {
				key->ps.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
						       rs->multisample_enable &&
						       !sctx->framebuffer.cb0_is_integer;
			}

			key->ps.poly_stipple = rs->poly_stipple_enable && is_poly;
			key->ps.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
						       (is_line && rs->line_smooth)) &&
						      sctx->framebuffer.nr_samples <= 1;
			key->ps.clamp_color = rs->clamp_fragment_color;

			key->ps.force_persample_interp = rs->force_persample_interp &&
							 rs->multisample_enable &&
							 sctx->framebuffer.nr_samples > 1 &&
							 sctx->ps_iter_samples > 1 &&
							 (sel->info.uses_persp_center ||
							  sel->info.uses_persp_centroid ||
							  sel->info.uses_linear_center ||
							  sel->info.uses_linear_centroid);
		}

		key->ps.alpha_func = si_get_alpha_test_func(sctx);
		break;
	}
	default:
		assert(0);
	}
}

/* Select the hw shader variant depending on the current state. */
static int si_shader_select_with_key(struct pipe_context *ctx,
				     struct si_shader_ctx_state *state,
				     union si_shader_key *key)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state->cso;
	struct si_shader *current = state->current;
	struct si_shader *iter, *shader = NULL;
	int r;

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(current && memcmp(&current->key, key, sizeof(*key)) == 0))
		return 0;

	pipe_mutex_lock(sel->mutex);

	/* Find the shader variant. */
	for (iter = sel->first_variant; iter; iter = iter->next_variant) {
		/* Don't check the "current" shader. We checked it above. */
		if (current != iter &&
		    memcmp(&iter->key, key, sizeof(*key)) == 0) {
			state->current = iter;
			pipe_mutex_unlock(sel->mutex);
			return 0;
		}
	}

	/* Build a new shader. */
	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		pipe_mutex_unlock(sel->mutex);
		return -ENOMEM;
	}
	shader->selector = sel;
	shader->key = *key;

	r = si_shader_create(sctx->screen, sctx->tm, shader, &sctx->b.debug);
	if (unlikely(r)) {
		R600_ERR("Failed to build shader variant (type=%u) %d\n",
			 sel->type, r);
		FREE(shader);
		pipe_mutex_unlock(sel->mutex);
		return r;
	}
	si_shader_init_pm4_state(shader);

	if (!sel->last_variant) {
		sel->first_variant = shader;
		sel->last_variant = shader;
	} else {
		sel->last_variant->next_variant = shader;
		sel->last_variant = shader;
	}
	state->current = shader;
	pipe_mutex_unlock(sel->mutex);
	return 0;
}

static int si_shader_select(struct pipe_context *ctx,
			    struct si_shader_ctx_state *state)
{
	union si_shader_key key;

	si_shader_selector_key(ctx, state->cso, &key);
	return si_shader_select_with_key(ctx, state, &key);
}

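/* Create a shader selector (the CSO): scan the TGSI, precompute derived
 * per-stage data, and compile the first variant immediately for geometry
 * shaders or when the precompile debug flag is set. */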
static void *si_create_shader_selector(struct pipe_context *ctx,
				       const struct pipe_shader_state *state)
{
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
	int i;

	if (!sel)
		return NULL;

	sel->tokens = tgsi_dup_tokens(state->tokens);
	if (!sel->tokens) {
		FREE(sel);
		return NULL;
	}

	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);
	sel->type = util_pipe_shader_from_tgsi_processor(sel->info.processor);
	p_atomic_inc(&sscreen->b.num_shaders_created);

	/* Set which opcode uses which (i,j) pair. */
	if (sel->info.uses_persp_opcode_interp_centroid)
		sel->info.uses_persp_centroid = true;

	if (sel->info.uses_linear_opcode_interp_centroid)
		sel->info.uses_linear_centroid = true;

	if (sel->info.uses_persp_opcode_interp_offset ||
	    sel->info.uses_persp_opcode_interp_sample)
		sel->info.uses_persp_center = true;

	if (sel->info.uses_linear_opcode_interp_offset ||
	    sel->info.uses_linear_opcode_interp_sample)
		sel->info.uses_linear_center = true;

	switch (sel->type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		sel->gsvs_vertex_size = sel->info.num_outputs * 16;
		sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
					  sel->gs_max_out_vertices;

		sel->max_gs_stream = 0;
		for (i = 0; i < sel->so.num_outputs; i++)
			sel->max_gs_stream = MAX2(sel->max_gs_stream,
						  sel->so.output[i].stream);

		sel->gs_input_verts_per_prim =
			u_vertices_per_prim(sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]);
		break;

	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_CTRL:
	case PIPE_SHADER_TESS_EVAL:
		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->patch_outputs_written |=
					1llu << si_shader_io_get_unique_index(name, index);
				break;
			default:
				sel->outputs_written |=
					1llu << si_shader_io_get_unique_index(name, index);
			}
		}
		sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;
		break;

	case PIPE_SHADER_FRAGMENT:
		for (i = 0; i < 8; i++)
			if (sel->info.colors_written & (1 << i))
				sel->colors_written_4bit |= 0xf << (4 * i);
		break;
	}

	/* DB_SHADER_CONTROL */
	sel->db_shader_control =
		S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
		S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(sel->info.writes_stencil) |
		S_02880C_MASK_EXPORT_ENABLE(sel->info.writes_samplemask) |
		S_02880C_KILL_ENABLE(sel->info.uses_kill);

	switch (sel->info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT]) {
	case TGSI_FS_DEPTH_LAYOUT_GREATER:
		sel->db_shader_control |=
			S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
		break;
	case TGSI_FS_DEPTH_LAYOUT_LESS:
		sel->db_shader_control |=
			S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
		break;
	}

	/* Pre-compilation. */
	if (sel->type == PIPE_SHADER_GEOMETRY ||
	    sscreen->b.debug_flags & DBG_PRECOMPILE) {
		struct si_shader_ctx_state state = {sel};
		union si_shader_key key;

		memset(&key, 0, sizeof(key));

		/* Set reasonable defaults, so that the shader key doesn't
		 * cause any code to be eliminated.
		 */
		switch (sel->type) {
		case PIPE_SHADER_TESS_CTRL:
			key.tcs.prim_mode = PIPE_PRIM_TRIANGLES;
			break;
		case PIPE_SHADER_FRAGMENT:
			key.ps.alpha_func = PIPE_FUNC_ALWAYS;
			for (i = 0; i < 8; i++)
				if (sel->info.colors_written & (1 << i))
					key.ps.spi_shader_col_format |=
						V_028710_SPI_SHADER_FP16_ABGR << (i * 4);
			break;
		}

		if (si_shader_select_with_key(ctx, &state, &key)) {
			fprintf(stderr, "radeonsi: can't create a shader\n");
			tgsi_free_tokens(sel->tokens);
			FREE(sel);
			return NULL;
		}
	}

	pipe_mutex_init(sel->mutex);
	return sel;
}

/**
 * Normally, we only emit 1 viewport and 1 scissor if no shader is using
 * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
 * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
 * called to emit the rest.
 */
static void si_update_viewports_and_scissors(struct si_context *sctx)
{
	struct tgsi_shader_info *info = si_get_vs_info(sctx);

	if (!info || !info->writes_viewport_index)
		return;

	if (sctx->scissors.dirty_mask)
		si_mark_atom_dirty(sctx, &sctx->scissors.atom);
	if (sctx->viewports.dirty_mask)
		si_mark_atom_dirty(sctx, &sctx->viewports.atom);
}

static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->vs_shader.cso == sel)
		return;

	sctx->vs_shader.cso = sel;
	sctx->vs_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	si_update_viewports_and_scissors(sctx);
}

static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->gs_shader.cso != !!sel;

	if (sctx->gs_shader.cso == sel)
		return;

	sctx->gs_shader.cso = sel;
	sctx->gs_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed)
		si_shader_change_notify(sctx);
	si_update_viewports_and_scissors(sctx);
}

static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tcs_shader.cso != !!sel;

	if (sctx->tcs_shader.cso == sel)
		return;

	sctx->tcs_shader.cso = sel;
	sctx->tcs_shader.current = sel ? sel->first_variant : NULL;

	if (enable_changed)
		sctx->last_tcs = NULL; /* invalidate derived tess state */
}

static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tes_shader.cso != !!sel;

	if (sctx->tes_shader.cso == sel)
		return;

	sctx->tes_shader.cso = sel;
	sctx->tes_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed) {
		si_shader_change_notify(sctx);
		sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
	}
	si_update_viewports_and_scissors(sctx);
}

static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	/* skip if supplied shader is one already in use */
	if (sctx->ps_shader.cso == sel)
		return;

	sctx->ps_shader.cso = sel;
	sctx->ps_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->cb_render_state);
}

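/* Destroy a single shader variant and release its PM4 state. */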
static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
{
	if (shader->pm4) {
		switch (shader->selector->type) {
		case PIPE_SHADER_VERTEX:
			if (shader->key.vs.as_ls)
				si_pm4_delete_state(sctx, ls, shader->pm4);
			else if (shader->key.vs.as_es)
				si_pm4_delete_state(sctx, es, shader->pm4);
			else
				si_pm4_delete_state(sctx, vs, shader->pm4);
			break;
		case PIPE_SHADER_TESS_CTRL:
			si_pm4_delete_state(sctx, hs, shader->pm4);
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (shader->key.tes.as_es)
				si_pm4_delete_state(sctx, es, shader->pm4);
			else
				si_pm4_delete_state(sctx, vs, shader->pm4);
			break;
		case PIPE_SHADER_GEOMETRY:
			si_pm4_delete_state(sctx, gs, shader->pm4);
			si_pm4_delete_state(sctx, vs, shader->gs_copy_shader->pm4);
			break;
		case PIPE_SHADER_FRAGMENT:
			si_pm4_delete_state(sctx, ps, shader->pm4);
			break;
		}
	}

	si_shader_destroy(shader);
	free(shader);
}

static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;
	struct si_shader *p = sel->first_variant, *c;
	struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
		[PIPE_SHADER_VERTEX] = &sctx->vs_shader,
		[PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
		[PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
		[PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
		[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
	};

	if (current_shader[sel->type]->cso == sel) {
		current_shader[sel->type]->cso = NULL;
		current_shader[sel->type]->current = NULL;
	}

	while (p) {
		c = p->next_variant;
		si_delete_shader(sctx, p);
		p = c;
	}

	pipe_mutex_destroy(sel->mutex);
	free(sel->tokens);
	free(sel);
}

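/* Compute one SPI_PS_INPUT_CNTL value: flat-shade and point-sprite flags
 * plus the parameter offset of the matching VS output, or a default
 * offset if the VS doesn't write this input. */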
static unsigned si_get_ps_input_cntl(struct si_context *sctx,
				     struct si_shader *vs, unsigned name,
				     unsigned index, unsigned interpolate)
{
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	unsigned j, ps_input_cntl = 0;

	if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
	    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
		ps_input_cntl |= S_028644_FLAT_SHADE(1);

	if (name == TGSI_SEMANTIC_PCOORD ||
	    (name == TGSI_SEMANTIC_TEXCOORD &&
	     sctx->sprite_coord_enable & (1 << index))) {
		ps_input_cntl |= S_028644_PT_SPRITE_TEX(1);
	}

	for (j = 0; j < vsinfo->num_outputs; j++) {
		if (name == vsinfo->output_semantic_name[j] &&
		    index == vsinfo->output_semantic_index[j]) {
			ps_input_cntl |= S_028644_OFFSET(vs->vs_output_param_offset[j]);
			break;
		}
	}

	if (name == TGSI_SEMANTIC_PRIMID)
		/* PrimID is written after the last output. */
		ps_input_cntl |= S_028644_OFFSET(vs->vs_output_param_offset[vsinfo->num_outputs]);
	else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
		/* No corresponding output found, load defaults into input.
		 * Don't set any other bits.
		 * (FLAT_SHADE=1 completely changes behavior) */
		ps_input_cntl = S_028644_OFFSET(0x20);
	}
	return ps_input_cntl;
}

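/* Emit the SPI_PS_INPUT_CNTL_0..31 mapping of PS inputs (including back
 * colors for two-sided lighting) to VS output parameters. */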
static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader *ps = sctx->ps_shader.current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo = ps ? &ps->selector->info : NULL;
	unsigned i, num_interp, num_written = 0, bcol_interp[2];

	if (!ps || !ps->selector->info.num_inputs)
		return;

	num_interp = si_get_ps_num_interp(ps);
	assert(num_interp > 0);
	radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, num_interp);

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];

		radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, name, index,
						     interpolate));
		num_written++;

		if (name == TGSI_SEMANTIC_COLOR) {
			assert(index < ARRAY_SIZE(bcol_interp));
			bcol_interp[index] = interpolate;
		}
	}

	if (ps->key.ps.color_two_side) {
		unsigned bcol = TGSI_SEMANTIC_BCOLOR;

		for (i = 0; i < 2; i++) {
			if (!(psinfo->colors_read & (0xf << (i * 4))))
				continue;

			radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, bcol,
							     i, bcol_interp[i]));
			num_written++;
		}
	}
	assert(num_interp == num_written);
}

/**
 * Writing CONFIG or UCONFIG VGT registers requires VGT_FLUSH before that.
 */
static void si_init_config_add_vgt_flush(struct si_context *sctx)
{
	if (sctx->init_config_has_vgt_flush)
		return;

	/* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
	si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
	si_pm4_cmd_add(sctx->init_config, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	si_pm4_cmd_end(sctx->init_config, false);
	sctx->init_config_has_vgt_flush = true;
}

/* Initialize state related to ESGS / GSVS ring buffers */
static bool si_update_gs_ring_buffers(struct si_context *sctx)
{
	struct si_shader_selector *es =
		sctx->tes_shader.cso ? sctx->tes_shader.cso : sctx->vs_shader.cso;
	struct si_shader_selector *gs = sctx->gs_shader.cso;
	struct si_pm4_state *pm4;

	/* Chip constants. */
	unsigned num_se = sctx->screen->b.info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	unsigned gs_vertex_reuse = 16 * num_se; /* GS_VERTEX_REUSE register (per SE) */
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;

	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(es->esgs_itemsize * gs_vertex_reuse *
					    wave_size, alignment);

	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
				  es->esgs_itemsize * gs->gs_input_verts_per_prim;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
				  gs->max_gsvs_emit_size * (gs->max_gs_stream + 1);

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
	gsvs_ring_size = MIN2(gsvs_ring_size, max_size);

	/* Some rings don't have to be allocated if shaders don't use them.
	 * (e.g. no varyings between ES and GS or GS and VS)
	 */
	bool update_esgs = esgs_ring_size &&
			   (!sctx->esgs_ring ||
			    sctx->esgs_ring->width0 < esgs_ring_size);
	bool update_gsvs = gsvs_ring_size &&
			   (!sctx->gsvs_ring ||
			    sctx->gsvs_ring->width0 < gsvs_ring_size);

	if (!update_esgs && !update_gsvs)
		return true;

	if (update_esgs) {
		pipe_resource_reference(&sctx->esgs_ring, NULL);
		sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
						     PIPE_USAGE_DEFAULT,
						     esgs_ring_size);
		if (!sctx->esgs_ring)
			return false;
	}

	if (update_gsvs) {
		pipe_resource_reference(&sctx->gsvs_ring, NULL);
		sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
						     PIPE_USAGE_DEFAULT,
						     gsvs_ring_size);
		if (!sctx->gsvs_ring)
			return false;
	}

	/* Create the "init_config_gs_rings" state. */
	pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return false;

	if (sctx->b.chip_class >= CIK) {
		if (sctx->esgs_ring)
			si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
				       sctx->esgs_ring->width0 / 256);
		if (sctx->gsvs_ring)
			si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE,
				       sctx->gsvs_ring->width0 / 256);
	} else {
		if (sctx->esgs_ring)
			si_pm4_set_reg(pm4, R_0088C8_VGT_ESGS_RING_SIZE,
				       sctx->esgs_ring->width0 / 256);
		if (sctx->gsvs_ring)
			si_pm4_set_reg(pm4, R_0088CC_VGT_GSVS_RING_SIZE,
				       sctx->gsvs_ring->width0 / 256);
	}

	/* Set the state. */
	if (sctx->init_config_gs_rings)
		si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
	sctx->init_config_gs_rings = pm4;

	if (!sctx->init_config_has_vgt_flush) {
		si_init_config_add_vgt_flush(sctx);
		si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	}

	/* Flush the context to re-emit both init_config states. */
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	/* Set ring bindings. */
	if (sctx->esgs_ring) {
		si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
				   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
				   true, true, 4, 64, 0);
		si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
				   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
				   false, false, 0, 0, 0);
	}
	if (sctx->gsvs_ring)
		si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
				   sctx->gsvs_ring, 0, sctx->gsvs_ring->width0,
				   false, false, 0, 0, 0);
	return true;
}

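/* Rebind the GSVS ring views when the GS output vertex size changes;
 * each vertex stream gets its own offset within the ring. */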
static void si_update_gsvs_ring_bindings(struct si_context *sctx)
{
	unsigned gsvs_itemsize = sctx->gs_shader.cso->max_gsvs_emit_size;
	uint64_t offset;

	if (!sctx->gsvs_ring || gsvs_itemsize == sctx->last_gsvs_itemsize)
		return;

	sctx->last_gsvs_itemsize = gsvs_itemsize;

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, 0);

	offset = gsvs_itemsize * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_1,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);

	offset = (gsvs_itemsize * 2) * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_2,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);

	offset = (gsvs_itemsize * 3) * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_3,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);
}

/**
 * @returns 1 if \p shader has been updated to use a new scratch buffer
 *          0 if not
 *          < 0 if there was a failure
 */
static int si_update_scratch_buffer(struct si_context *sctx,
				    struct si_shader *shader)
{
	uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
	int r;

	if (!shader)
		return 0;

	/* This shader doesn't need a scratch buffer */
	if (shader->config.scratch_bytes_per_wave == 0)
		return 0;

	/* This shader is already configured to use the current
	 * scratch buffer. */
	if (shader->scratch_bo == sctx->scratch_buffer)
		return 0;

	assert(sctx->scratch_buffer);

	si_shader_apply_scratch_relocs(sctx, shader, scratch_va);

	/* Replace the shader bo with a new bo that has the relocs applied. */
	r = si_shader_binary_upload(sctx->screen, shader);
	if (r)
		return r;

	/* Update the shader state to use the new shader bo. */
	si_shader_init_pm4_state(shader);

	r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

	return 1;
}

static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
	return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
}

static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
{
	return shader ? shader->config.scratch_bytes_per_wave : 0;
}

static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
	unsigned bytes = 0;

	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tcs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
	return bytes;
}

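/* Make sure the scratch buffer is large enough for all bound shaders,
 * re-upload shaders that still point at an old buffer, and recompute
 * SPI_TMPRING_SIZE. */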
static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
	unsigned current_scratch_buffer_size =
		si_get_current_scratch_buffer_size(sctx);
	unsigned scratch_bytes_per_wave =
		si_get_max_scratch_bytes_per_wave(sctx);
	unsigned scratch_needed_size = scratch_bytes_per_wave *
		sctx->scratch_waves;
	unsigned spi_tmpring_size;
	int r;

	if (scratch_needed_size > 0) {
		if (scratch_needed_size > current_scratch_buffer_size) {
			/* Create a bigger scratch buffer */
			pipe_resource_reference(
					(struct pipe_resource**)&sctx->scratch_buffer,
					NULL);

			sctx->scratch_buffer =
					si_resource_create_custom(&sctx->screen->b.b,
					PIPE_USAGE_DEFAULT, scratch_needed_size);
			if (!sctx->scratch_buffer)
				return false;
			sctx->emit_scratch_reloc = true;
		}

		/* Update the shaders, so they are using the latest scratch. The
		 * scratch buffer may have been changed since these shaders were
		 * last used, so we still need to try to update them, even if
		 * they require scratch buffers smaller than the current size.
		 */
		r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);

		r = si_update_scratch_buffer(sctx, sctx->tcs_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);

		/* VS can be bound as LS, ES, or VS. */
		r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
		if (r < 0)
			return false;
		if (r == 1) {
			if (sctx->tes_shader.current)
				si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
			else if (sctx->gs_shader.current)
				si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
			else
				si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		}

		/* TES can be bound as ES or VS. */
		r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
		if (r < 0)
			return false;
		if (r == 1) {
			if (sctx->gs_shader.current)
				si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
			else
				si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
		}
	}

	/* The LLVM shader backend should be reporting aligned scratch_sizes. */
	assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
	       "scratch size should already be aligned correctly.");

	spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
			   S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
	if (spi_tmpring_size != sctx->spi_tmpring_size) {
		sctx->spi_tmpring_size = spi_tmpring_size;
		sctx->emit_scratch_reloc = true;
	}
	return true;
}

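/* Allocate the tessellation factor ring (once per context) and append
 * its registers to the init config state. */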
static void si_init_tess_factor_ring(struct si_context *sctx)
{
	assert(!sctx->tf_ring);

	sctx->tf_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					   PIPE_USAGE_DEFAULT,
					   32768 * sctx->screen->b.info.max_se);
	if (!sctx->tf_ring)
		return;

	assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);

	si_init_config_add_vgt_flush(sctx);

	/* Append these registers to the init config state. */
	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
			       S_030938_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
			       r600_resource(sctx->tf_ring)->gpu_address >> 8);
	} else {
		si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
			       S_008988_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
			       r600_resource(sctx->tf_ring)->gpu_address >> 8);
	}

	/* Flush the context to re-emit the init_config state.
	 * This is done only once in a lifetime of a context.
	 */
	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_TESS_CTRL,
			   SI_RING_TESS_FACTOR, sctx->tf_ring, 0,
			   sctx->tf_ring->width0, false, false, 0, 0, 0);
}

/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
static void si_generate_fixed_func_tcs(struct si_context *sctx)
{
	struct ureg_src const0, const1;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(TGSI_PROCESSOR_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!sctx->fixed_func_tcs_shader.cso);

	ureg_DECL_constant2D(ureg, 0, 1, SI_DRIVER_STATE_CONST_BUF);
	const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 0),
				    SI_DRIVER_STATE_CONST_BUF);
	const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 1),
				    SI_DRIVER_STATE_CONST_BUF);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, const0);
	ureg_MOV(ureg, tessinner, const1);
	ureg_END(ureg);

	sctx->fixed_func_tcs_shader.cso =
		ureg_create_shader_and_destroy(ureg, &sctx->b.b);
}

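/* Bind (and build on first use) the VGT_SHADER_STAGES_EN state matching
 * the currently enabled tessellation/geometry shader combination. */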
static void si_update_vgt_shader_config(struct si_context *sctx)
{
	/* Calculate the index of the config.
	 * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
	unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso;
	struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];

	if (!*pm4) {
		uint32_t stages = 0;

		*pm4 = CALLOC_STRUCT(si_pm4_state);

		if (sctx->tes_shader.cso) {
			stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
				  S_028B54_HS_EN(1);

			if (sctx->gs_shader.cso)
				stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
					  S_028B54_GS_EN(1) |
					  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
			else
				stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
		} else if (sctx->gs_shader.cso) {
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				  S_028B54_GS_EN(1) |
				  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
		}

		si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
	}
	si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}

static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
{
	struct pipe_stream_output_info *so = &shader->so;
	uint32_t enabled_stream_buffers_mask = 0;
	int i;

	for (i = 0; i < so->num_outputs; i++)
		enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << (so->output[i].stream * 4);
	sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
	sctx->b.streamout.stride_in_dw = shader->so.stride;
}

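/* Select and bind the shader variants for all enabled stages based on
 * the current pipeline state. */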
bool si_update_shaders(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context*)sctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	int r;

	/* Update stages before GS. */
	if (sctx->tes_shader.cso) {
		if (!sctx->tf_ring) {
			si_init_tess_factor_ring(sctx);
			if (!sctx->tf_ring)
				return false;
		}

		/* VS as LS */
		r = si_shader_select(ctx, &sctx->vs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);

		if (sctx->tcs_shader.cso) {
			r = si_shader_select(ctx, &sctx->tcs_shader);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
		} else {
			if (!sctx->fixed_func_tcs_shader.cso) {
				si_generate_fixed_func_tcs(sctx);
				if (!sctx->fixed_func_tcs_shader.cso)
					return false;
			}

			r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs,
					  sctx->fixed_func_tcs_shader.current->pm4);
		}

		r = si_shader_select(ctx, &sctx->tes_shader);
		if (r)
			return false;

		if (sctx->gs_shader.cso) {
			/* TES as ES */
			si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
		} else {
			/* TES as VS */
			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
			si_update_so(sctx, sctx->tes_shader.cso);
		}
	} else if (sctx->gs_shader.cso) {
		/* VS as ES */
		r = si_shader_select(ctx, &sctx->vs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
	} else {
		/* VS as VS */
		r = si_shader_select(ctx, &sctx->vs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		si_update_so(sctx, sctx->vs_shader.cso);
	}

	/* Update GS. */
	if (sctx->gs_shader.cso) {
		r = si_shader_select(ctx, &sctx->gs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader.current->gs_copy_shader->pm4);
		si_update_so(sctx, sctx->gs_shader.cso);

		if (!si_update_gs_ring_buffers(sctx))
			return false;

		si_update_gsvs_ring_bindings(sctx);
	} else {
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_update_vgt_shader_config(sctx);

	if (sctx->ps_shader.cso) {
		unsigned db_shader_control =
			sctx->ps_shader.cso->db_shader_control |
			S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS);

		r = si_shader_select(ctx, &sctx->ps_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
		    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
		    sctx->flatshade != rs->flatshade) {
			sctx->sprite_coord_enable = rs->sprite_coord_enable;
			sctx->flatshade = rs->flatshade;
			si_mark_atom_dirty(sctx, &sctx->spi_map);
		}

		if (sctx->b.family == CHIP_STONEY && si_pm4_state_changed(sctx, ps))
			si_mark_atom_dirty(sctx, &sctx->cb_render_state);

		if (sctx->ps_db_shader_control != db_shader_control) {
			sctx->ps_db_shader_control = db_shader_control;
			si_mark_atom_dirty(sctx, &sctx->db_render_state);
		}

		if (sctx->smoothing_enabled != sctx->ps_shader.current->key.ps.poly_line_smoothing) {
			sctx->smoothing_enabled = sctx->ps_shader.current->key.ps.poly_line_smoothing;
			si_mark_atom_dirty(sctx, &sctx->msaa_config);

			if (sctx->b.chip_class == SI)
				si_mark_atom_dirty(sctx, &sctx->db_render_state);
		}
	}

	if (si_pm4_state_changed(sctx, ls) ||
	    si_pm4_state_changed(sctx, hs) ||
	    si_pm4_state_changed(sctx, es) ||
	    si_pm4_state_changed(sctx, gs) ||
	    si_pm4_state_changed(sctx, vs) ||
	    si_pm4_state_changed(sctx, ps)) {
		if (!si_update_spi_tmpring_size(sctx))
			return false;
	}
	return true;
}

void si_init_shader_functions(struct si_context *sctx)
{
	si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);

	sctx->b.b.create_vs_state = si_create_shader_selector;
	sctx->b.b.create_tcs_state = si_create_shader_selector;
	sctx->b.b.create_tes_state = si_create_shader_selector;
	sctx->b.b.create_gs_state = si_create_shader_selector;
	sctx->b.b.create_fs_state = si_create_shader_selector;

	sctx->b.b.bind_vs_state = si_bind_vs_shader;
	sctx->b.b.bind_tcs_state = si_bind_tcs_shader;
	sctx->b.b.bind_tes_state = si_bind_tes_shader;
	sctx->b.b.bind_gs_state = si_bind_gs_shader;
	sctx->b.b.bind_fs_state = si_bind_ps_shader;

	sctx->b.b.delete_vs_state = si_delete_shader_selector;
	sctx->b.b.delete_tcs_state = si_delete_shader_selector;
	sctx->b.b.delete_tes_state = si_delete_shader_selector;
	sctx->b.b.delete_gs_state = si_delete_shader_selector;
	sctx->b.b.delete_fs_state = si_delete_shader_selector;
}