/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "si_shader.h"
#include "radeon/r600_cs.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"
static void si_set_tesseval_regs(struct si_shader *shader,
				 struct si_pm4_state *pm4)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
	unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
	bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
	bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
	unsigned type, partitioning, topology;

	switch (tes_prim_mode) {
	case PIPE_PRIM_LINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	case PIPE_PRIM_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case PIPE_PRIM_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	default:
		assert(0);
		return;
	}

	switch (tes_spacing) {
	case PIPE_TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	case PIPE_TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	default:
		assert(0);
		return;
	}

	if (tes_point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes_prim_mode == PIPE_PRIM_LINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (tes_vertex_order_cw)
		/* for some reason, this must be the other way around */
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
		       S_028B6C_TYPE(type) |
		       S_028B6C_PARTITIONING(partitioning) |
		       S_028B6C_TOPOLOGY(topology));
}
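
/* Prepare the PM4 state for a vertex shader used as the LS stage of
 * tessellation. Note that ls_rsrc1/ls_rsrc2 are only computed and stored
 * here; unlike the other stages, no RSRC registers are written directly. */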
static void si_shader_ls(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	/* We need at least 2 components for LS.
	 * VGPR0-3: (VertexID, RelAutoindex, ???, InstanceID). */
	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 1;

	num_user_sgprs = SI_LS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
	si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);

	shader->ls_rsrc1 = S_00B528_VGPRS((shader->num_vgprs - 1) / 4) |
			   S_00B528_SGPRS((num_sgprs - 1) / 8) |
			   S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
			   S_00B528_DX10_CLAMP(shader->dx10_clamp_mode);
	shader->ls_rsrc2 = S_00B52C_USER_SGPR(num_user_sgprs) |
			   S_00B52C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0);
}
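
/* Fill the PM4 state for a tessellation control shader (HS): program
 * address and the RSRC1/RSRC2 resource words. */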
static void si_shader_hs(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	num_user_sgprs = SI_TCS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with tessellation factor
	 * ring offset. */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
	si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
	si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
		       S_00B428_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B428_SGPRS((num_sgprs - 1) / 8) |
		       S_00B428_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
		       S_00B42C_USER_SGPR(num_user_sgprs) |
		       S_00B42C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
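
/* Fill the PM4 state for a shader running as the ES stage (a VS or TES
 * feeding the geometry shader through the ESGS ring). */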
static void si_shader_es(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	if (shader->selector->type == PIPE_SHADER_VERTEX) {
		vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;
		num_user_sgprs = SI_ES_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = 3; /* all components are needed for TES */
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B328_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs) |
		       S_00B32C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(shader, pm4);
}
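
/* Fill the PM4 state for a geometry shader: VGT_GS_MODE cut mode, GSVS ring
 * offsets and item sizes, instancing count, and the GS program registers. */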
static void si_shader_gs(struct si_shader *shader)
{
	unsigned gs_vert_itemsize = shader->selector->gsvs_vertex_size;
	unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
	unsigned gsvs_itemsize = shader->selector->max_gsvs_emit_size >> 2;
	unsigned gs_num_invocations = shader->selector->gs_num_invocations;
	unsigned cut_mode;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;
	unsigned max_stream = shader->selector->max_gs_stream;

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(gsvs_itemsize < (1 << 15));

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
		       S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		       S_028A40_CUT_MODE(cut_mode) |
		       S_028A40_ES_WRITE_OPTIMIZE(1) |
		       S_028A40_GS_WRITE_OPTIMIZE(1));

	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize * ((max_stream >= 2) ? 2 : 1));
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize * ((max_stream >= 3) ? 3 : 1));

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       util_bitcount64(shader->selector->inputs_read) * (16 >> 2));
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize * (max_stream + 1));

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize >> 2);
	si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? gs_vert_itemsize >> 2 : 0);
	si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? gs_vert_itemsize >> 2 : 0);
	si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? gs_vert_itemsize >> 2 : 0);

	si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
		       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
		       S_028B90_ENABLE(gs_num_invocations > 0));

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
	si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
	si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

	num_user_sgprs = SI_GS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
	if ((num_user_sgprs + 2) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
		       S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B228_SGPRS((num_sgprs - 1) / 8) |
		       S_00B228_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
		       S_00B22C_USER_SGPR(num_user_sgprs) |
		       S_00B22C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
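
/* Fill the PM4 state for the hardware VS stage. This is used both for real
 * vertex/tessellation-evaluation shaders and for the GS copy shader, and it
 * also programs parameter/position export formats and PA_CL_VTE_CNTL. */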
static void si_shader_vs(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, vgpr_comp_cnt;
	uint64_t va;
	unsigned window_space =
		shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
	bool enable_prim_id = si_vs_exports_prim_id(shader);

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	/* If this is the GS copy shader, the GS state writes this register.
	 * Otherwise, the VS state writes it.
	 */
	if (!shader->is_gs_copy_shader) {
		si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
			       S_028A40_MODE(enable_prim_id ? V_028A40_GS_SCENARIO_A : 0));
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
	} else
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	if (shader->is_gs_copy_shader) {
		vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
		vgpr_comp_cnt = shader->uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
		num_user_sgprs = SI_VS_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = 3; /* all components are needed for TES */
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	/* VS is required to export at least one param. */
	nparams = MAX2(shader->nr_param_exports, 1);
	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B128_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
		       S_00B12C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));

	if (window_space)
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	else
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(shader, pm4);
}
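
/* Fill the PM4 state for a pixel shader: SPI barycentric controls derived
 * from the input interpolation qualifiers, input/export formats and the PS
 * program registers. */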
static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned i, spi_ps_in_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0;
	bool has_centroid;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	for (i = 0; i < info->num_inputs; i++) {
		switch (info->input_semantic_name[i]) {
		case TGSI_SEMANTIC_POSITION:
			/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
			 * 0 -> Position = pixel center (default)
			 * 1 -> Position = pixel centroid
			 * 2 -> Position = at sample position
			 */
			switch (info->input_interpolate_loc[i]) {
			case TGSI_INTERPOLATE_LOC_CENTROID:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
				break;
			case TGSI_INTERPOLATE_LOC_SAMPLE:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
				break;
			}

			if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
			    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
			break;
		}
	}

	has_centroid = G_0286CC_PERSP_CENTROID_ENA(shader->spi_ps_input_ena) ||
		       G_0286CC_LINEAR_CENTROID_ENA(shader->spi_ps_input_ena);

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->nparam) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(has_centroid);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8) |
		       S_00B028_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs) |
		       S_00B32C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
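
/* (Re)build the PM4 state for a shader variant according to which hardware
 * stage its selector type and shader key map it to. */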
static void si_shader_init_pm4_state(struct si_shader *shader)
{
	if (shader->pm4)
		si_pm4_free_state_simple(shader->pm4);

	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.vs.as_ls)
			si_shader_ls(shader);
		else if (shader->key.vs.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader);
		break;
	case PIPE_SHADER_TESS_CTRL:
		si_shader_hs(shader);
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (shader->key.tes.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(shader);
		si_shader_vs(shader->gs_copy_shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}
/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
					  struct si_shader_selector *sel,
					  union si_shader_key *key)
{
	struct si_context *sctx = (struct si_context *)ctx;
	unsigned i;

	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX:
		if (sctx->vertex_elements)
			for (i = 0; i < sctx->vertex_elements->count; ++i)
				key->vs.instance_divisors[i] =
					sctx->vertex_elements->elements[i].instance_divisor;

		if (sctx->tes_shader.cso)
			key->vs.as_ls = 1;
		else if (sctx->gs_shader.cso) {
			key->vs.as_es = 1;
			key->vs.es_enabled_outputs = sctx->gs_shader.cso->inputs_read;
		}

		if (!sctx->gs_shader.cso && sctx->ps_shader.cso &&
		    sctx->ps_shader.cso->info.uses_primid)
			key->vs.export_prim_id = 1;
		break;
	case PIPE_SHADER_TESS_CTRL:
		key->tcs.prim_mode =
			sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (sctx->gs_shader.cso) {
			key->tes.as_es = 1;
			key->tes.es_enabled_outputs = sctx->gs_shader.cso->inputs_read;
		} else if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
			key->tes.export_prim_id = 1;
		break;
	case PIPE_SHADER_GEOMETRY:
		break;
	case PIPE_SHADER_FRAGMENT: {
		struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

		if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
			key->ps.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
		key->ps.export_16bpc = sctx->framebuffer.export_16bpc;

		if (rs) {
			bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
					sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
				       sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
			bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;

			key->ps.color_two_side = rs->two_side;

			if (sctx->queued.named.blend) {
				key->ps.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
						       rs->multisample_enable &&
						       !sctx->framebuffer.cb0_is_integer;
			}

			key->ps.poly_stipple = rs->poly_stipple_enable && is_poly;
			key->ps.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
						       (is_line && rs->line_smooth)) &&
						      sctx->framebuffer.nr_samples <= 1;
			key->ps.clamp_color = rs->clamp_fragment_color;
		}

		key->ps.alpha_func = PIPE_FUNC_ALWAYS;
		/* Alpha-test should be disabled if colorbuffer 0 is integer. */
		if (sctx->queued.named.dsa &&
		    !sctx->framebuffer.cb0_is_integer)
			key->ps.alpha_func = sctx->queued.named.dsa->alpha_func;
		break;
	}
	default:
		assert(0);
	}
}
/* Select the hw shader variant depending on the current state. */
static int si_shader_select(struct pipe_context *ctx,
			    struct si_shader_ctx_state *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state->cso;
	struct si_shader *current = state->current;
	union si_shader_key key;
	struct si_shader *iter, *shader = NULL;
	int r;

	si_shader_selector_key(ctx, sel, &key);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(current && memcmp(&current->key, &key, sizeof(key)) == 0))
		return 0;

	pipe_mutex_lock(sel->mutex);

	/* Find the shader variant. */
	for (iter = sel->first_variant; iter; iter = iter->next_variant) {
		/* Don't check the "current" shader. We checked it above. */
		if (current != iter &&
		    memcmp(&iter->key, &key, sizeof(key)) == 0) {
			state->current = iter;
			pipe_mutex_unlock(sel->mutex);
			return 0;
		}
	}

	/* Build a new shader. */
	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		pipe_mutex_unlock(sel->mutex);
		return -ENOMEM;
	}
	shader->selector = sel;
	shader->key = key;

	r = si_shader_create(sctx->screen, sctx->tm, shader);
	if (r) {
		R600_ERR("Failed to build shader variant (type=%u) %d\n",
			 sel->type, r);
		FREE(shader);
		pipe_mutex_unlock(sel->mutex);
		return r;
	}
	si_shader_init_pm4_state(shader);

	if (!sel->last_variant) {
		sel->first_variant = shader;
		sel->last_variant = shader;
	} else {
		sel->last_variant->next_variant = shader;
		sel->last_variant = shader;
	}
	state->current = shader;
	p_atomic_inc(&sctx->screen->b.num_compilations);
	pipe_mutex_unlock(sel->mutex);
	return 0;
}
static void *si_create_shader_selector(struct pipe_context *ctx,
				       const struct pipe_shader_state *state)
{
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
	int i;

	if (!sel)
		return NULL;

	sel->tokens = tgsi_dup_tokens(state->tokens);

	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);
	sel->type = util_pipe_shader_from_tgsi_processor(sel->info.processor);
	p_atomic_inc(&sscreen->b.num_shaders_created);

	/* First set which opcode uses which (i,j) pair. */
	if (sel->info.uses_persp_opcode_interp_centroid)
		sel->info.uses_persp_centroid = true;

	if (sel->info.uses_linear_opcode_interp_centroid)
		sel->info.uses_linear_centroid = true;

	if (sel->info.uses_persp_opcode_interp_offset ||
	    sel->info.uses_persp_opcode_interp_sample)
		sel->info.uses_persp_center = true;

	if (sel->info.uses_linear_opcode_interp_offset ||
	    sel->info.uses_linear_opcode_interp_sample)
		sel->info.uses_linear_center = true;

	/* Determine if the shader has to use a conditional assignment when
	 * emulating force_persample_interp.
	 */
	sel->forces_persample_interp_for_persp =
		sel->info.uses_persp_center +
		sel->info.uses_persp_centroid +
		sel->info.uses_persp_sample >= 2;

	sel->forces_persample_interp_for_linear =
		sel->info.uses_linear_center +
		sel->info.uses_linear_centroid +
		sel->info.uses_linear_sample >= 2;

	switch (sel->type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		sel->gsvs_vertex_size = sel->info.num_outputs * 16;
		sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
					  sel->gs_max_out_vertices;

		sel->max_gs_stream = 0;
		for (i = 0; i < sel->so.num_outputs; i++)
			sel->max_gs_stream = MAX2(sel->max_gs_stream,
						  sel->so.output[i].stream);

		for (i = 0; i < sel->info.num_inputs; i++) {
			unsigned name = sel->info.input_semantic_name[i];
			unsigned index = sel->info.input_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_PRIMID:
				break;
			default:
				sel->inputs_read |=
					1llu << si_shader_io_get_unique_index(name, index);
			}
		}
		break;

	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_CTRL:
		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->patch_outputs_written |=
					1llu << si_shader_io_get_unique_index(name, index);
				break;
			default:
				sel->outputs_written |=
					1llu << si_shader_io_get_unique_index(name, index);
			}
		}
		break;

	case PIPE_SHADER_FRAGMENT:
		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			if (name == TGSI_SEMANTIC_COLOR)
				sel->ps_colors_written |= 1 << index;
		}
		break;
	}

	if (sscreen->b.debug_flags & DBG_PRECOMPILE) {
		struct si_shader_ctx_state state = {sel};

		if (si_shader_select(ctx, &state)) {
			fprintf(stderr, "radeonsi: can't create a shader\n");
			tgsi_free_tokens(sel->tokens);
			FREE(sel);
			return NULL;
		}
	}

	pipe_mutex_init(sel->mutex);
	return sel;
}
/*
 * Normally, we only emit 1 viewport and 1 scissor if no shader is using
 * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
 * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
 * called to emit the rest.
 */
static void si_update_viewports_and_scissors(struct si_context *sctx)
{
	struct tgsi_shader_info *info = si_get_vs_info(sctx);

	if (!info || !info->writes_viewport_index)
		return;

	if (sctx->scissors.dirty_mask)
		si_mark_atom_dirty(sctx, &sctx->scissors.atom);
	if (sctx->viewports.dirty_mask)
		si_mark_atom_dirty(sctx, &sctx->viewports.atom);
}
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->vs_shader.cso == sel)
		return;

	sctx->vs_shader.cso = sel;
	sctx->vs_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	si_update_viewports_and_scissors(sctx);
}
static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->gs_shader.cso != !!sel;

	if (sctx->gs_shader.cso == sel)
		return;

	sctx->gs_shader.cso = sel;
	sctx->gs_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed)
		si_shader_change_notify(sctx);
	si_update_viewports_and_scissors(sctx);
}
static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tcs_shader.cso != !!sel;

	if (sctx->tcs_shader.cso == sel)
		return;

	sctx->tcs_shader.cso = sel;
	sctx->tcs_shader.current = sel ? sel->first_variant : NULL;

	if (enable_changed)
		sctx->last_tcs = NULL; /* invalidate derived tess state */
}
static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tes_shader.cso != !!sel;

	if (sctx->tes_shader.cso == sel)
		return;

	sctx->tes_shader.cso = sel;
	sctx->tes_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed) {
		si_shader_change_notify(sctx);
		sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
	}
	si_update_viewports_and_scissors(sctx);
}
static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	/* skip if supplied shader is one already in use */
	if (sctx->ps_shader.cso == sel)
		return;

	sctx->ps_shader.cso = sel;
	sctx->ps_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->cb_target_mask);
}
static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;
	struct si_shader *p = sel->first_variant, *c;
	struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
		[PIPE_SHADER_VERTEX] = &sctx->vs_shader,
		[PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
		[PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
		[PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
		[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
	};

	if (current_shader[sel->type]->cso == sel) {
		current_shader[sel->type]->cso = NULL;
		current_shader[sel->type]->current = NULL;
	}

	while (p) {
		c = p->next_variant;

		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (p->key.vs.as_ls)
				si_pm4_delete_state(sctx, ls, p->pm4);
			else if (p->key.vs.as_es)
				si_pm4_delete_state(sctx, es, p->pm4);
			else
				si_pm4_delete_state(sctx, vs, p->pm4);
			break;
		case PIPE_SHADER_TESS_CTRL:
			si_pm4_delete_state(sctx, hs, p->pm4);
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (p->key.tes.as_es)
				si_pm4_delete_state(sctx, es, p->pm4);
			else
				si_pm4_delete_state(sctx, vs, p->pm4);
			break;
		case PIPE_SHADER_GEOMETRY:
			si_pm4_delete_state(sctx, gs, p->pm4);
			si_pm4_delete_state(sctx, vs, p->gs_copy_shader->pm4);
			break;
		case PIPE_SHADER_FRAGMENT:
			si_pm4_delete_state(sctx, ps, p->pm4);
			break;
		}

		si_shader_destroy(p);
		free(p);
		p = c;
	}

	pipe_mutex_destroy(sel->mutex);
	free(sel->tokens);
	free(sel);
}
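
/* Emit SPI_PS_INPUT_CNTL_0..n, mapping each pixel shader input to the
 * corresponding output parameter slot of the last VS-stage shader, with
 * flat-shade and point-sprite overrides applied. */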
static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader *ps = sctx->ps_shader.current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo;
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	unsigned i, j, tmp, num_written = 0;

	if (!ps || !ps->nparam)
		return;

	psinfo = &ps->selector->info;

	radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, ps->nparam);

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];
		unsigned param_offset = ps->ps_input_param_offset[i];

		if (name == TGSI_SEMANTIC_POSITION ||
		    name == TGSI_SEMANTIC_FACE)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
			tmp |= S_028644_FLAT_SHADE(1);

		if (name == TGSI_SEMANTIC_PCOORD ||
		    (name == TGSI_SEMANTIC_TEXCOORD &&
		     sctx->sprite_coord_enable & (1 << index))) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vsinfo->num_outputs; j++) {
			if (name == vsinfo->output_semantic_name[j] &&
			    index == vsinfo->output_semantic_index[j]) {
				tmp |= S_028644_OFFSET(vs->vs_output_param_offset[j]);
				break;
			}
		}

		if (name == TGSI_SEMANTIC_PRIMID)
			/* PrimID is written after the last output. */
			tmp |= S_028644_OFFSET(vs->vs_output_param_offset[vsinfo->num_outputs]);
		else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(tmp)) {
			/* No corresponding output found, load defaults into input.
			 * Don't set any other bits.
			 * (FLAT_SHADE=1 completely changes behavior) */
			tmp = S_028644_OFFSET(0x20);
		}

		assert(param_offset == num_written);
		radeon_emit(cs, tmp);
		num_written++;

		if (name == TGSI_SEMANTIC_COLOR &&
		    ps->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}
	assert(ps->nparam == num_written);
}
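
/* Emit SPI_PS_INPUT_ENA/ADDR. If force_persample_interp is set and only one
 * (i,j) pair is used per interpolation mode, CENTER/CENTROID are swapped for
 * SAMPLE here; otherwise the shader handles it via a user SGPR. */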
static void si_emit_spi_ps_input(struct si_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader *ps = sctx->ps_shader.current;
	unsigned input_ena;

	if (!ps)
		return;

	input_ena = ps->spi_ps_input_ena;

	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));

	if (sctx->force_persample_interp) {
		unsigned num_persp = G_0286CC_PERSP_SAMPLE_ENA(input_ena) +
				     G_0286CC_PERSP_CENTER_ENA(input_ena) +
				     G_0286CC_PERSP_CENTROID_ENA(input_ena);
		unsigned num_linear = G_0286CC_LINEAR_SAMPLE_ENA(input_ena) +
				      G_0286CC_LINEAR_CENTER_ENA(input_ena) +
				      G_0286CC_LINEAR_CENTROID_ENA(input_ena);

		/* If only one set of (i,j) coordinates is used, we can disable
		 * CENTER/CENTROID, enable SAMPLE and it will load SAMPLE coordinates
		 * where CENTER/CENTROID are expected, effectively forcing per-sample
		 * interpolation. */
		if (num_persp == 1) {
			input_ena &= C_0286CC_PERSP_CENTER_ENA;
			input_ena &= C_0286CC_PERSP_CENTROID_ENA;
			input_ena |= G_0286CC_PERSP_SAMPLE_ENA(1);
		}
		if (num_linear == 1) {
			input_ena &= C_0286CC_LINEAR_CENTER_ENA;
			input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
			input_ena |= G_0286CC_LINEAR_SAMPLE_ENA(1);
		}

		/* If at least 2 sets of coordinates are used, we can't use this
		 * trick and have to select SAMPLE using a conditional assignment
		 * in the shader with "force_persample_interp" being a shader constant.
		 */
	}

	radeon_set_context_reg_seq(cs, R_0286CC_SPI_PS_INPUT_ENA, 2);
	radeon_emit(cs, input_ena);
	radeon_emit(cs, input_ena);

	if (ps->selector->forces_persample_interp_for_persp ||
	    ps->selector->forces_persample_interp_for_linear)
		radeon_set_sh_reg(cs, R_00B030_SPI_SHADER_USER_DATA_PS_0 +
				      SI_SGPR_PS_STATE_BITS * 4,
				  sctx->force_persample_interp);
}
/*
 * Writing CONFIG or UCONFIG VGT registers requires VGT_FLUSH before that.
 */
static void si_init_config_add_vgt_flush(struct si_context *sctx)
{
	if (sctx->init_config_has_vgt_flush)
		return;

	si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
	si_pm4_cmd_add(sctx->init_config, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	si_pm4_cmd_end(sctx->init_config, false);
	sctx->init_config_has_vgt_flush = true;
}
/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
	unsigned esgs_ring_size = 128 * 1024;
	unsigned gsvs_ring_size = 60 * 1024 * 1024;

	assert(!sctx->esgs_ring && !sctx->gsvs_ring);

	sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, esgs_ring_size);
	if (!sctx->esgs_ring)
		return;

	sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, gsvs_ring_size);
	if (!sctx->gsvs_ring) {
		pipe_resource_reference(&sctx->esgs_ring, NULL);
		return;
	}

	si_init_config_add_vgt_flush(sctx);

	/* Append these registers to the init config state. */
	if (sctx->b.chip_class >= CIK) {
		if (sctx->b.chip_class >= VI) {
			/* The maximum sizes are 63.999 MB on VI, because
			 * the register fields only have 18 bits. */
			assert(esgs_ring_size / 256 < (1 << 18));
			assert(gsvs_ring_size / 256 < (1 << 18));
		}
		si_pm4_set_reg(sctx->init_config, R_030900_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->init_config, R_030904_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	} else {
		si_pm4_set_reg(sctx->init_config, R_0088C8_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->init_config, R_0088CC_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	}

	/* Flush the context to re-emit the init_config state.
	 * This is done only once in a lifetime of a context.
	 */
	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   true, true, 4, 64, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   false, false, 0, 0, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
			   sctx->gsvs_ring, 0, gsvs_ring_size,
			   false, false, 0, 0, 0);
}
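
/* Rebind the GSVS ring views for the geometry shader whenever the per-vertex
 * emit size of the bound GS changes; one view per vertex stream, each offset
 * by a multiple of the item size. */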
static void si_update_gs_rings(struct si_context *sctx)
{
	unsigned gsvs_itemsize = sctx->gs_shader.cso->max_gsvs_emit_size;
	uint64_t offset;

	if (gsvs_itemsize == sctx->last_gsvs_itemsize)
		return;

	sctx->last_gsvs_itemsize = gsvs_itemsize;

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, 0);

	offset = gsvs_itemsize * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_1,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);

	offset = (gsvs_itemsize * 2) * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_2,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);

	offset = (gsvs_itemsize * 3) * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_3,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);
}
/**
 * @returns 1 if \p sel has been updated to use a new scratch buffer
 *          0 if not
 *          < 0 if there was a failure
 */
static int si_update_scratch_buffer(struct si_context *sctx,
				    struct si_shader *shader)
{
	uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
	int r;

	if (!shader)
		return 0;

	/* This shader doesn't need a scratch buffer */
	if (shader->scratch_bytes_per_wave == 0)
		return 0;

	/* This shader is already configured to use the current
	 * scratch buffer. */
	if (shader->scratch_bo == sctx->scratch_buffer)
		return 0;

	assert(sctx->scratch_buffer);

	si_shader_apply_scratch_relocs(sctx, shader, scratch_va);

	/* Replace the shader bo with a new bo that has the relocs applied. */
	r = si_shader_binary_upload(sctx->screen, shader);
	if (r)
		return r;

	/* Update the shader state to use the new shader bo. */
	si_shader_init_pm4_state(shader);

	r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

	return 1;
}

static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
	return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
}
static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
{
	return shader ? shader->scratch_bytes_per_wave : 0;
}

static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
	unsigned bytes = 0;

	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tcs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
	return bytes;
}
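
/* Size (and if needed reallocate) the global scratch buffer for the largest
 * per-wave scratch requirement among the bound shaders, patch every current
 * shader to point at it, and recompute SPI_TMPRING_SIZE. Returns false on
 * allocation or upload failure. */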
static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
	unsigned current_scratch_buffer_size =
		si_get_current_scratch_buffer_size(sctx);
	unsigned scratch_bytes_per_wave =
		si_get_max_scratch_bytes_per_wave(sctx);
	unsigned scratch_needed_size = scratch_bytes_per_wave *
		sctx->scratch_waves;
	int r;

	if (scratch_needed_size > 0) {
		if (scratch_needed_size > current_scratch_buffer_size) {
			/* Create a bigger scratch buffer */
			pipe_resource_reference(
					(struct pipe_resource**)&sctx->scratch_buffer,
					NULL);

			sctx->scratch_buffer =
					si_resource_create_custom(&sctx->screen->b.b,
					PIPE_USAGE_DEFAULT, scratch_needed_size);
			if (!sctx->scratch_buffer)
				return false;
			sctx->emit_scratch_reloc = true;
		}

		/* Update the shaders, so they are using the latest scratch. The
		 * scratch buffer may have been changed since these shaders were
		 * last used, so we still need to try to update them, even if
		 * they require scratch buffers smaller than the current size.
		 */
		r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);

		r = si_update_scratch_buffer(sctx, sctx->tcs_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);

		/* VS can be bound as LS, ES, or VS. */
		r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
		if (r < 0)
			return false;
		if (r == 1) {
			if (sctx->tes_shader.current)
				si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
			else if (sctx->gs_shader.current)
				si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
			else
				si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		}

		/* TES can be bound as ES or VS. */
		r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
		if (r < 0)
			return false;
		if (r == 1) {
			if (sctx->gs_shader.current)
				si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
			else
				si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
		}
	}

	/* The LLVM shader backend should be reporting aligned scratch_sizes. */
	assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
	       "scratch size should already be aligned correctly.");

	sctx->spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
				 S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
	return true;
}
static void si_init_tess_factor_ring(struct si_context *sctx)
{
	assert(!sctx->tf_ring);

	sctx->tf_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					   PIPE_USAGE_DEFAULT,
					   32768 * sctx->screen->b.info.max_se);
	if (!sctx->tf_ring)
		return;

	assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);

	si_init_config_add_vgt_flush(sctx);

	/* Append these registers to the init config state. */
	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
			       S_030938_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
			       r600_resource(sctx->tf_ring)->gpu_address >> 8);
	} else {
		si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
			       S_008988_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
			       r600_resource(sctx->tf_ring)->gpu_address >> 8);
	}

	/* Flush the context to re-emit the init_config state.
	 * This is done only once in a lifetime of a context.
	 */
	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_TESS_CTRL,
			   SI_RING_TESS_FACTOR, sctx->tf_ring, 0,
			   sctx->tf_ring->width0, false, false, 0, 0, 0);
}
/*
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
static void si_generate_fixed_func_tcs(struct si_context *sctx)
{
	struct ureg_src const0, const1;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(TGSI_PROCESSOR_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!sctx->fixed_func_tcs_shader.cso);

	ureg_DECL_constant2D(ureg, 0, 1, SI_DRIVER_STATE_CONST_BUF);
	const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 0),
				    SI_DRIVER_STATE_CONST_BUF);
	const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 1),
				    SI_DRIVER_STATE_CONST_BUF);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, const0);
	ureg_MOV(ureg, tessinner, const1);
	ureg_END(ureg);

	sctx->fixed_func_tcs_shader.cso =
		ureg_create_shader_and_destroy(ureg, &sctx->b.b);
}
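
/* Select and bind the VGT_SHADER_STAGES_EN configuration for the current
 * combination of tessellation and geometry shaders. The four possible
 * configs are built lazily and cached in sctx->vgt_shader_config[]. */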
static void si_update_vgt_shader_config(struct si_context *sctx)
{
	/* Calculate the index of the config.
	 * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
	unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso;
	struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];

	if (!*pm4) {
		uint32_t stages = 0;

		*pm4 = CALLOC_STRUCT(si_pm4_state);

		if (sctx->tes_shader.cso) {
			stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
				  S_028B54_HS_EN(1);

			if (sctx->gs_shader.cso)
				stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
					  S_028B54_GS_EN(1) |
					  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
			else
				stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
		} else if (sctx->gs_shader.cso) {
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				  S_028B54_GS_EN(1) |
				  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
		}

		si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
	}
	si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}
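
/* Derive the streamout state (which buffers are enabled per stream, and the
 * strides in dwords) from the stream output info of the last VS-stage
 * shader. */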
static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
{
	struct pipe_stream_output_info *so = &shader->so;
	uint32_t enabled_stream_buffers_mask = 0;
	int i;

	for (i = 0; i < so->num_outputs; i++)
		enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << (so->output[i].stream * 4);
	sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
	sctx->b.streamout.stride_in_dw = shader->so.stride;
}
bool si_update_shaders(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context *)sctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	int r;

	/* Update stages before GS. */
	if (sctx->tes_shader.cso) {
		if (!sctx->tf_ring) {
			si_init_tess_factor_ring(sctx);
			if (!sctx->tf_ring)
				return false;
		}

		r = si_shader_select(ctx, &sctx->vs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);

		if (sctx->tcs_shader.cso) {
			r = si_shader_select(ctx, &sctx->tcs_shader);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
		} else {
			if (!sctx->fixed_func_tcs_shader.cso) {
				si_generate_fixed_func_tcs(sctx);
				if (!sctx->fixed_func_tcs_shader.cso)
					return false;
			}

			r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs,
					  sctx->fixed_func_tcs_shader.current->pm4);
		}

		r = si_shader_select(ctx, &sctx->tes_shader);
		if (r)
			return false;

		if (sctx->gs_shader.cso) {
			si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
		} else {
			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
			si_update_so(sctx, sctx->tes_shader.cso);
		}
	} else if (sctx->gs_shader.cso) {
		r = si_shader_select(ctx, &sctx->vs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
	} else {
		r = si_shader_select(ctx, &sctx->vs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		si_update_so(sctx, sctx->vs_shader.cso);
	}

	if (sctx->gs_shader.cso) {
		r = si_shader_select(ctx, &sctx->gs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader.current->gs_copy_shader->pm4);
		si_update_so(sctx, sctx->gs_shader.cso);

		if (!sctx->gsvs_ring) {
			si_init_gs_rings(sctx);
			if (!sctx->gsvs_ring)
				return false;
		}

		si_update_gs_rings(sctx);
	} else {
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_update_vgt_shader_config(sctx);

	if (sctx->ps_shader.cso) {
		r = si_shader_select(ctx, &sctx->ps_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
		    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
		    sctx->flatshade != rs->flatshade) {
			sctx->sprite_coord_enable = rs->sprite_coord_enable;
			sctx->flatshade = rs->flatshade;
			si_mark_atom_dirty(sctx, &sctx->spi_map);
		}

		if (si_pm4_state_changed(sctx, ps) ||
		    sctx->force_persample_interp != rs->force_persample_interp) {
			sctx->force_persample_interp = rs->force_persample_interp;
			si_mark_atom_dirty(sctx, &sctx->spi_ps_input);
		}

		if (sctx->ps_db_shader_control != sctx->ps_shader.current->db_shader_control) {
			sctx->ps_db_shader_control = sctx->ps_shader.current->db_shader_control;
			si_mark_atom_dirty(sctx, &sctx->db_render_state);
		}

		if (sctx->smoothing_enabled != sctx->ps_shader.current->key.ps.poly_line_smoothing) {
			sctx->smoothing_enabled = sctx->ps_shader.current->key.ps.poly_line_smoothing;
			si_mark_atom_dirty(sctx, &sctx->msaa_config);

			if (sctx->b.chip_class == SI)
				si_mark_atom_dirty(sctx, &sctx->db_render_state);
		}
	}

	if (si_pm4_state_changed(sctx, ls) ||
	    si_pm4_state_changed(sctx, hs) ||
	    si_pm4_state_changed(sctx, es) ||
	    si_pm4_state_changed(sctx, gs) ||
	    si_pm4_state_changed(sctx, vs) ||
	    si_pm4_state_changed(sctx, ps)) {
		if (!si_update_spi_tmpring_size(sctx))
			return false;
	}
	return true;
}
void si_init_shader_functions(struct si_context *sctx)
{
	si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);
	si_init_atom(sctx, &sctx->spi_ps_input, &sctx->atoms.s.spi_ps_input, si_emit_spi_ps_input);

	sctx->b.b.create_vs_state = si_create_shader_selector;
	sctx->b.b.create_tcs_state = si_create_shader_selector;
	sctx->b.b.create_tes_state = si_create_shader_selector;
	sctx->b.b.create_gs_state = si_create_shader_selector;
	sctx->b.b.create_fs_state = si_create_shader_selector;

	sctx->b.b.bind_vs_state = si_bind_vs_shader;
	sctx->b.b.bind_tcs_state = si_bind_tcs_shader;
	sctx->b.b.bind_tes_state = si_bind_tes_shader;
	sctx->b.b.bind_gs_state = si_bind_gs_shader;
	sctx->b.b.bind_fs_state = si_bind_ps_shader;

	sctx->b.b.delete_vs_state = si_delete_shader_selector;
	sctx->b.b.delete_tcs_state = si_delete_shader_selector;
	sctx->b.b.delete_tes_state = si_delete_shader_selector;
	sctx->b.b.delete_gs_state = si_delete_shader_selector;
	sctx->b.b.delete_fs_state = si_delete_shader_selector;
}