/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"
#include "radeon/r600_cs.h"

#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"

static void si_set_tesseval_regs(struct si_shader *shader,
				 struct si_pm4_state *pm4)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
	unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
	bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
	bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
	unsigned type, partitioning, topology;

	switch (tes_prim_mode) {
	case PIPE_PRIM_LINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	case PIPE_PRIM_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case PIPE_PRIM_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	default:
		assert(0);
		return;
	}

	switch (tes_spacing) {
	case PIPE_TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	case PIPE_TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	default:
		assert(0);
		return;
	}

	if (tes_point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes_prim_mode == PIPE_PRIM_LINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (tes_vertex_order_cw)
		/* for some reason, this must be the other way around */
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
		       S_028B6C_TYPE(type) |
		       S_028B6C_PARTITIONING(partitioning) |
		       S_028B6C_TOPOLOGY(topology));
}

static void si_shader_ls(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	/* We need at least 2 components for LS.
	 * VGPR0-3: (VertexID, RelAutoindex, ???, InstanceID). */
	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 1;

	num_user_sgprs = SI_LS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
	si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);

	shader->ls_rsrc1 = S_00B528_VGPRS((shader->num_vgprs - 1) / 4) |
			   S_00B528_SGPRS((num_sgprs - 1) / 8) |
			   S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
			   S_00B528_DX10_CLAMP(shader->dx10_clamp_mode);
	shader->ls_rsrc2 = S_00B52C_USER_SGPR(num_user_sgprs) |
			   S_00B52C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0);
}

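/* Worked example of the RSRC1 arithmetic above (hypothetical register counts,
 * not taken from a real shader): with num_vgprs = 41 and num_sgprs = 30, the
 * fields become VGPRS = (41 - 1) / 4 = 10 and SGPRS = (30 - 1) / 8 = 3, i.e.
 * the hardware fields count allocation granules (4 VGPRs / 8 SGPRs) minus one,
 * so the wave gets 44 VGPRs and 32 SGPRs allocated. */
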
static void si_shader_hs(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	num_user_sgprs = SI_TCS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with tessellation factor
	 * ring buffer offset. */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
	si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
	si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
		       S_00B428_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B428_SGPRS((num_sgprs - 1) / 8) |
		       S_00B428_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
		       S_00B42C_USER_SGPR(num_user_sgprs) |
		       S_00B42C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}

static void si_shader_es(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	if (shader->selector->type == PIPE_SHADER_VERTEX) {
		vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;
		num_user_sgprs = SI_ES_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = 3; /* all components are needed for TES */
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B328_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs) |
		       S_00B32C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(shader, pm4);
}

static void si_shader_gs(struct si_shader *shader)
{
	unsigned gs_vert_itemsize = shader->selector->gsvs_vertex_size;
	unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
	unsigned gsvs_itemsize = shader->selector->max_gsvs_emit_size >> 2;
	unsigned gs_num_invocations = shader->selector->gs_num_invocations;
	unsigned cut_mode;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;
	unsigned max_stream = shader->selector->max_gs_stream;

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(gsvs_itemsize < (1 << 15));

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
		       S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		       S_028A40_CUT_MODE(cut_mode) |
		       S_028A40_ES_WRITE_OPTIMIZE(1) |
		       S_028A40_GS_WRITE_OPTIMIZE(1));

	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize * ((max_stream >= 2) ? 2 : 1));
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize * ((max_stream >= 3) ? 3 : 1));

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       shader->selector->esgs_itemsize / 4);
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize * (max_stream + 1));

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize >> 2);
	si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? gs_vert_itemsize >> 2 : 0);
	si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? gs_vert_itemsize >> 2 : 0);
	si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? gs_vert_itemsize >> 2 : 0);

	si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
		       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
		       S_028B90_ENABLE(gs_num_invocations > 0));

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
	si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
	si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

	num_user_sgprs = SI_GS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
	if ((num_user_sgprs + 2) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
		       S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B228_SGPRS((num_sgprs - 1) / 8) |
		       S_00B228_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
		       S_00B22C_USER_SGPR(num_user_sgprs) |
		       S_00B22C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}

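/* Example of the per-stream GSVS arithmetic above (hypothetical sizes, not
 * from a real selector): with max_gsvs_emit_size = 1024 bytes and
 * max_stream = 2, gsvs_itemsize is 256 dwords, so RING_OFFSET_1 = 256,
 * RING_OFFSET_2 = 512, RING_OFFSET_3 stays at 256 (stream 3 unused) and
 * GSVS_RING_ITEMSIZE = 256 * 3 = 768 dwords. */
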
static void si_shader_vs(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, vgpr_comp_cnt;
	uint64_t va;
	unsigned window_space =
	   shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
	bool enable_prim_id = si_vs_exports_prim_id(shader);

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	/* If this is the GS copy shader, the GS state writes this register.
	 * Otherwise, the VS state writes it.
	 */
	if (!shader->is_gs_copy_shader) {
		si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
			       S_028A40_MODE(enable_prim_id ? V_028A40_GS_SCENARIO_A : 0));
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
	} else
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

	if (shader->is_gs_copy_shader) {
		vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
		vgpr_comp_cnt = shader->uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
		num_user_sgprs = SI_VS_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = 3; /* all components are needed for TES */
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	/* VS is required to export at least one param. */
	nparams = MAX2(shader->nr_param_exports, 1);
	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B128_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
		       S_00B12C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));

	if (window_space)
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	else
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(shader, pm4);
}

static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned i, spi_ps_in_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0;
	uint64_t va;
	bool has_centroid;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return;

	for (i = 0; i < info->num_inputs; i++) {
		switch (info->input_semantic_name[i]) {
		case TGSI_SEMANTIC_POSITION:
			/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
			 * Possible values:
			 * 0 -> Position = pixel center (default)
			 * 1 -> Position = pixel centroid
			 * 2 -> Position = at sample position
			 */
			switch (info->input_interpolate_loc[i]) {
			case TGSI_INTERPOLATE_LOC_CENTROID:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
				break;
			case TGSI_INTERPOLATE_LOC_SAMPLE:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
				break;
			}

			if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
			    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
			break;
		}
	}

	has_centroid = G_0286CC_PERSP_CENTROID_ENA(shader->spi_ps_input_ena) ||
		       G_0286CC_LINEAR_CENTROID_ENA(shader->spi_ps_input_ena);

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->nparam) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(has_centroid);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8) |
		       S_00B028_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs) |
		       S_00B02C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}

static void si_shader_init_pm4_state(struct si_shader *shader)
{
	if (shader->pm4)
		si_pm4_free_state_simple(shader->pm4);

	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.vs.as_ls)
			si_shader_ls(shader);
		else if (shader->key.vs.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader);
		break;
	case PIPE_SHADER_TESS_CTRL:
		si_shader_hs(shader);
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (shader->key.tes.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(shader);
		si_shader_vs(shader->gs_copy_shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}

/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
					  struct si_shader_selector *sel,
					  union si_shader_key *key)
{
	struct si_context *sctx = (struct si_context *)ctx;
	unsigned i;

	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX:
		if (sctx->vertex_elements)
			for (i = 0; i < sctx->vertex_elements->count; ++i)
				key->vs.instance_divisors[i] =
					sctx->vertex_elements->elements[i].instance_divisor;

		if (sctx->tes_shader.cso)
			key->vs.as_ls = 1;
		else if (sctx->gs_shader.cso) {
			key->vs.as_es = 1;
			key->vs.es_enabled_outputs = sctx->gs_shader.cso->inputs_read;
		}

		if (!sctx->gs_shader.cso && sctx->ps_shader.cso &&
		    sctx->ps_shader.cso->info.uses_primid)
			key->vs.export_prim_id = 1;
		break;
	case PIPE_SHADER_TESS_CTRL:
		key->tcs.prim_mode =
			sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (sctx->gs_shader.cso) {
			key->tes.as_es = 1;
			key->tes.es_enabled_outputs = sctx->gs_shader.cso->inputs_read;
		} else if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
			key->tes.export_prim_id = 1;
		break;
	case PIPE_SHADER_GEOMETRY:
		break;
	case PIPE_SHADER_FRAGMENT: {
		struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

		if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
			key->ps.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
		key->ps.export_16bpc = sctx->framebuffer.export_16bpc;

		if (rs) {
			bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
					sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
				       sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
			bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;

			key->ps.color_two_side = rs->two_side;

			if (sctx->queued.named.blend) {
				key->ps.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
						       rs->multisample_enable &&
						       !sctx->framebuffer.cb0_is_integer;
			}

			key->ps.poly_stipple = rs->poly_stipple_enable && is_poly;
			key->ps.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
						       (is_line && rs->line_smooth)) &&
						      sctx->framebuffer.nr_samples <= 1;
			key->ps.clamp_color = rs->clamp_fragment_color;
		}

		key->ps.alpha_func = PIPE_FUNC_ALWAYS;
		/* Alpha-test should be disabled if colorbuffer 0 is integer. */
		if (sctx->queued.named.dsa &&
		    !sctx->framebuffer.cb0_is_integer)
			key->ps.alpha_func = sctx->queued.named.dsa->alpha_func;
		break;
	}
	default:
		assert(0);
	}
}

/* Select the hw shader variant depending on the current state. */
static int si_shader_select(struct pipe_context *ctx,
			    struct si_shader_ctx_state *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state->cso;
	struct si_shader *current = state->current;
	union si_shader_key key;
	struct si_shader *iter, *shader = NULL;
	int r;

	si_shader_selector_key(ctx, sel, &key);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(current && memcmp(&current->key, &key, sizeof(key)) == 0))
		return 0;

	pipe_mutex_lock(sel->mutex);

	/* Find the shader variant. */
	for (iter = sel->first_variant; iter; iter = iter->next_variant) {
		/* Don't check the "current" shader. We checked it above. */
		if (current != iter &&
		    memcmp(&iter->key, &key, sizeof(key)) == 0) {
			state->current = iter;
			pipe_mutex_unlock(sel->mutex);
			return 0;
		}
	}

	/* Build a new shader. */
	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		pipe_mutex_unlock(sel->mutex);
		return -ENOMEM;
	}
	shader->selector = sel;
	shader->key = key;

	r = si_shader_create(sctx->screen, sctx->tm, shader);
	if (r) {
		R600_ERR("Failed to build shader variant (type=%u) %d\n",
			 sel->type, r);
		FREE(shader);
		pipe_mutex_unlock(sel->mutex);
		return r;
	}
	si_shader_init_pm4_state(shader);

	if (!sel->last_variant) {
		sel->first_variant = shader;
		sel->last_variant = shader;
	} else {
		sel->last_variant->next_variant = shader;
		sel->last_variant = shader;
	}
	state->current = shader;
	p_atomic_inc(&sctx->screen->b.num_compilations);
	pipe_mutex_unlock(sel->mutex);
	return 0;
}

static void *si_create_shader_selector(struct pipe_context *ctx,
				       const struct pipe_shader_state *state)
{
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
	int i;

	if (!sel)
		return NULL;

	sel->tokens = tgsi_dup_tokens(state->tokens);
	if (!sel->tokens) {
		FREE(sel);
		return NULL;
	}

	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);
	sel->type = util_pipe_shader_from_tgsi_processor(sel->info.processor);
	p_atomic_inc(&sscreen->b.num_shaders_created);

	/* First set which opcode uses which (i,j) pair. */
	if (sel->info.uses_persp_opcode_interp_centroid)
		sel->info.uses_persp_centroid = true;

	if (sel->info.uses_linear_opcode_interp_centroid)
		sel->info.uses_linear_centroid = true;

	if (sel->info.uses_persp_opcode_interp_offset ||
	    sel->info.uses_persp_opcode_interp_sample)
		sel->info.uses_persp_center = true;

	if (sel->info.uses_linear_opcode_interp_offset ||
	    sel->info.uses_linear_opcode_interp_sample)
		sel->info.uses_linear_center = true;

	/* Determine if the shader has to use a conditional assignment when
	 * emulating force_persample_interp.
	 */
	sel->forces_persample_interp_for_persp =
		sel->info.uses_persp_center +
		sel->info.uses_persp_centroid +
		sel->info.uses_persp_sample >= 2;

	sel->forces_persample_interp_for_linear =
		sel->info.uses_linear_center +
		sel->info.uses_linear_centroid +
		sel->info.uses_linear_sample >= 2;

	switch (sel->type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		sel->gsvs_vertex_size = sel->info.num_outputs * 16;
		sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
					  sel->gs_max_out_vertices;

		sel->max_gs_stream = 0;
		for (i = 0; i < sel->so.num_outputs; i++)
			sel->max_gs_stream = MAX2(sel->max_gs_stream,
						  sel->so.output[i].stream);

		for (i = 0; i < sel->info.num_inputs; i++) {
			unsigned name = sel->info.input_semantic_name[i];
			unsigned index = sel->info.input_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_PRIMID:
				break;
			default:
				sel->inputs_read |=
					1llu << si_shader_io_get_unique_index(name, index);
			}
		}
		break;

	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_CTRL:
		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->patch_outputs_written |=
					1llu << si_shader_io_get_unique_index(name, index);
				break;
			default:
				sel->outputs_written |=
					1llu << si_shader_io_get_unique_index(name, index);
			}
		}
		sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;
		break;
	case PIPE_SHADER_FRAGMENT:
		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			if (name == TGSI_SEMANTIC_COLOR)
				sel->ps_colors_written |= 1 << index;
		}
		break;
	}

	if (sscreen->b.debug_flags & DBG_PRECOMPILE) {
		struct si_shader_ctx_state state = {sel};

		if (si_shader_select(ctx, &state)) {
			fprintf(stderr, "radeonsi: can't create a shader\n");
			tgsi_free_tokens(sel->tokens);
			FREE(sel);
			return NULL;
		}
	}

	pipe_mutex_init(sel->mutex);
	return sel;
}

/*
 * Normally, we only emit 1 viewport and 1 scissor if no shader is using
 * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
 * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
 * called to emit the rest.
 */
static void si_update_viewports_and_scissors(struct si_context *sctx)
{
	struct tgsi_shader_info *info = si_get_vs_info(sctx);

	if (!info || !info->writes_viewport_index)
		return;

	if (sctx->scissors.dirty_mask)
		si_mark_atom_dirty(sctx, &sctx->scissors.atom);
	if (sctx->viewports.dirty_mask)
		si_mark_atom_dirty(sctx, &sctx->viewports.atom);
}

static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->vs_shader.cso == sel)
		return;

	sctx->vs_shader.cso = sel;
	sctx->vs_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	si_update_viewports_and_scissors(sctx);
}

static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->gs_shader.cso != !!sel;

	if (sctx->gs_shader.cso == sel)
		return;

	sctx->gs_shader.cso = sel;
	sctx->gs_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed)
		si_shader_change_notify(sctx);
	si_update_viewports_and_scissors(sctx);
}

static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tcs_shader.cso != !!sel;

	if (sctx->tcs_shader.cso == sel)
		return;

	sctx->tcs_shader.cso = sel;
	sctx->tcs_shader.current = sel ? sel->first_variant : NULL;

	if (enable_changed)
		sctx->last_tcs = NULL; /* invalidate derived tess state */
}

static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tes_shader.cso != !!sel;

	if (sctx->tes_shader.cso == sel)
		return;

	sctx->tes_shader.cso = sel;
	sctx->tes_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed) {
		si_shader_change_notify(sctx);
		sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
	}
	si_update_viewports_and_scissors(sctx);
}

static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	/* skip if supplied shader is one already in use */
	if (sctx->ps_shader.cso == sel)
		return;

	sctx->ps_shader.cso = sel;
	sctx->ps_shader.current = sel ? sel->first_variant : NULL;
	si_mark_atom_dirty(sctx, &sctx->cb_target_mask);
}

static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;
	struct si_shader *p = sel->first_variant, *c;
	struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
		[PIPE_SHADER_VERTEX] = &sctx->vs_shader,
		[PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
		[PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
		[PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
		[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
	};

	if (current_shader[sel->type]->cso == sel) {
		current_shader[sel->type]->cso = NULL;
		current_shader[sel->type]->current = NULL;
	}

	while (p) {
		c = p->next_variant;
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (p->key.vs.as_ls)
				si_pm4_delete_state(sctx, ls, p->pm4);
			else if (p->key.vs.as_es)
				si_pm4_delete_state(sctx, es, p->pm4);
			else
				si_pm4_delete_state(sctx, vs, p->pm4);
			break;
		case PIPE_SHADER_TESS_CTRL:
			si_pm4_delete_state(sctx, hs, p->pm4);
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (p->key.tes.as_es)
				si_pm4_delete_state(sctx, es, p->pm4);
			else
				si_pm4_delete_state(sctx, vs, p->pm4);
			break;
		case PIPE_SHADER_GEOMETRY:
			si_pm4_delete_state(sctx, gs, p->pm4);
			si_pm4_delete_state(sctx, vs, p->gs_copy_shader->pm4);
			break;
		case PIPE_SHADER_FRAGMENT:
			si_pm4_delete_state(sctx, ps, p->pm4);
			break;
		}

		si_shader_destroy(p);
		free(p);
		p = c;
	}

	pipe_mutex_destroy(sel->mutex);
	free(sel->tokens);
	free(sel);
}

static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader *ps = sctx->ps_shader.current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo;
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	unsigned i, j, tmp, num_written = 0;

	if (!ps || !ps->nparam)
		return;

	psinfo = &ps->selector->info;

	radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, ps->nparam);

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];
		unsigned param_offset = ps->ps_input_param_offset[i];

		if (name == TGSI_SEMANTIC_POSITION ||
		    name == TGSI_SEMANTIC_FACE)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
			tmp |= S_028644_FLAT_SHADE(1);

		if (name == TGSI_SEMANTIC_PCOORD ||
		    (name == TGSI_SEMANTIC_TEXCOORD &&
		     sctx->sprite_coord_enable & (1 << index))) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vsinfo->num_outputs; j++) {
			if (name == vsinfo->output_semantic_name[j] &&
			    index == vsinfo->output_semantic_index[j]) {
				tmp |= S_028644_OFFSET(vs->vs_output_param_offset[j]);
				break;
			}
		}

		if (name == TGSI_SEMANTIC_PRIMID)
			/* PrimID is written after the last output. */
			tmp |= S_028644_OFFSET(vs->vs_output_param_offset[vsinfo->num_outputs]);
		else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(tmp)) {
			/* No corresponding output found, load defaults into input.
			 * Don't set any other bits.
			 * (FLAT_SHADE=1 completely changes behavior) */
			tmp = S_028644_OFFSET(0x20);
		}

		assert(param_offset == num_written);
		radeon_emit(cs, tmp);
		num_written++;

		if (name == TGSI_SEMANTIC_COLOR &&
		    ps->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}
	assert(ps->nparam == num_written);
}

static void si_emit_spi_ps_input(struct si_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader *ps = sctx->ps_shader.current;
	unsigned input_ena;

	if (!ps)
		return;

	input_ena = ps->spi_ps_input_ena;

	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));

	if (sctx->force_persample_interp) {
		unsigned num_persp = G_0286CC_PERSP_SAMPLE_ENA(input_ena) +
				     G_0286CC_PERSP_CENTER_ENA(input_ena) +
				     G_0286CC_PERSP_CENTROID_ENA(input_ena);
		unsigned num_linear = G_0286CC_LINEAR_SAMPLE_ENA(input_ena) +
				      G_0286CC_LINEAR_CENTER_ENA(input_ena) +
				      G_0286CC_LINEAR_CENTROID_ENA(input_ena);

		/* If only one set of (i,j) coordinates is used, we can disable
		 * CENTER/CENTROID, enable SAMPLE and it will load SAMPLE coordinates
		 * where CENTER/CENTROID are expected, effectively forcing per-sample
		 * interpolation.
		 */
		if (num_persp == 1) {
			input_ena &= C_0286CC_PERSP_CENTER_ENA;
			input_ena &= C_0286CC_PERSP_CENTROID_ENA;
			input_ena |= G_0286CC_PERSP_SAMPLE_ENA(1);
		}
		if (num_linear == 1) {
			input_ena &= C_0286CC_LINEAR_CENTER_ENA;
			input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
			input_ena |= G_0286CC_LINEAR_SAMPLE_ENA(1);
		}

		/* If at least 2 sets of coordinates are used, we can't use this
		 * trick and have to select SAMPLE using a conditional assignment
		 * in the shader with "force_persample_interp" being a shader constant.
		 */
	}

	radeon_set_context_reg_seq(cs, R_0286CC_SPI_PS_INPUT_ENA, 2);
	radeon_emit(cs, input_ena);
	radeon_emit(cs, input_ena);

	if (ps->selector->forces_persample_interp_for_persp ||
	    ps->selector->forces_persample_interp_for_linear)
		radeon_set_sh_reg(cs, R_00B030_SPI_SHADER_USER_DATA_PS_0 +
				      SI_SGPR_PS_STATE_BITS * 4,
				  sctx->force_persample_interp);
}

/**
 * Writing CONFIG or UCONFIG VGT registers requires VGT_FLUSH before that.
 */
static void si_init_config_add_vgt_flush(struct si_context *sctx)
{
	if (sctx->init_config_has_vgt_flush)
		return;

	si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
	si_pm4_cmd_add(sctx->init_config, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	si_pm4_cmd_end(sctx->init_config, false);
	sctx->init_config_has_vgt_flush = true;
}

/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
	unsigned esgs_ring_size = 128 * 1024;
	unsigned gsvs_ring_size = 60 * 1024 * 1024;

	assert(!sctx->esgs_ring && !sctx->gsvs_ring);

	sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, esgs_ring_size);
	if (!sctx->esgs_ring)
		return;

	sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, gsvs_ring_size);
	if (!sctx->gsvs_ring) {
		pipe_resource_reference(&sctx->esgs_ring, NULL);
		return;
	}

	si_init_config_add_vgt_flush(sctx);

	/* Append these registers to the init config state. */
	if (sctx->b.chip_class >= CIK) {
		if (sctx->b.chip_class >= VI) {
			/* The maximum sizes are 63.999 MB on VI, because
			 * the register fields only have 18 bits. */
			assert(esgs_ring_size / 256 < (1 << 18));
			assert(gsvs_ring_size / 256 < (1 << 18));
		}
		si_pm4_set_reg(sctx->init_config, R_030900_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->init_config, R_030904_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	} else {
		si_pm4_set_reg(sctx->init_config, R_0088C8_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->init_config, R_0088CC_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	}

	/* Flush the context to re-emit the init_config state.
	 * This is done only once in a lifetime of a context.
	 */
	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   true, true, 4, 64, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   false, false, 0, 0, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
			   sctx->gsvs_ring, 0, gsvs_ring_size,
			   false, false, 0, 0, 0);
}

static void si_update_gs_rings(struct si_context *sctx)
{
	unsigned gsvs_itemsize = sctx->gs_shader.cso->max_gsvs_emit_size;
	uint64_t offset;

	if (gsvs_itemsize == sctx->last_gsvs_itemsize)
		return;

	sctx->last_gsvs_itemsize = gsvs_itemsize;

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, 0);

	offset = gsvs_itemsize * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_1,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);

	offset = (gsvs_itemsize * 2) * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_2,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);

	offset = (gsvs_itemsize * 3) * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_3,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);
}

/**
 * @returns 1 if \p sel has been updated to use a new scratch buffer
 *          0 if not
 *          < 0 if there was a failure
 */
static int si_update_scratch_buffer(struct si_context *sctx,
				    struct si_shader *shader)
{
	uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
	int r;

	if (!shader)
		return 0;

	/* This shader doesn't need a scratch buffer */
	if (shader->scratch_bytes_per_wave == 0)
		return 0;

	/* This shader is already configured to use the current
	 * scratch buffer. */
	if (shader->scratch_bo == sctx->scratch_buffer)
		return 0;

	assert(sctx->scratch_buffer);

	si_shader_apply_scratch_relocs(sctx, shader, scratch_va);

	/* Replace the shader bo with a new bo that has the relocs applied. */
	r = si_shader_binary_upload(sctx->screen, shader);
	if (r)
		return r;

	/* Update the shader state to use the new shader bo. */
	si_shader_init_pm4_state(shader);

	r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

	return 1;
}

static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
	return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
}

static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
{
	return shader ? shader->scratch_bytes_per_wave : 0;
}

static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
	unsigned bytes = 0;

	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tcs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
	return bytes;
}

static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
	unsigned current_scratch_buffer_size =
		si_get_current_scratch_buffer_size(sctx);
	unsigned scratch_bytes_per_wave =
		si_get_max_scratch_bytes_per_wave(sctx);
	unsigned scratch_needed_size = scratch_bytes_per_wave *
		sctx->scratch_waves;
	int r;

	if (scratch_needed_size > 0) {
		if (scratch_needed_size > current_scratch_buffer_size) {
			/* Create a bigger scratch buffer */
			pipe_resource_reference(
					(struct pipe_resource**)&sctx->scratch_buffer,
					NULL);

			sctx->scratch_buffer =
					si_resource_create_custom(&sctx->screen->b.b,
					PIPE_USAGE_DEFAULT, scratch_needed_size);
			if (!sctx->scratch_buffer)
				return false;
			sctx->emit_scratch_reloc = true;
		}

		/* Update the shaders, so they are using the latest scratch.  The
		 * scratch buffer may have been changed since these shaders were
		 * last used, so we still need to try to update them, even if
		 * they require scratch buffers smaller than the current size.
		 */
		r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);

		r = si_update_scratch_buffer(sctx, sctx->tcs_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);

		/* VS can be bound as LS, ES, or VS. */
		r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
		if (r < 0)
			return false;
		if (r == 1) {
			if (sctx->tes_shader.current)
				si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
			else if (sctx->gs_shader.current)
				si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
			else
				si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		}

		/* TES can be bound as ES or VS. */
		r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
		if (r < 0)
			return false;
		if (r == 1) {
			if (sctx->gs_shader.current)
				si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
			else
				si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
		}
	}

	/* The LLVM shader backend should be reporting aligned scratch_sizes. */
	assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
		"scratch size should already be aligned correctly.");

	sctx->spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
				 S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
	return true;
}

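/* Worked example of the SPI_TMPRING_SIZE encoding above (hypothetical values):
 * scratch_bytes_per_wave = 8192 gives WAVESIZE = 8192 >> 10 = 8, i.e. the
 * field is in 1024-byte units (hence the 0x3FF alignment assert above), and
 * with scratch_waves = 32 the scratch buffer must hold at least
 * 8192 * 32 = 256 KB. */
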
static void si_init_tess_factor_ring(struct si_context *sctx)
{
	assert(!sctx->tf_ring);

	sctx->tf_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					   PIPE_USAGE_DEFAULT,
					   32768 * sctx->screen->b.info.max_se);
	if (!sctx->tf_ring)
		return;

	assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);

	si_init_config_add_vgt_flush(sctx);

	/* Append these registers to the init config state. */
	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
			       S_030938_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
			       r600_resource(sctx->tf_ring)->gpu_address >> 8);
	} else {
		si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
			       S_008988_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
			       r600_resource(sctx->tf_ring)->gpu_address >> 8);
	}

	/* Flush the context to re-emit the init_config state.
	 * This is done only once in a lifetime of a context.
	 */
	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_TESS_CTRL,
			   SI_RING_TESS_FACTOR, sctx->tf_ring, 0,
			   sctx->tf_ring->width0, false, false, 0, 0, 0);
}

/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
static void si_generate_fixed_func_tcs(struct si_context *sctx)
{
	struct ureg_src const0, const1;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(TGSI_PROCESSOR_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!sctx->fixed_func_tcs_shader.cso);

	ureg_DECL_constant2D(ureg, 0, 1, SI_DRIVER_STATE_CONST_BUF);
	const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 0),
				    SI_DRIVER_STATE_CONST_BUF);
	const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 1),
				    SI_DRIVER_STATE_CONST_BUF);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, const0);
	ureg_MOV(ureg, tessinner, const1);
	ureg_END(ureg);

	sctx->fixed_func_tcs_shader.cso =
		ureg_create_shader_and_destroy(ureg, &sctx->b.b);
}

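/* Rough sketch (not dumped from ureg) of the TGSI the function above builds:
 *
 *   DCL OUT[0], TESSOUTER
 *   DCL OUT[1], TESSINNER
 *   DCL CONST[SI_DRIVER_STATE_CONST_BUF][0..1]
 *   MOV OUT[0], CONST[SI_DRIVER_STATE_CONST_BUF][0]
 *   MOV OUT[1], CONST[SI_DRIVER_STATE_CONST_BUF][1]
 *   END
 *
 * i.e. the fixed-function TCS just copies the two tess-level vectors out of
 * the driver-state constant buffer. */
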
static void si_update_vgt_shader_config(struct si_context *sctx)
{
	/* Calculate the index of the config.
	 * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
	unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso;
	struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];

	if (!*pm4) {
		uint32_t stages = 0;

		*pm4 = CALLOC_STRUCT(si_pm4_state);

		if (sctx->tes_shader.cso) {
			stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
				  S_028B54_HS_EN(1);

			if (sctx->gs_shader.cso)
				stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
					  S_028B54_GS_EN(1) |
					  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
			else
				stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
		} else if (sctx->gs_shader.cso) {
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				  S_028B54_GS_EN(1) |
				  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
		}

		si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
	}
	si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}

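/* Summary of the four configurations selected above (index = 2*tess + gs):
 *   0: VS only      - all stages off, the API VS runs as the hardware VS
 *   1: VS+GS        - ES (real) + GS enabled, the GS copy shader runs as VS
 *   2: VS+Tess      - LS + HS enabled, TES runs as the hardware VS (DS)
 *   3: VS+Tess+GS   - LS + HS enabled, TES runs as ES (DS), GS enabled,
 *                     the GS copy shader runs as the hardware VS
 */
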
static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
{
	struct pipe_stream_output_info *so = &shader->so;
	uint32_t enabled_stream_buffers_mask = 0;
	int i;

	for (i = 0; i < so->num_outputs; i++)
		enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << (so->output[i].stream * 4);
	sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
	sctx->b.streamout.stride_in_dw = shader->so.stride;
}

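/* si_update_shaders() below maps API shaders to hardware stages as follows
 * (derived from the bind calls it makes):
 *   no tess, no GS: VS -> hardware VS
 *   GS only:        VS -> ES, GS -> GS, GS copy shader -> hardware VS
 *   tess only:      VS -> LS, TCS (or fixed-func TCS) -> HS, TES -> hardware VS
 *   tess + GS:      VS -> LS, TCS/fixed-func TCS -> HS, TES -> ES, GS -> GS,
 *                   GS copy shader -> hardware VS
 */
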
bool si_update_shaders(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context*)sctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	int r;

	/* Update stages before GS. */
	if (sctx->tes_shader.cso) {
		if (!sctx->tf_ring) {
			si_init_tess_factor_ring(sctx);
			if (!sctx->tf_ring)
				return false;
		}

		/* VS as LS */
		r = si_shader_select(ctx, &sctx->vs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);

		if (sctx->tcs_shader.cso) {
			r = si_shader_select(ctx, &sctx->tcs_shader);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
		} else {
			if (!sctx->fixed_func_tcs_shader.cso) {
				si_generate_fixed_func_tcs(sctx);
				if (!sctx->fixed_func_tcs_shader.cso)
					return false;
			}

			r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs,
					  sctx->fixed_func_tcs_shader.current->pm4);
		}

		r = si_shader_select(ctx, &sctx->tes_shader);
		if (r)
			return false;

		if (sctx->gs_shader.cso) {
			/* TES as ES */
			si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
		} else {
			/* TES as VS */
			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
			si_update_so(sctx, sctx->tes_shader.cso);
		}
	} else if (sctx->gs_shader.cso) {
		/* VS as ES */
		r = si_shader_select(ctx, &sctx->vs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
	} else {
		/* VS as VS */
		r = si_shader_select(ctx, &sctx->vs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		si_update_so(sctx, sctx->vs_shader.cso);
	}

	/* Update GS. */
	if (sctx->gs_shader.cso) {
		r = si_shader_select(ctx, &sctx->gs_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader.current->gs_copy_shader->pm4);
		si_update_so(sctx, sctx->gs_shader.cso);

		if (!sctx->gsvs_ring) {
			si_init_gs_rings(sctx);
			if (!sctx->gsvs_ring)
				return false;
		}

		si_update_gs_rings(sctx);
	} else {
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_update_vgt_shader_config(sctx);

	if (sctx->ps_shader.cso) {
		r = si_shader_select(ctx, &sctx->ps_shader);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
		    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
		    sctx->flatshade != rs->flatshade) {
			sctx->sprite_coord_enable = rs->sprite_coord_enable;
			sctx->flatshade = rs->flatshade;
			si_mark_atom_dirty(sctx, &sctx->spi_map);
		}

		if (si_pm4_state_changed(sctx, ps) ||
		    sctx->force_persample_interp != rs->force_persample_interp) {
			sctx->force_persample_interp = rs->force_persample_interp;
			si_mark_atom_dirty(sctx, &sctx->spi_ps_input);
		}

		if (sctx->ps_db_shader_control != sctx->ps_shader.current->db_shader_control) {
			sctx->ps_db_shader_control = sctx->ps_shader.current->db_shader_control;
			si_mark_atom_dirty(sctx, &sctx->db_render_state);
		}

		if (sctx->smoothing_enabled != sctx->ps_shader.current->key.ps.poly_line_smoothing) {
			sctx->smoothing_enabled = sctx->ps_shader.current->key.ps.poly_line_smoothing;
			si_mark_atom_dirty(sctx, &sctx->msaa_config);

			if (sctx->b.chip_class == SI)
				si_mark_atom_dirty(sctx, &sctx->db_render_state);
		}
	}

	if (si_pm4_state_changed(sctx, ls) ||
	    si_pm4_state_changed(sctx, hs) ||
	    si_pm4_state_changed(sctx, es) ||
	    si_pm4_state_changed(sctx, gs) ||
	    si_pm4_state_changed(sctx, vs) ||
	    si_pm4_state_changed(sctx, ps)) {
		if (!si_update_spi_tmpring_size(sctx))
			return false;
	}
	return true;
}

void si_init_shader_functions(struct si_context *sctx)
{
	si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);
	si_init_atom(sctx, &sctx->spi_ps_input, &sctx->atoms.s.spi_ps_input, si_emit_spi_ps_input);

	sctx->b.b.create_vs_state = si_create_shader_selector;
	sctx->b.b.create_tcs_state = si_create_shader_selector;
	sctx->b.b.create_tes_state = si_create_shader_selector;
	sctx->b.b.create_gs_state = si_create_shader_selector;
	sctx->b.b.create_fs_state = si_create_shader_selector;

	sctx->b.b.bind_vs_state = si_bind_vs_shader;
	sctx->b.b.bind_tcs_state = si_bind_tcs_shader;
	sctx->b.b.bind_tes_state = si_bind_tes_shader;
	sctx->b.b.bind_gs_state = si_bind_gs_shader;
	sctx->b.b.bind_fs_state = si_bind_ps_shader;

	sctx->b.b.delete_vs_state = si_delete_shader_selector;
	sctx->b.b.delete_tcs_state = si_delete_shader_selector;
	sctx->b.b.delete_tes_state = si_delete_shader_selector;
	sctx->b.b.delete_gs_state = si_delete_shader_selector;
	sctx->b.b.delete_fs_state = si_delete_shader_selector;
}