/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"
#include "radeon/r600_cs.h"

#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"

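/* This file builds the per-stage PM4 register state for shader variants
 * (LS, HS, ES, GS, VS, PS), selects variants to match the current pipe
 * state, and manages the ESGS/GSVS rings, the tess factor ring and the
 * scratch buffer. */

/* Derive VGT_TF_PARAM (tessellation domain type, partitioning and output
 * topology) from the TES declaration properties. */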
static void si_set_tesseval_regs(struct si_shader *shader,
				 struct si_pm4_state *pm4)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
	unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
	bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
	bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
	unsigned type, partitioning, topology;

	switch (tes_prim_mode) {
	case PIPE_PRIM_LINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	case PIPE_PRIM_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case PIPE_PRIM_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	default:
		assert(0);
		return;
	}

	switch (tes_spacing) {
	case PIPE_TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	case PIPE_TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	default:
		assert(0);
		return;
	}

	if (tes_point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes_prim_mode == PIPE_PRIM_LINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (tes_vertex_order_cw)
		/* for some reason, this must be the other way around */
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
		       S_028B6C_TYPE(type) |
		       S_028B6C_PARTITIONING(partitioning) |
		       S_028B6C_TOPOLOGY(topology));
}

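/* LS: the API vertex shader compiled as the first stage of the tessellation
 * pipeline. Note the RSRC1 encoding used by all stages below: the VGPRS
 * field counts in units of 4 registers and SGPRS in units of 8, hence the
 * (n - 1) / 4 and (n - 1) / 8 terms. ls_rsrc1/ls_rsrc2 are only computed and
 * stored here; they are emitted at draw time, when the LDS size required by
 * tessellation is known. */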
static void si_shader_ls(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	/* We need at least 2 components for LS.
	 * VGPR0-3: (VertexID, RelAutoindex, ???, InstanceID). */
	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 1;

	num_user_sgprs = SI_LS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
	si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);

	shader->ls_rsrc1 = S_00B528_VGPRS((shader->num_vgprs - 1) / 4) |
			   S_00B528_SGPRS((num_sgprs - 1) / 8) |
			   S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt);
	shader->ls_rsrc2 = S_00B52C_USER_SGPR(num_user_sgprs) |
			   S_00B52C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0);
}

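/* HS: the tessellation control shader (TCS) stage; it reads the LS outputs
 * from LDS and writes tessellation factors and patch data. */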
static void si_shader_hs(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	num_user_sgprs = SI_TCS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with tessellation factor
	 * buffer offset. */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
	si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
	si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
		       S_00B428_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B428_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
		       S_00B42C_USER_SGPR(num_user_sgprs) |
		       S_00B42C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}

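/* ES: the "export shader" stage that writes its outputs to the ESGS ring
 * when a geometry shader is active; either the API VS or TES runs as ES. */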
static void si_shader_es(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	if (shader->selector->type == PIPE_SHADER_VERTEX) {
		vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;
		num_user_sgprs = SI_VS_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = 3; /* all components are needed for TES */
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B328_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs) |
		       S_00B32C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(shader, pm4);
}

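/* Return the highest vertex stream index written by stream output
 * (0 when streamout is unused). */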
static unsigned si_gs_get_max_stream(struct si_shader *shader)
{
	struct pipe_stream_output_info *so = &shader->selector->so;
	unsigned max_stream = 0, i;

	if (so->num_outputs == 0)
		return 0;

	for (i = 0; i < so->num_outputs; i++) {
		if (so->output[i].stream > max_stream)
			max_stream = so->output[i].stream;
	}
	return max_stream;
}

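/* GS: gs_vert_itemsize below is the size of one emitted vertex in bytes
 * (one vec4 = 16 bytes per output slot); the ring ITEMSIZE/OFFSET registers
 * are programmed in dwords, hence the >> 2. */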
static void si_shader_gs(struct si_shader *shader)
{
	unsigned gs_vert_itemsize = shader->selector->info.num_outputs * 16;
	unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
	unsigned gsvs_itemsize = (gs_vert_itemsize * gs_max_vert_out) >> 2;
	unsigned gs_num_invocations = shader->selector->gs_num_invocations;
	unsigned cut_mode;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;
	unsigned max_stream = si_gs_get_max_stream(shader);

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(gsvs_itemsize < (1 << 15));

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
		       S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		       S_028A40_CUT_MODE(cut_mode) |
		       S_028A40_ES_WRITE_OPTIMIZE(1) |
		       S_028A40_GS_WRITE_OPTIMIZE(1));

	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize * ((max_stream >= 2) ? 2 : 1));
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize * ((max_stream >= 3) ? 3 : 1));

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       util_bitcount64(shader->selector->inputs_read) * (16 >> 2));
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize * (max_stream + 1));

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize >> 2);
	si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? gs_vert_itemsize >> 2 : 0);
	si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? gs_vert_itemsize >> 2 : 0);
	si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? gs_vert_itemsize >> 2 : 0);

	si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
		       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
		       S_028B90_ENABLE(gs_num_invocations > 0));

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
	si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

	num_user_sgprs = SI_GS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
	if ((num_user_sgprs + 2) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
		       S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B228_SGPRS((num_sgprs - 1) / 8) |
		       S_00B228_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
		       S_00B22C_USER_SGPR(num_user_sgprs) |
		       S_00B22C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}

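/* VS here is the hardware VS stage, the last geometry stage before the
 * rasterizer. It runs the API vertex shader, TES, or the GS copy shader,
 * depending on which pipeline stages are enabled. */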
static void si_shader_vs(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, vgpr_comp_cnt;
	uint64_t va;
	unsigned window_space =
	   shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
	bool enable_prim_id = si_vs_exports_prim_id(shader);

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	/* If this is the GS copy shader, the GS state writes this register.
	 * Otherwise, the VS state writes it.
	 */
	if (!shader->is_gs_copy_shader) {
		si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
			       S_028A40_MODE(enable_prim_id ? V_028A40_GS_SCENARIO_A : 0));
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
	} else
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	if (shader->is_gs_copy_shader) {
		vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
		vgpr_comp_cnt = shader->uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
		num_user_sgprs = SI_VS_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = 3; /* all components are needed for TES */
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	/* VS is required to export at least one param. */
	nparams = MAX2(shader->nr_param_exports, 1);
	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B128_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
		       S_00B12C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));

	if (window_space)
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	else
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(shader, pm4);
}

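/* PS: programs the interpolation setup (SPI_BARYC_CNTL, SPI_PS_IN_CONTROL),
 * the input-enable masks and the Z/color export formats. */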
static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned i, spi_ps_in_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0, spi_ps_input_ena;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	for (i = 0; i < info->num_inputs; i++) {
		switch (info->input_semantic_name[i]) {
		case TGSI_SEMANTIC_POSITION:
			/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
			 * Possible values:
			 * 0 -> Position = pixel center (default)
			 * 1 -> Position = pixel centroid
			 * 2 -> Position = at sample position
			 */
			switch (info->input_interpolate_loc[i]) {
			case TGSI_INTERPOLATE_LOC_CENTROID:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
				break;
			case TGSI_INTERPOLATE_LOC_SAMPLE:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
				break;
			}

			if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
			    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
			break;
		}
	}

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->nparam) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(1);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	spi_ps_input_ena = shader->spi_ps_input_ena;
	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8) |
		       S_00B028_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs) |
		       S_00B02C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}

static void si_shader_init_pm4_state(struct si_shader *shader)
{
	if (shader->pm4)
		si_pm4_free_state_simple(shader->pm4);

	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.vs.as_ls)
			si_shader_ls(shader);
		else if (shader->key.vs.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader);
		break;
	case PIPE_SHADER_TESS_CTRL:
		si_shader_hs(shader);
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (shader->key.tes.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(shader);
		si_shader_vs(shader->gs_copy_shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}

/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
					  struct si_shader_selector *sel,
					  union si_shader_key *key)
{
	struct si_context *sctx = (struct si_context *)ctx;
	unsigned i;

	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX:
		if (sctx->vertex_elements)
			for (i = 0; i < sctx->vertex_elements->count; ++i)
				key->vs.instance_divisors[i] =
					sctx->vertex_elements->elements[i].instance_divisor;

		if (sctx->tes_shader)
			key->vs.as_ls = 1;
		else if (sctx->gs_shader) {
			key->vs.as_es = 1;
			key->vs.es_enabled_outputs = sctx->gs_shader->inputs_read;
		}

		if (!sctx->gs_shader && sctx->ps_shader &&
		    sctx->ps_shader->info.uses_primid)
			key->vs.export_prim_id = 1;
		break;
	case PIPE_SHADER_TESS_CTRL:
		key->tcs.prim_mode =
			sctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (sctx->gs_shader) {
			key->tes.as_es = 1;
			key->tes.es_enabled_outputs = sctx->gs_shader->inputs_read;
		} else if (sctx->ps_shader && sctx->ps_shader->info.uses_primid)
			key->tes.export_prim_id = 1;
		break;
	case PIPE_SHADER_GEOMETRY:
		break;
	case PIPE_SHADER_FRAGMENT: {
		struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

		if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
			key->ps.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
		key->ps.export_16bpc = sctx->framebuffer.export_16bpc;

		if (rs) {
			bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
					sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
				       sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
			bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;

			key->ps.color_two_side = rs->two_side;

			if (sctx->queued.named.blend) {
				key->ps.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
						       rs->multisample_enable &&
						       !sctx->framebuffer.cb0_is_integer;
			}

			key->ps.poly_stipple = rs->poly_stipple_enable && is_poly;
			key->ps.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
						       (is_line && rs->line_smooth)) &&
						      sctx->framebuffer.nr_samples <= 1;
		}

		key->ps.alpha_func = PIPE_FUNC_ALWAYS;
		/* Alpha-test should be disabled if colorbuffer 0 is integer. */
		if (sctx->queued.named.dsa &&
		    !sctx->framebuffer.cb0_is_integer)
			key->ps.alpha_func = sctx->queued.named.dsa->alpha_func;
		break;
	}
	default:
		assert(0);
	}
}

/* Select the hw shader variant depending on the current state. */
static int si_shader_select(struct pipe_context *ctx,
			    struct si_shader_selector *sel)
{
	struct si_context *sctx = (struct si_context *)ctx;
	union si_shader_key key;
	struct si_shader *shader = NULL;
	int r;

	si_shader_selector_key(ctx, sel, &key);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct si_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (shader) {
		shader->next_variant = sel->current;
		sel->current = shader;
	} else {
		shader = CALLOC(1, sizeof(struct si_shader));
		shader->selector = sel;
		shader->key = key;

		shader->next_variant = sel->current;
		sel->current = shader;
		r = si_shader_create((struct si_screen *)ctx->screen, sctx->tm,
				     shader);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}
		si_shader_init_pm4_state(shader);
		sel->num_shaders++;
		p_atomic_inc(&sctx->screen->b.num_compilations);
	}

	return 0;
}

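/* Create a shader selector: the state object shared by all variants of one
 * API shader. The TGSI is scanned once here and the information derived
 * from it (GS properties, input/output masks) is cached for variant
 * compilation. */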
static void *si_create_shader_state(struct pipe_context *ctx,
				    const struct pipe_shader_state *state,
				    unsigned pipe_shader_type)
{
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
	unsigned i;

	if (!sel)
		return NULL;

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(state->tokens);
	if (!sel->tokens) {
		FREE(sel);
		return NULL;
	}

	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);
	p_atomic_inc(&sscreen->b.num_shaders_created);

	switch (pipe_shader_type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		sel->gsvs_itemsize = sel->info.num_outputs * 16 *
				     sel->gs_max_out_vertices;

		for (i = 0; i < sel->info.num_inputs; i++) {
			unsigned name = sel->info.input_semantic_name[i];
			unsigned index = sel->info.input_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_PRIMID:
				break;
			default:
				sel->inputs_read |=
					1llu << si_shader_io_get_unique_index(name, index);
			}
		}
		break;

	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_CTRL:
		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->patch_outputs_written |=
					1llu << si_shader_io_get_unique_index(name, index);
				break;
			default:
				sel->outputs_written |=
					1llu << si_shader_io_get_unique_index(name, index);
			}
		}
		break;
	case PIPE_SHADER_FRAGMENT:
		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			if (name == TGSI_SEMANTIC_COLOR)
				sel->ps_colors_written |= 1 << index;
		}
		break;
	}

	if (sscreen->b.debug_flags & DBG_PRECOMPILE)
		si_shader_select(ctx, sel);

	return sel;
}

static void *si_create_fs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *si_create_gs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}

static void *si_create_vs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void *si_create_tcs_state(struct pipe_context *ctx,
				 const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_TESS_CTRL);
}

static void *si_create_tes_state(struct pipe_context *ctx,
				 const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_TESS_EVAL);
}

/**
 * Normally, we only emit 1 viewport and 1 scissor if no shader is using
 * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
 * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
 * called to emit the rest.
 */
static void si_update_viewports_and_scissors(struct si_context *sctx)
{
	struct tgsi_shader_info *info = si_get_vs_info(sctx);

	if (!info || !info->writes_viewport_index)
		return;

	if (sctx->scissors.dirty_mask)
		si_mark_atom_dirty(sctx, &sctx->scissors.atom);
	if (sctx->viewports.dirty_mask)
		si_mark_atom_dirty(sctx, &sctx->viewports.atom);
}

static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->vs_shader == sel || !sel)
		return;

	sctx->vs_shader = sel;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	si_update_viewports_and_scissors(sctx);
}

static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->gs_shader != !!sel;

	if (sctx->gs_shader == sel)
		return;

	sctx->gs_shader = sel;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed)
		si_shader_change_notify(sctx);
	si_update_viewports_and_scissors(sctx);
}

static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tcs_shader != !!sel;

	if (sctx->tcs_shader == sel)
		return;

	sctx->tcs_shader = sel;

	if (enable_changed)
		sctx->last_tcs = NULL; /* invalidate derived tess state */
}

static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tes_shader != !!sel;

	if (sctx->tes_shader == sel)
		return;

	sctx->tes_shader = sel;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed) {
		si_shader_change_notify(sctx);
		sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
	}
	si_update_viewports_and_scissors(sctx);
}

*sctx
)
861 if (!sctx
->dummy_pixel_shader
) {
862 sctx
->dummy_pixel_shader
=
863 util_make_fragment_cloneinput_shader(&sctx
->b
.b
, 0,
864 TGSI_SEMANTIC_GENERIC
,
865 TGSI_INTERPOLATE_CONSTANT
);
869 static void si_bind_ps_shader(struct pipe_context
*ctx
, void *state
)
871 struct si_context
*sctx
= (struct si_context
*)ctx
;
872 struct si_shader_selector
*sel
= state
;
874 /* skip if supplied shader is one already in use */
875 if (sctx
->ps_shader
== sel
)
878 /* use a dummy shader if binding a NULL shader */
880 si_make_dummy_ps(sctx
);
881 sel
= sctx
->dummy_pixel_shader
;
884 sctx
->ps_shader
= sel
;
885 si_mark_atom_dirty(sctx
, &sctx
->cb_target_mask
);
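/* Free every variant in the selector's list, unbinding each variant's PM4
 * state from the hardware stage it was compiled for. */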
static void si_delete_shader_selector(struct pipe_context *ctx,
				      struct si_shader_selector *sel)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (p->key.vs.as_ls)
				si_pm4_delete_state(sctx, ls, p->pm4);
			else if (p->key.vs.as_es)
				si_pm4_delete_state(sctx, es, p->pm4);
			else
				si_pm4_delete_state(sctx, vs, p->pm4);
			break;
		case PIPE_SHADER_TESS_CTRL:
			si_pm4_delete_state(sctx, hs, p->pm4);
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (p->key.tes.as_es)
				si_pm4_delete_state(sctx, es, p->pm4);
			else
				si_pm4_delete_state(sctx, vs, p->pm4);
			break;
		case PIPE_SHADER_GEOMETRY:
			si_pm4_delete_state(sctx, gs, p->pm4);
			si_pm4_delete_state(sctx, vs, p->gs_copy_shader->pm4);
			break;
		case PIPE_SHADER_FRAGMENT:
			si_pm4_delete_state(sctx, ps, p->pm4);
			break;
		}

		si_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}

static void si_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->vs_shader == sel) {
		sctx->vs_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}

static void si_delete_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->gs_shader == sel) {
		sctx->gs_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}

static void si_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->ps_shader == sel) {
		sctx->ps_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}

static void si_delete_tcs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->tcs_shader == sel) {
		sctx->tcs_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}

static void si_delete_tes_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->tes_shader == sel) {
		sctx->tes_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}

static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	struct si_shader *ps = sctx->ps_shader->current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo = &ps->selector->info;
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	unsigned i, j, tmp, num_written = 0;

	if (!ps->nparam)
		return;

	radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, ps->nparam);

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];
		unsigned param_offset = ps->ps_input_param_offset[i];

		if (name == TGSI_SEMANTIC_POSITION ||
		    name == TGSI_SEMANTIC_FACE)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
			tmp |= S_028644_FLAT_SHADE(1);

		if (name == TGSI_SEMANTIC_PCOORD ||
		    (name == TGSI_SEMANTIC_TEXCOORD &&
		     sctx->sprite_coord_enable & (1 << index))) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vsinfo->num_outputs; j++) {
			if (name == vsinfo->output_semantic_name[j] &&
			    index == vsinfo->output_semantic_index[j]) {
				tmp |= S_028644_OFFSET(vs->vs_output_param_offset[j]);
				break;
			}
		}

		if (name == TGSI_SEMANTIC_PRIMID)
			/* PrimID is written after the last output. */
			tmp |= S_028644_OFFSET(vs->vs_output_param_offset[vsinfo->num_outputs]);
		else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(tmp)) {
			/* No corresponding output found, load defaults into input.
			 * Don't set any other bits.
			 * (FLAT_SHADE=1 completely changes behavior) */
			tmp = S_028644_OFFSET(0x20);
		}

		assert(param_offset == num_written);
		radeon_emit(cs, tmp);
		num_written++;

		if (name == TGSI_SEMANTIC_COLOR &&
		    ps->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}
	assert(ps->nparam == num_written);
}

/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
	unsigned esgs_ring_size = 128 * 1024;
	unsigned gsvs_ring_size = 60 * 1024 * 1024;

	assert(!sctx->esgs_ring && !sctx->gsvs_ring);

	sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, esgs_ring_size);

	sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, gsvs_ring_size);

	/* Append these registers to the init config state. */
	if (sctx->b.chip_class >= CIK) {
		if (sctx->b.chip_class >= VI) {
			/* The maximum sizes are 63.999 MB on VI, because
			 * the register fields only have 18 bits. */
			assert(esgs_ring_size / 256 < (1 << 18));
			assert(gsvs_ring_size / 256 < (1 << 18));
		}
		si_pm4_set_reg(sctx->init_config, R_030900_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->init_config, R_030904_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	} else {
		si_pm4_set_reg(sctx->init_config, R_0088C8_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->init_config, R_0088CC_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	}

	/* Flush the context to re-emit the init_config state.
	 * This is done only once in a lifetime of a context.
	 */
	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   true, true, 4, 64, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   false, false, 0, 0, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
			   sctx->gsvs_ring, 0, gsvs_ring_size,
			   false, false, 0, 0, 0);
}

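/* Rebind the GSVS ring views when the GS vertex size changes. One view is
 * bound per vertex stream; the offsets below space the streams
 * gsvs_itemsize * 64 bytes apart, the 64 presumably matching the 64 threads
 * of a wavefront (an assumption; it is not spelled out in this file). */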
static void si_update_gs_rings(struct si_context *sctx)
{
	unsigned gsvs_itemsize = sctx->gs_shader->gsvs_itemsize;
	uint64_t offset;

	if (gsvs_itemsize == sctx->last_gsvs_itemsize)
		return;

	sctx->last_gsvs_itemsize = gsvs_itemsize;

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, 0);

	offset = gsvs_itemsize * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_1,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);

	offset = (gsvs_itemsize * 2) * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_2,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);

	offset = (gsvs_itemsize * 3) * 64;
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_3,
			   sctx->gsvs_ring, gsvs_itemsize,
			   64, true, true, 4, 16, offset);
}

/**
 * @returns 1 if \p sel has been updated to use a new scratch buffer and 0
 *          otherwise.
 */
static unsigned si_update_scratch_buffer(struct si_context *sctx,
					 struct si_shader_selector *sel)
{
	struct si_shader *shader;
	uint64_t scratch_va = sctx->scratch_buffer->gpu_address;

	if (!sel)
		return 0;

	shader = sel->current;

	/* This shader doesn't need a scratch buffer */
	if (shader->scratch_bytes_per_wave == 0)
		return 0;

	/* This shader is already configured to use the current
	 * scratch buffer. */
	if (shader->scratch_bo == sctx->scratch_buffer)
		return 0;

	assert(sctx->scratch_buffer);

	si_shader_apply_scratch_relocs(sctx, shader, scratch_va);

	/* Replace the shader bo with a new bo that has the relocs applied. */
	si_shader_binary_upload(sctx->screen, shader);

	/* Update the shader state to use the new shader bo. */
	si_shader_init_pm4_state(shader);

	r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

	return 1;
}

static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
	if (!sctx->scratch_buffer)
		return 0;

	return sctx->scratch_buffer->b.b.width0;
}

static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_context *sctx,
						     struct si_shader_selector *sel)
{
	if (!sel)
		return 0;

	return sel->current->scratch_bytes_per_wave;
}

static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
	unsigned bytes = 0;

	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx, sctx->ps_shader));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx, sctx->gs_shader));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx, sctx->vs_shader));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx, sctx->tcs_shader));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx, sctx->tes_shader));
	return bytes;
}

static void si_update_spi_tmpring_size(struct si_context *sctx)
{
	unsigned current_scratch_buffer_size =
		si_get_current_scratch_buffer_size(sctx);
	unsigned scratch_bytes_per_wave =
		si_get_max_scratch_bytes_per_wave(sctx);
	unsigned scratch_needed_size = scratch_bytes_per_wave *
		sctx->scratch_waves;

	if (scratch_needed_size > 0) {
		if (scratch_needed_size > current_scratch_buffer_size) {
			/* Create a bigger scratch buffer */
			pipe_resource_reference(
				(struct pipe_resource**)&sctx->scratch_buffer,
				NULL);

			sctx->scratch_buffer =
				si_resource_create_custom(&sctx->screen->b.b,
							  PIPE_USAGE_DEFAULT,
							  scratch_needed_size);
		}

		/* Update the shaders, so they are using the latest scratch. The
		 * scratch buffer may have been changed since these shaders were
		 * last used, so we still need to try to update them, even if
		 * they require scratch buffers smaller than the current size.
		 */
		if (si_update_scratch_buffer(sctx, sctx->ps_shader))
			si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);
		if (si_update_scratch_buffer(sctx, sctx->gs_shader))
			si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
		if (si_update_scratch_buffer(sctx, sctx->tcs_shader))
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader->current->pm4);

		/* VS can be bound as LS, ES, or VS. */
		if (sctx->tes_shader) {
			if (si_update_scratch_buffer(sctx, sctx->vs_shader))
				si_pm4_bind_state(sctx, ls, sctx->vs_shader->current->pm4);
		} else if (sctx->gs_shader) {
			if (si_update_scratch_buffer(sctx, sctx->vs_shader))
				si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);
		} else {
			if (si_update_scratch_buffer(sctx, sctx->vs_shader))
				si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
		}

		/* TES can be bound as ES or VS. */
		if (sctx->gs_shader) {
			if (si_update_scratch_buffer(sctx, sctx->tes_shader))
				si_pm4_bind_state(sctx, es, sctx->tes_shader->current->pm4);
		} else {
			if (si_update_scratch_buffer(sctx, sctx->tes_shader))
				si_pm4_bind_state(sctx, vs, sctx->tes_shader->current->pm4);
		}
	}

	/* The LLVM shader backend should be reporting aligned scratch_sizes. */
	assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
	       "scratch size should already be aligned correctly.");

	sctx->spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
				 S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
}

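/* The tess factor ring is written by the HS stage and read by the
 * fixed-function tessellator; the allocation below sizes it at 32K bytes
 * per shader engine. */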
static void si_init_tess_factor_ring(struct si_context *sctx)
{
	assert(!sctx->tf_ring);

	sctx->tf_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					   PIPE_USAGE_DEFAULT,
					   32768 * sctx->screen->b.info.max_se);
	assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);

	/* Append these registers to the init config state. */
	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
			       S_030938_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
			       r600_resource(sctx->tf_ring)->gpu_address >> 8);
	} else {
		si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
			       S_008988_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
			       r600_resource(sctx->tf_ring)->gpu_address >> 8);
	}

	/* Flush the context to re-emit the init_config state.
	 * This is done only once in a lifetime of a context.
	 */
	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_TESS_CTRL,
			   SI_RING_TESS_FACTOR, sctx->tf_ring, 0,
			   sctx->tf_ring->width0, false, false, 0, 0, 0);
}

/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
static void si_generate_fixed_func_tcs(struct si_context *sctx)
{
	struct ureg_src const0, const1;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(TGSI_PROCESSOR_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!sctx->fixed_func_tcs_shader);

	ureg_DECL_constant2D(ureg, 0, 1, SI_DRIVER_STATE_CONST_BUF);
	const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 0),
				    SI_DRIVER_STATE_CONST_BUF);
	const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 1),
				    SI_DRIVER_STATE_CONST_BUF);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, const0);
	ureg_MOV(ureg, tessinner, const1);
	ureg_END(ureg);

	sctx->fixed_func_tcs_shader =
		ureg_create_shader_and_destroy(ureg, &sctx->b.b);
	assert(sctx->fixed_func_tcs_shader);
}

static void si_update_vgt_shader_config(struct si_context *sctx)
{
	/* Calculate the index of the config.
	 * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
	unsigned index = 2*!!sctx->tes_shader + !!sctx->gs_shader;
	struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];

	if (!*pm4) {
		uint32_t stages = 0;

		*pm4 = CALLOC_STRUCT(si_pm4_state);

		if (sctx->tes_shader) {
			stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
				  S_028B54_HS_EN(1);

			if (sctx->gs_shader)
				stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
					  S_028B54_GS_EN(1) |
					  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
			else
				stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
		} else if (sctx->gs_shader) {
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				  S_028B54_GS_EN(1) |
				  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
		}

		si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
	}
	si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}

static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
{
	struct pipe_stream_output_info *so = &shader->so;
	uint32_t enabled_stream_buffers_mask = 0;
	unsigned i;

	for (i = 0; i < so->num_outputs; i++)
		enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << (so->output[i].stream * 4);
	sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
	sctx->b.streamout.stride_in_dw = shader->so.stride;
}

void si_update_shaders(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context *)sctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

	/* Update stages before GS. */
	if (sctx->tes_shader) {
		if (!sctx->tf_ring)
			si_init_tess_factor_ring(sctx);

		/* VS as LS */
		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, ls, sctx->vs_shader->current->pm4);

		if (sctx->tcs_shader) {
			si_shader_select(ctx, sctx->tcs_shader);
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader->current->pm4);
		} else {
			if (!sctx->fixed_func_tcs_shader)
				si_generate_fixed_func_tcs(sctx);
			si_shader_select(ctx, sctx->fixed_func_tcs_shader);
			si_pm4_bind_state(sctx, hs,
					  sctx->fixed_func_tcs_shader->current->pm4);
		}

		si_shader_select(ctx, sctx->tes_shader);
		if (sctx->gs_shader) {
			/* TES as ES */
			si_pm4_bind_state(sctx, es, sctx->tes_shader->current->pm4);
		} else {
			/* TES as VS */
			si_pm4_bind_state(sctx, vs, sctx->tes_shader->current->pm4);
			si_update_so(sctx, sctx->tes_shader);
		}
	} else if (sctx->gs_shader) {
		/* VS as ES */
		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);
	} else {
		/* VS as VS */
		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
		si_update_so(sctx, sctx->vs_shader);
	}

	if (sctx->gs_shader) {
		si_shader_select(ctx, sctx->gs_shader);
		si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader->current->gs_copy_shader->pm4);
		si_update_so(sctx, sctx->gs_shader);

		if (!sctx->gsvs_ring)
			si_init_gs_rings(sctx);

		si_update_gs_rings(sctx);
	} else {
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_update_vgt_shader_config(sctx);

	si_shader_select(ctx, sctx->ps_shader);

	if (!sctx->ps_shader->current) {
		struct si_shader_selector *sel;

		/* use a dummy shader if compiling the shader (variant) failed */
		si_make_dummy_ps(sctx);
		sel = sctx->dummy_pixel_shader;
		si_shader_select(ctx, sel);
		sctx->ps_shader->current = sel->current;
	}

	si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);

	if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
	    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
	    sctx->flatshade != rs->flatshade) {
		sctx->sprite_coord_enable = rs->sprite_coord_enable;
		sctx->flatshade = rs->flatshade;
		si_mark_atom_dirty(sctx, &sctx->spi_map);
	}

	if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
	    si_pm4_state_changed(sctx, gs)) {
		si_update_spi_tmpring_size(sctx);
	}

	if (sctx->ps_db_shader_control != sctx->ps_shader->current->db_shader_control) {
		sctx->ps_db_shader_control = sctx->ps_shader->current->db_shader_control;
		si_mark_atom_dirty(sctx, &sctx->db_render_state);
	}

	if (sctx->smoothing_enabled != sctx->ps_shader->current->key.ps.poly_line_smoothing) {
		sctx->smoothing_enabled = sctx->ps_shader->current->key.ps.poly_line_smoothing;
		si_mark_atom_dirty(sctx, &sctx->msaa_config);

		if (sctx->b.chip_class == SI)
			si_mark_atom_dirty(sctx, &sctx->db_render_state);
	}
}

void si_init_shader_functions(struct si_context *sctx)
{
	si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);

	sctx->b.b.create_vs_state = si_create_vs_state;
	sctx->b.b.create_tcs_state = si_create_tcs_state;
	sctx->b.b.create_tes_state = si_create_tes_state;
	sctx->b.b.create_gs_state = si_create_gs_state;
	sctx->b.b.create_fs_state = si_create_fs_state;

	sctx->b.b.bind_vs_state = si_bind_vs_shader;
	sctx->b.b.bind_tcs_state = si_bind_tcs_shader;
	sctx->b.b.bind_tes_state = si_bind_tes_shader;
	sctx->b.b.bind_gs_state = si_bind_gs_shader;
	sctx->b.b.bind_fs_state = si_bind_ps_shader;

	sctx->b.b.delete_vs_state = si_delete_vs_shader;
	sctx->b.b.delete_tcs_state = si_delete_tcs_shader;
	sctx->b.b.delete_tes_state = si_delete_tes_shader;
	sctx->b.b.delete_gs_state = si_delete_gs_shader;
	sctx->b.b.delete_fs_state = si_delete_ps_shader;
}