/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"
#include "radeon/r600_cs.h"

#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"
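
/* A worked example of the VGT_TF_PARAM setup below: a TES declaring
 * triangles, fractional-odd spacing and CW vertex order results in
 * TYPE = TESS_TRIANGLE, PARTITIONING = PART_FRAC_ODD and, because of the
 * CW/CCW inversion noted in the code, TOPOLOGY = OUTPUT_TRIANGLE_CCW. */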
static void si_set_tesseval_regs(struct si_shader *shader,
                                 struct si_pm4_state *pm4)
{
        struct tgsi_shader_info *info = &shader->selector->info;
        unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
        unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
        bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
        bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
        unsigned type, partitioning, topology;

        switch (tes_prim_mode) {
        case PIPE_PRIM_LINES:
                type = V_028B6C_TESS_ISOLINE;
                break;
        case PIPE_PRIM_TRIANGLES:
                type = V_028B6C_TESS_TRIANGLE;
                break;
        case PIPE_PRIM_QUADS:
                type = V_028B6C_TESS_QUAD;
                break;
        default:
                assert(0);
                return;
        }

        switch (tes_spacing) {
        case PIPE_TESS_SPACING_FRACTIONAL_ODD:
                partitioning = V_028B6C_PART_FRAC_ODD;
                break;
        case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
                partitioning = V_028B6C_PART_FRAC_EVEN;
                break;
        case PIPE_TESS_SPACING_EQUAL:
                partitioning = V_028B6C_PART_INTEGER;
                break;
        default:
                assert(0);
                return;
        }

        if (tes_point_mode)
                topology = V_028B6C_OUTPUT_POINT;
        else if (tes_prim_mode == PIPE_PRIM_LINES)
                topology = V_028B6C_OUTPUT_LINE;
        else if (tes_vertex_order_cw)
                /* for some reason, this must be the other way around */
                topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
        else
                topology = V_028B6C_OUTPUT_TRIANGLE_CW;

        si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
                       S_028B6C_TYPE(type) |
                       S_028B6C_PARTITIONING(partitioning) |
                       S_028B6C_TOPOLOGY(topology));
}
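
/* Note on the PGM_RSRC1 encodings used by all the si_shader_* helpers
 * below: the VGPRS and SGPRS fields are in granularities of 4 VGPRs and
 * 8 SGPRs. E.g. a binary with num_vgprs = 24 and num_sgprs = 16 programs
 * VGPRS = (24 - 1) / 4 = 5 and SGPRS = (16 - 1) / 8 = 1. */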
static void si_shader_ls(struct si_shader *shader)
{
        struct si_pm4_state *pm4;
        unsigned num_sgprs, num_user_sgprs;
        unsigned vgpr_comp_cnt;
        uint64_t va;

        pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
        if (!pm4)
                return;

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

        /* We need at least 2 components for LS.
         * VGPR0-3: (VertexID, RelAutoindex, ???, InstanceID). */
        vgpr_comp_cnt = shader->uses_instanceid ? 3 : 1;

        num_user_sgprs = SI_LS_NUM_USER_SGPR;
        num_sgprs = shader->num_sgprs;
        if (num_user_sgprs > num_sgprs) {
                /* Last 2 reserved SGPRs are used for VCC */
                num_sgprs = num_user_sgprs + 2;
        }
        assert(num_sgprs <= 104);

        si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
        si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);

        shader->ls_rsrc1 = S_00B528_VGPRS((shader->num_vgprs - 1) / 4) |
                           S_00B528_SGPRS((num_sgprs - 1) / 8) |
                           S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt);
        shader->ls_rsrc2 = S_00B52C_USER_SGPR(num_user_sgprs) |
                           S_00B52C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0);
}
static void si_shader_hs(struct si_shader *shader)
{
        struct si_pm4_state *pm4;
        unsigned num_sgprs, num_user_sgprs;
        uint64_t va;

        pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
        if (!pm4)
                return;

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

        num_user_sgprs = SI_TCS_NUM_USER_SGPR;
        num_sgprs = shader->num_sgprs;
        /* One SGPR after user SGPRs is pre-loaded with tessellation factor
         * buffer offset. */
        if ((num_user_sgprs + 1) > num_sgprs) {
                /* Last 2 reserved SGPRs are used for VCC */
                num_sgprs = num_user_sgprs + 1 + 2;
        }
        assert(num_sgprs <= 104);

        si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
        si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
        si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
                       S_00B428_VGPRS((shader->num_vgprs - 1) / 4) |
                       S_00B428_SGPRS((num_sgprs - 1) / 8));
        si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
                       S_00B42C_USER_SGPR(num_user_sgprs) |
                       S_00B42C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
static void si_shader_es(struct si_shader *shader)
{
        struct si_pm4_state *pm4;
        unsigned num_sgprs, num_user_sgprs;
        unsigned vgpr_comp_cnt;
        uint64_t va;

        pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
        if (!pm4)
                return;

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

        if (shader->selector->type == PIPE_SHADER_VERTEX) {
                vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;
                num_user_sgprs = SI_VS_NUM_USER_SGPR;
        } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
                vgpr_comp_cnt = 3; /* all components are needed for TES */
                num_user_sgprs = SI_TES_NUM_USER_SGPR;
        } else
                unreachable("invalid shader selector type");

        num_sgprs = shader->num_sgprs;
        /* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
        if ((num_user_sgprs + 1) > num_sgprs) {
                /* Last 2 reserved SGPRs are used for VCC */
                num_sgprs = num_user_sgprs + 1 + 2;
        }
        assert(num_sgprs <= 104);

        si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
        si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
        si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
                       S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
                       S_00B328_SGPRS((num_sgprs - 1) / 8) |
                       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
                       S_00B328_DX10_CLAMP(shader->dx10_clamp_mode));
        si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
                       S_00B32C_USER_SGPR(num_user_sgprs) |
                       S_00B32C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));

        if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
                si_set_tesseval_regs(shader, pm4);
}
static unsigned si_gs_get_max_stream(struct si_shader *shader)
{
        struct pipe_stream_output_info *so = &shader->selector->so;
        unsigned max_stream = 0, i;

        if (so->num_outputs == 0)
                return 0;

        for (i = 0; i < so->num_outputs; i++) {
                if (so->output[i].stream > max_stream)
                        max_stream = so->output[i].stream;
        }
        return max_stream;
}
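
/* Sizing example for the GS rings programmed below: a GS writing 4 vec4
 * outputs with gs_max_out_vertices = 256 has gs_vert_itemsize = 4 * 16 =
 * 64 bytes, so gsvs_itemsize = (64 * 256) >> 2 = 4096 dwords, comfortably
 * under the 15-bit limit asserted in si_shader_gs. */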
static void si_shader_gs(struct si_shader *shader)
{
        unsigned gs_vert_itemsize = shader->selector->info.num_outputs * 16;
        unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
        unsigned gsvs_itemsize = (gs_vert_itemsize * gs_max_vert_out) >> 2;
        unsigned gs_num_invocations = shader->selector->gs_num_invocations;
        unsigned cut_mode;
        struct si_pm4_state *pm4;
        unsigned num_sgprs, num_user_sgprs;
        uint64_t va;
        unsigned max_stream = si_gs_get_max_stream(shader);

        /* The GSVS_RING_ITEMSIZE register takes 15 bits */
        assert(gsvs_itemsize < (1 << 15));

        pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
        if (!pm4)
                return;

        if (gs_max_vert_out <= 128) {
                cut_mode = V_028A40_GS_CUT_128;
        } else if (gs_max_vert_out <= 256) {
                cut_mode = V_028A40_GS_CUT_256;
        } else if (gs_max_vert_out <= 512) {
                cut_mode = V_028A40_GS_CUT_512;
        } else {
                assert(gs_max_vert_out <= 1024);
                cut_mode = V_028A40_GS_CUT_1024;
        }

        si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
                       S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
                       S_028A40_CUT_MODE(cut_mode) |
                       S_028A40_ES_WRITE_OPTIMIZE(1) |
                       S_028A40_GS_WRITE_OPTIMIZE(1));

        si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
        si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize * ((max_stream >= 2) ? 2 : 1));
        si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize * ((max_stream >= 3) ? 3 : 1));

        si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
                       util_bitcount64(shader->selector->inputs_read) * (16 >> 2));
        si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize * (max_stream + 1));

        si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);

        si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize >> 2);
        si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? gs_vert_itemsize >> 2 : 0);
        si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? gs_vert_itemsize >> 2 : 0);
        si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? gs_vert_itemsize >> 2 : 0);

        si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
                       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
                       S_028B90_ENABLE(gs_num_invocations > 0));

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
        si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
        si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

        num_user_sgprs = SI_GS_NUM_USER_SGPR;
        num_sgprs = shader->num_sgprs;
        /* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
        if ((num_user_sgprs + 2) > num_sgprs) {
                /* Last 2 reserved SGPRs are used for VCC */
                num_sgprs = num_user_sgprs + 2 + 2;
        }
        assert(num_sgprs <= 104);

        si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
                       S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
                       S_00B228_SGPRS((num_sgprs - 1) / 8) |
                       S_00B228_DX10_CLAMP(shader->dx10_clamp_mode));
        si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
                       S_00B22C_USER_SGPR(num_user_sgprs) |
                       S_00B22C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
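
/* VGPR_COMP_CNT, as used below, selects how many input VGPRs the hardware
 * preloads (a value of N enables VGPR0..VGPRN): 0 is just VertexID, 2 also
 * covers the VGPR this code relies on for PrimitiveID, and 3 covers the
 * VGPR carrying InstanceID. */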
static void si_shader_vs(struct si_shader *shader)
{
        struct si_pm4_state *pm4;
        unsigned num_sgprs, num_user_sgprs;
        unsigned nparams, vgpr_comp_cnt;
        uint64_t va;
        unsigned window_space =
                shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
        bool enable_prim_id = si_vs_exports_prim_id(shader);

        pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
        if (!pm4)
                return;

        /* If this is the GS copy shader, the GS state writes this register.
         * Otherwise, the VS state writes it.
         */
        if (!shader->is_gs_copy_shader) {
                si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
                               S_028A40_MODE(enable_prim_id ? V_028A40_GS_SCENARIO_A : 0));
                si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
        } else
                si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);

        if (shader->is_gs_copy_shader) {
                vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
                num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
        } else if (shader->selector->type == PIPE_SHADER_VERTEX) {
                vgpr_comp_cnt = shader->uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
                num_user_sgprs = SI_VS_NUM_USER_SGPR;
        } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
                vgpr_comp_cnt = 3; /* all components are needed for TES */
                num_user_sgprs = SI_TES_NUM_USER_SGPR;
        } else
                unreachable("invalid shader selector type");

        num_sgprs = shader->num_sgprs;
        if (num_user_sgprs > num_sgprs) {
                /* Last 2 reserved SGPRs are used for VCC */
                num_sgprs = num_user_sgprs + 2;
        }
        assert(num_sgprs <= 104);

        /* VS is required to export at least one param. */
        nparams = MAX2(shader->nr_param_exports, 1);
        si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
                       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

        si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
                       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
                       S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
                                                   V_02870C_SPI_SHADER_4COMP :
                                                   V_02870C_SPI_SHADER_NONE) |
                       S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
                                                   V_02870C_SPI_SHADER_4COMP :
                                                   V_02870C_SPI_SHADER_NONE) |
                       S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
                                                   V_02870C_SPI_SHADER_4COMP :
                                                   V_02870C_SPI_SHADER_NONE));

        si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
        si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
        si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
                       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
                       S_00B128_SGPRS((num_sgprs - 1) / 8) |
                       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
                       S_00B128_DX10_CLAMP(shader->dx10_clamp_mode));
        si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
                       S_00B12C_USER_SGPR(num_user_sgprs) |
                       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
                       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
                       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
                       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
                       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
                       S_00B12C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));

        if (window_space)
                si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
                               S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
        else
                si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
                               S_028818_VTX_W0_FMT(1) |
                               S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
                               S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
                               S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

        if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
                si_set_tesseval_regs(shader, pm4);
}
static void si_shader_ps(struct si_shader *shader)
{
        struct tgsi_shader_info *info = &shader->selector->info;
        struct si_pm4_state *pm4;
        unsigned i, spi_ps_in_control;
        unsigned num_sgprs, num_user_sgprs;
        unsigned spi_baryc_cntl = 0;
        uint64_t va;

        pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
        if (!pm4)
                return;

        for (i = 0; i < info->num_inputs; i++) {
                switch (info->input_semantic_name[i]) {
                case TGSI_SEMANTIC_POSITION:
                        /* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
                         * Possible values:
                         * 0 -> Position = pixel center (default)
                         * 1 -> Position = pixel centroid
                         * 2 -> Position = at sample position
                         */
                        switch (info->input_interpolate_loc[i]) {
                        case TGSI_INTERPOLATE_LOC_CENTROID:
                                spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
                                break;
                        case TGSI_INTERPOLATE_LOC_SAMPLE:
                                spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
                                break;
                        }

                        if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
                            TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
                                spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
                        break;
                }
        }

        spi_ps_in_control = S_0286D8_NUM_INTERP(shader->nparam) |
                            S_0286D8_BC_OPTIMIZE_DISABLE(1);

        si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
        si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

        si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
        si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
                       shader->spi_shader_col_format);
        si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
        si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
        si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

        num_user_sgprs = SI_PS_NUM_USER_SGPR;
        num_sgprs = shader->num_sgprs;
        /* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
        if ((num_user_sgprs + 1) > num_sgprs) {
                /* Last 2 reserved SGPRs are used for VCC */
                num_sgprs = num_user_sgprs + 1 + 2;
        }
        assert(num_sgprs <= 104);

        si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
                       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
                       S_00B028_SGPRS((num_sgprs - 1) / 8) |
                       S_00B028_DX10_CLAMP(shader->dx10_clamp_mode));
        si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
                       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
                       S_00B02C_USER_SGPR(num_user_sgprs) |
                       S_00B02C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
static void si_shader_init_pm4_state(struct si_shader *shader)
{
        if (shader->pm4)
                si_pm4_free_state_simple(shader->pm4);

        switch (shader->selector->type) {
        case PIPE_SHADER_VERTEX:
                if (shader->key.vs.as_ls)
                        si_shader_ls(shader);
                else if (shader->key.vs.as_es)
                        si_shader_es(shader);
                else
                        si_shader_vs(shader);
                break;
        case PIPE_SHADER_TESS_CTRL:
                si_shader_hs(shader);
                break;
        case PIPE_SHADER_TESS_EVAL:
                if (shader->key.tes.as_es)
                        si_shader_es(shader);
                else
                        si_shader_vs(shader);
                break;
        case PIPE_SHADER_GEOMETRY:
                si_shader_gs(shader);
                si_shader_vs(shader->gs_copy_shader);
                break;
        case PIPE_SHADER_FRAGMENT:
                si_shader_ps(shader);
                break;
        default:
                assert(0);
        }
}
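
/* The key gathers all non-TGSI state that affects code generation, e.g.
 * instance divisors for VS, the tessellation primitive mode for TCS, and
 * rasterizer- and blend-derived flags for PS. Draws that produce the same
 * key can reuse one compiled variant. */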
/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
                                          struct si_shader_selector *sel,
                                          union si_shader_key *key)
{
        struct si_context *sctx = (struct si_context *)ctx;
        unsigned i;

        memset(key, 0, sizeof(*key));

        switch (sel->type) {
        case PIPE_SHADER_VERTEX:
                if (sctx->vertex_elements)
                        for (i = 0; i < sctx->vertex_elements->count; ++i)
                                key->vs.instance_divisors[i] =
                                        sctx->vertex_elements->elements[i].instance_divisor;

                if (sctx->tes_shader)
                        key->vs.as_ls = 1;
                else if (sctx->gs_shader) {
                        key->vs.as_es = 1;
                        key->vs.es_enabled_outputs = sctx->gs_shader->inputs_read;
                }

                if (!sctx->gs_shader && sctx->ps_shader &&
                    sctx->ps_shader->info.uses_primid)
                        key->vs.export_prim_id = 1;
                break;
        case PIPE_SHADER_TESS_CTRL:
                key->tcs.prim_mode =
                        sctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
                break;
        case PIPE_SHADER_TESS_EVAL:
                if (sctx->gs_shader) {
                        key->tes.as_es = 1;
                        key->tes.es_enabled_outputs = sctx->gs_shader->inputs_read;
                } else if (sctx->ps_shader && sctx->ps_shader->info.uses_primid)
                        key->tes.export_prim_id = 1;
                break;
        case PIPE_SHADER_GEOMETRY:
                break;
        case PIPE_SHADER_FRAGMENT: {
                struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

                if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
                        key->ps.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
                key->ps.export_16bpc = sctx->framebuffer.export_16bpc;

                if (rs) {
                        bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
                                        sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
                                       sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
                        bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;

                        key->ps.color_two_side = rs->two_side;

                        if (sctx->queued.named.blend) {
                                key->ps.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
                                                       rs->multisample_enable &&
                                                       !sctx->framebuffer.cb0_is_integer;
                        }

                        key->ps.poly_stipple = rs->poly_stipple_enable && is_poly;
                        key->ps.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
                                                       (is_line && rs->line_smooth)) &&
                                                      sctx->framebuffer.nr_samples <= 1;
                }

                key->ps.alpha_func = PIPE_FUNC_ALWAYS;
                /* Alpha-test should be disabled if colorbuffer 0 is integer. */
                if (sctx->queued.named.dsa &&
                    !sctx->framebuffer.cb0_is_integer)
                        key->ps.alpha_func = sctx->queued.named.dsa->alpha_func;
                break;
        }
        default:
                assert(0);
        }
}
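
/* The variant list is kept in most-recently-used order: a variant found
 * further down the list is unlinked and re-inserted at the head, so the
 * fast path in si_shader_select is a single memcmp against
 * sel->current->key. */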
/* Select the hw shader variant depending on the current state. */
static int si_shader_select(struct pipe_context *ctx,
                            struct si_shader_selector *sel)
{
        struct si_context *sctx = (struct si_context *)ctx;
        union si_shader_key key;
        struct si_shader * shader = NULL;
        int r;

        si_shader_selector_key(ctx, sel, &key);

        /* Check if we don't need to change anything.
         * This path is also used for most shaders that don't need multiple
         * variants, it will cost just a computation of the key and this
         * test. */
        if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
                return 0;
        }

        /* lookup if we have other variants in the list */
        if (sel->num_shaders > 1) {
                struct si_shader *p = sel->current, *c = p->next_variant;

                while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
                        p = c;
                        c = c->next_variant;
                }

                if (c) {
                        p->next_variant = c->next_variant;
                        shader = c;
                }
        }

        if (shader) {
                shader->next_variant = sel->current;
                sel->current = shader;
        } else {
                shader = CALLOC(1, sizeof(struct si_shader));
                shader->selector = sel;
                shader->key = key;

                shader->next_variant = sel->current;
                sel->current = shader;
                r = si_shader_create((struct si_screen *)ctx->screen, sctx->tm,
                                     shader);
                if (unlikely(r)) {
                        R600_ERR("Failed to build shader variant (type=%u) %d\n",
                                 sel->type, r);
                        sel->current = NULL;
                        FREE(shader);
                        return r;
                }
                si_shader_init_pm4_state(shader);
                sel->num_shaders++;
                p_atomic_inc(&sctx->screen->b.num_compilations);
        }

        return 0;
}
static void *si_create_shader_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *state,
                                    unsigned pipe_shader_type)
{
        struct si_screen *sscreen = (struct si_screen *)ctx->screen;
        struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
        int i;

        if (!sel)
                return NULL;

        sel->type = pipe_shader_type;
        sel->tokens = tgsi_dup_tokens(state->tokens);
        if (!sel->tokens) {
                FREE(sel);
                return NULL;
        }

        sel->so = state->stream_output;
        tgsi_scan_shader(state->tokens, &sel->info);
        p_atomic_inc(&sscreen->b.num_shaders_created);

        switch (pipe_shader_type) {
        case PIPE_SHADER_GEOMETRY:
                sel->gs_output_prim =
                        sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
                sel->gs_max_out_vertices =
                        sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
                sel->gs_num_invocations =
                        sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
                sel->gsvs_itemsize = sel->info.num_outputs * 16 *
                                     sel->gs_max_out_vertices;

                for (i = 0; i < sel->info.num_inputs; i++) {
                        unsigned name = sel->info.input_semantic_name[i];
                        unsigned index = sel->info.input_semantic_index[i];

                        switch (name) {
                        case TGSI_SEMANTIC_PRIMID:
                                break;
                        default:
                                sel->inputs_read |=
                                        1llu << si_shader_io_get_unique_index(name, index);
                        }
                }
                break;

        case PIPE_SHADER_VERTEX:
        case PIPE_SHADER_TESS_CTRL:
                for (i = 0; i < sel->info.num_outputs; i++) {
                        unsigned name = sel->info.output_semantic_name[i];
                        unsigned index = sel->info.output_semantic_index[i];

                        switch (name) {
                        case TGSI_SEMANTIC_TESSINNER:
                        case TGSI_SEMANTIC_TESSOUTER:
                        case TGSI_SEMANTIC_PATCH:
                                sel->patch_outputs_written |=
                                        1llu << si_shader_io_get_unique_index(name, index);
                                break;
                        default:
                                sel->outputs_written |=
                                        1llu << si_shader_io_get_unique_index(name, index);
                        }
                }
                break;
        case PIPE_SHADER_FRAGMENT:
                for (i = 0; i < sel->info.num_outputs; i++) {
                        unsigned name = sel->info.output_semantic_name[i];
                        unsigned index = sel->info.output_semantic_index[i];

                        if (name == TGSI_SEMANTIC_COLOR)
                                sel->ps_colors_written |= 1 << index;
                }
                break;
        }

        if (sscreen->b.debug_flags & DBG_PRECOMPILE)
                if (si_shader_select(ctx, sel)) {
                        fprintf(stderr, "radeonsi: can't create a shader\n");
                        tgsi_free_tokens(sel->tokens);
                        FREE(sel);
                        return NULL;
                }

        return sel;
}
static void *si_create_fs_state(struct pipe_context *ctx,
                                const struct pipe_shader_state *state)
{
        return si_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *si_create_gs_state(struct pipe_context *ctx,
                                const struct pipe_shader_state *state)
{
        return si_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}

static void *si_create_vs_state(struct pipe_context *ctx,
                                const struct pipe_shader_state *state)
{
        return si_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void *si_create_tcs_state(struct pipe_context *ctx,
                                 const struct pipe_shader_state *state)
{
        return si_create_shader_state(ctx, state, PIPE_SHADER_TESS_CTRL);
}

static void *si_create_tes_state(struct pipe_context *ctx,
                                 const struct pipe_shader_state *state)
{
        return si_create_shader_state(ctx, state, PIPE_SHADER_TESS_EVAL);
}
/*
 * Normally, we only emit 1 viewport and 1 scissor if no shader is using
 * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
 * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
 * called to emit the rest.
 */
static void si_update_viewports_and_scissors(struct si_context *sctx)
{
        struct tgsi_shader_info *info = si_get_vs_info(sctx);

        if (!info || !info->writes_viewport_index)
                return;

        if (sctx->scissors.dirty_mask)
                si_mark_atom_dirty(sctx, &sctx->scissors.atom);
        if (sctx->viewports.dirty_mask)
                si_mark_atom_dirty(sctx, &sctx->viewports.atom);
}
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = state;

        if (sctx->vs_shader == sel || !sel)
                return;

        sctx->vs_shader = sel;
        si_mark_atom_dirty(sctx, &sctx->clip_regs);
        si_update_viewports_and_scissors(sctx);
}
static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = state;
        bool enable_changed = !!sctx->gs_shader != !!sel;

        if (sctx->gs_shader == sel)
                return;

        sctx->gs_shader = sel;
        si_mark_atom_dirty(sctx, &sctx->clip_regs);
        sctx->last_rast_prim = -1; /* reset this so that it gets updated */

        if (enable_changed)
                si_shader_change_notify(sctx);
        si_update_viewports_and_scissors(sctx);
}
static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = state;
        bool enable_changed = !!sctx->tcs_shader != !!sel;

        if (sctx->tcs_shader == sel)
                return;

        sctx->tcs_shader = sel;

        if (enable_changed)
                si_shader_change_notify(sctx);
        sctx->last_tcs = NULL; /* invalidate derived tess state */
}
static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = state;
        bool enable_changed = !!sctx->tes_shader != !!sel;

        if (sctx->tes_shader == sel)
                return;

        sctx->tes_shader = sel;
        si_mark_atom_dirty(sctx, &sctx->clip_regs);
        sctx->last_rast_prim = -1; /* reset this so that it gets updated */

        if (enable_changed) {
                si_shader_change_notify(sctx);
                sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
        }
        si_update_viewports_and_scissors(sctx);
}
static void si_make_dummy_ps(struct si_context *sctx)
{
        if (!sctx->dummy_pixel_shader) {
                sctx->dummy_pixel_shader =
                        util_make_fragment_cloneinput_shader(&sctx->b.b, 0,
                                                             TGSI_SEMANTIC_GENERIC,
                                                             TGSI_INTERPOLATE_CONSTANT);
        }
}
static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = state;

        /* skip if supplied shader is one already in use */
        if (sctx->ps_shader == sel)
                return;

        /* use a dummy shader if binding a NULL shader */
        if (!sel) {
                si_make_dummy_ps(sctx);
                sel = sctx->dummy_pixel_shader;
        }

        sctx->ps_shader = sel;
        si_mark_atom_dirty(sctx, &sctx->cb_target_mask);
}
static void si_delete_shader_selector(struct pipe_context *ctx,
                                      struct si_shader_selector *sel)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader *p = sel->current, *c;

        while (p) {
                c = p->next_variant;
                switch (sel->type) {
                case PIPE_SHADER_VERTEX:
                        if (p->key.vs.as_ls)
                                si_pm4_delete_state(sctx, ls, p->pm4);
                        else if (p->key.vs.as_es)
                                si_pm4_delete_state(sctx, es, p->pm4);
                        else
                                si_pm4_delete_state(sctx, vs, p->pm4);
                        break;
                case PIPE_SHADER_TESS_CTRL:
                        si_pm4_delete_state(sctx, hs, p->pm4);
                        break;
                case PIPE_SHADER_TESS_EVAL:
                        if (p->key.tes.as_es)
                                si_pm4_delete_state(sctx, es, p->pm4);
                        else
                                si_pm4_delete_state(sctx, vs, p->pm4);
                        break;
                case PIPE_SHADER_GEOMETRY:
                        si_pm4_delete_state(sctx, gs, p->pm4);
                        si_pm4_delete_state(sctx, vs, p->gs_copy_shader->pm4);
                        break;
                case PIPE_SHADER_FRAGMENT:
                        si_pm4_delete_state(sctx, ps, p->pm4);
                        break;
                }

                si_shader_destroy(ctx, p);
                free(p);
                p = c;
        }

        free(sel->tokens);
        free(sel);
}
static void si_delete_vs_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = (struct si_shader_selector *)state;

        if (sctx->vs_shader == sel) {
                sctx->vs_shader = NULL;
        }

        si_delete_shader_selector(ctx, sel);
}

static void si_delete_gs_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = (struct si_shader_selector *)state;

        if (sctx->gs_shader == sel) {
                sctx->gs_shader = NULL;
        }

        si_delete_shader_selector(ctx, sel);
}

static void si_delete_ps_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = (struct si_shader_selector *)state;

        if (sctx->ps_shader == sel) {
                sctx->ps_shader = NULL;
        }

        si_delete_shader_selector(ctx, sel);
}

static void si_delete_tcs_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = (struct si_shader_selector *)state;

        if (sctx->tcs_shader == sel) {
                sctx->tcs_shader = NULL;
        }

        si_delete_shader_selector(ctx, sel);
}

static void si_delete_tes_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = (struct si_shader_selector *)state;

        if (sctx->tes_shader == sel) {
                sctx->tes_shader = NULL;
        }

        si_delete_shader_selector(ctx, sel);
}
static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        struct si_shader *ps = sctx->ps_shader->current;
        struct si_shader *vs = si_get_vs_state(sctx);
        struct tgsi_shader_info *psinfo = &ps->selector->info;
        struct tgsi_shader_info *vsinfo = &vs->selector->info;
        unsigned i, j, tmp, num_written = 0;

        if (!ps->nparam)
                return;

        radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, ps->nparam);

        for (i = 0; i < psinfo->num_inputs; i++) {
                unsigned name = psinfo->input_semantic_name[i];
                unsigned index = psinfo->input_semantic_index[i];
                unsigned interpolate = psinfo->input_interpolate[i];
                unsigned param_offset = ps->ps_input_param_offset[i];

                if (name == TGSI_SEMANTIC_POSITION ||
                    name == TGSI_SEMANTIC_FACE)
                        /* Read from preloaded VGPRs, not parameters */
                        continue;

bcolor:
                tmp = 0;

                if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
                    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
                        tmp |= S_028644_FLAT_SHADE(1);

                if (name == TGSI_SEMANTIC_PCOORD ||
                    (name == TGSI_SEMANTIC_TEXCOORD &&
                     sctx->sprite_coord_enable & (1 << index))) {
                        tmp |= S_028644_PT_SPRITE_TEX(1);
                }

                for (j = 0; j < vsinfo->num_outputs; j++) {
                        if (name == vsinfo->output_semantic_name[j] &&
                            index == vsinfo->output_semantic_index[j]) {
                                tmp |= S_028644_OFFSET(vs->vs_output_param_offset[j]);
                                break;
                        }
                }

                if (name == TGSI_SEMANTIC_PRIMID)
                        /* PrimID is written after the last output. */
                        tmp |= S_028644_OFFSET(vs->vs_output_param_offset[vsinfo->num_outputs]);
                else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(tmp)) {
                        /* No corresponding output found, load defaults into input.
                         * Don't set any other bits.
                         * (FLAT_SHADE=1 completely changes behavior) */
                        tmp = S_028644_OFFSET(0x20);
                }

                assert(param_offset == num_written);
                radeon_emit(cs, tmp);
                num_written++;

                if (name == TGSI_SEMANTIC_COLOR &&
                    ps->key.ps.color_two_side) {
                        name = TGSI_SEMANTIC_BCOLOR;
                        param_offset++;
                        goto bcolor;
                }
        }
        assert(ps->nparam == num_written);
}
static void si_emit_spi_ps_input(struct si_context *sctx, struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        struct si_shader *ps = sctx->ps_shader->current;
        unsigned input_ena = ps->spi_ps_input_ena;

        /* we need to enable at least one of them, otherwise we hang the GPU */
        assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
               G_0286CC_PERSP_CENTER_ENA(input_ena) ||
               G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
               G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
               G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
               G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
               G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
               G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));

        if (sctx->force_persample_interp) {
                unsigned num_persp = G_0286CC_PERSP_SAMPLE_ENA(input_ena) +
                                     G_0286CC_PERSP_CENTER_ENA(input_ena) +
                                     G_0286CC_PERSP_CENTROID_ENA(input_ena);
                unsigned num_linear = G_0286CC_LINEAR_SAMPLE_ENA(input_ena) +
                                      G_0286CC_LINEAR_CENTER_ENA(input_ena) +
                                      G_0286CC_LINEAR_CENTROID_ENA(input_ena);

                /* If only one set of (i,j) coordinates is used, we can disable
                 * CENTER/CENTROID, enable SAMPLE and it will load SAMPLE coordinates
                 * where CENTER/CENTROID are expected, effectively forcing per-sample
                 * interpolation.
                 */
                if (num_persp == 1) {
                        input_ena &= C_0286CC_PERSP_CENTER_ENA;
                        input_ena &= C_0286CC_PERSP_CENTROID_ENA;
                        input_ena |= G_0286CC_PERSP_SAMPLE_ENA(1);
                }
                if (num_linear == 1) {
                        input_ena &= C_0286CC_LINEAR_CENTER_ENA;
                        input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
                        input_ena |= G_0286CC_LINEAR_SAMPLE_ENA(1);
                }

                /* If at least 2 sets of coordinates are used, we can't use this
                 * trick and have to select SAMPLE using a conditional assignment
                 * in the shader with "force_persample_interp" being a shader constant.
                 */
        }

        radeon_set_context_reg_seq(cs, R_0286CC_SPI_PS_INPUT_ENA, 2);
        radeon_emit(cs, input_ena);
        radeon_emit(cs, input_ena);
}
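
/* Ring size sanity: on VI the VGT_*_RING_SIZE fields are 18 bits in units
 * of 256 bytes, i.e. anything below (1 << 18) * 256 = 64 MB fits, which is
 * why the 60 MB GSVS ring chosen below passes the asserts. */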
/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
        unsigned esgs_ring_size = 128 * 1024;
        unsigned gsvs_ring_size = 60 * 1024 * 1024;

        assert(!sctx->esgs_ring && !sctx->gsvs_ring);

        sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
                                             PIPE_USAGE_DEFAULT, esgs_ring_size);
        if (!sctx->esgs_ring)
                return;

        sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
                                             PIPE_USAGE_DEFAULT, gsvs_ring_size);
        if (!sctx->gsvs_ring) {
                pipe_resource_reference(&sctx->esgs_ring, NULL);
                return;
        }

        /* Append these registers to the init config state. */
        if (sctx->b.chip_class >= CIK) {
                if (sctx->b.chip_class >= VI) {
                        /* The maximum sizes are 63.999 MB on VI, because
                         * the register fields only have 18 bits. */
                        assert(esgs_ring_size / 256 < (1 << 18));
                        assert(gsvs_ring_size / 256 < (1 << 18));
                }
                si_pm4_set_reg(sctx->init_config, R_030900_VGT_ESGS_RING_SIZE,
                               esgs_ring_size / 256);
                si_pm4_set_reg(sctx->init_config, R_030904_VGT_GSVS_RING_SIZE,
                               gsvs_ring_size / 256);
        } else {
                si_pm4_set_reg(sctx->init_config, R_0088C8_VGT_ESGS_RING_SIZE,
                               esgs_ring_size / 256);
                si_pm4_set_reg(sctx->init_config, R_0088CC_VGT_GSVS_RING_SIZE,
                               gsvs_ring_size / 256);
        }

        /* Flush the context to re-emit the init_config state.
         * This is done only once in a lifetime of a context.
         */
        si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
        sctx->b.initial_gfx_cs_size = 0; /* force flush */
        si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

        si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
                           sctx->esgs_ring, 0, esgs_ring_size,
                           true, true, 4, 64, 0);
        si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
                           sctx->esgs_ring, 0, esgs_ring_size,
                           false, false, 0, 0, 0);
        si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
                           sctx->gsvs_ring, 0, gsvs_ring_size,
                           false, false, 0, 0, 0);
}
static void si_update_gs_rings(struct si_context *sctx)
{
        unsigned gsvs_itemsize = sctx->gs_shader->gsvs_itemsize;
        uint64_t offset;

        if (gsvs_itemsize == sctx->last_gsvs_itemsize)
                return;

        sctx->last_gsvs_itemsize = gsvs_itemsize;

        si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
                           sctx->gsvs_ring, gsvs_itemsize,
                           64, true, true, 4, 16, 0);

        offset = gsvs_itemsize * 64;
        si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_1,
                           sctx->gsvs_ring, gsvs_itemsize,
                           64, true, true, 4, 16, offset);

        offset = (gsvs_itemsize * 2) * 64;
        si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_2,
                           sctx->gsvs_ring, gsvs_itemsize,
                           64, true, true, 4, 16, offset);

        offset = (gsvs_itemsize * 3) * 64;
        si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_GSVS_3,
                           sctx->gsvs_ring, gsvs_itemsize,
                           64, true, true, 4, 16, offset);
}
/**
 * @returns 1 if \p sel has been updated to use a new scratch buffer
 *          0 if the shader is unchanged
 *          < 0 if there was a failure
 */
static int si_update_scratch_buffer(struct si_context *sctx,
                                    struct si_shader_selector *sel)
{
        struct si_shader *shader;
        uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
        int r;

        if (!sel)
                return 0;

        shader = sel->current;

        /* This shader doesn't need a scratch buffer */
        if (shader->scratch_bytes_per_wave == 0)
                return 0;

        /* This shader is already configured to use the current
         * scratch buffer. */
        if (shader->scratch_bo == sctx->scratch_buffer)
                return 0;

        assert(sctx->scratch_buffer);

        si_shader_apply_scratch_relocs(sctx, shader, scratch_va);

        /* Replace the shader bo with a new bo that has the relocs applied. */
        r = si_shader_binary_upload(sctx->screen, shader);
        if (r)
                return r;

        /* Update the shader state to use the new shader bo. */
        si_shader_init_pm4_state(shader);

        r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

        return 1;
}
static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
        if (!sctx->scratch_buffer)
                return 0;

        return sctx->scratch_buffer->b.b.width0;
}

static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_context *sctx,
                                                     struct si_shader_selector *sel)
{
        if (!sel)
                return 0;

        return sel->current->scratch_bytes_per_wave;
}

static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
        unsigned bytes = 0;

        bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx, sctx->ps_shader));
        bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx, sctx->gs_shader));
        bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx, sctx->vs_shader));
        bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx, sctx->tcs_shader));
        bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx, sctx->tes_shader));
        return bytes;
}
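
/* SPI_TMPRING_SIZE example: scratch_bytes_per_wave = 2048 is programmed
 * as WAVESIZE = 2048 >> 10 = 2 (1 KB granularity), and the scratch buffer
 * itself must hold scratch_bytes_per_wave * scratch_waves bytes. */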
static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
        unsigned current_scratch_buffer_size =
                si_get_current_scratch_buffer_size(sctx);
        unsigned scratch_bytes_per_wave =
                si_get_max_scratch_bytes_per_wave(sctx);
        unsigned scratch_needed_size = scratch_bytes_per_wave *
                sctx->scratch_waves;
        int r;

        if (scratch_needed_size > 0) {
                if (scratch_needed_size > current_scratch_buffer_size) {
                        /* Create a bigger scratch buffer */
                        pipe_resource_reference(
                                        (struct pipe_resource **)&sctx->scratch_buffer,
                                        NULL);

                        sctx->scratch_buffer =
                                        si_resource_create_custom(&sctx->screen->b.b,
                                        PIPE_USAGE_DEFAULT, scratch_needed_size);
                        if (!sctx->scratch_buffer)
                                return false;
                        sctx->emit_scratch_reloc = true;
                }

                /* Update the shaders, so they are using the latest scratch. The
                 * scratch buffer may have been changed since these shaders were
                 * last used, so we still need to try to update them, even if
                 * they require scratch buffers smaller than the current size.
                 */
                r = si_update_scratch_buffer(sctx, sctx->ps_shader);
                if (r < 0)
                        return false;
                if (r == 1)
                        si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);

                r = si_update_scratch_buffer(sctx, sctx->gs_shader);
                if (r < 0)
                        return false;
                if (r == 1)
                        si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);

                r = si_update_scratch_buffer(sctx, sctx->tcs_shader);
                if (r < 0)
                        return false;
                if (r == 1)
                        si_pm4_bind_state(sctx, hs, sctx->tcs_shader->current->pm4);

                /* VS can be bound as LS, ES, or VS. */
                if (sctx->tes_shader) {
                        r = si_update_scratch_buffer(sctx, sctx->vs_shader);
                        if (r < 0)
                                return false;
                        if (r == 1)
                                si_pm4_bind_state(sctx, ls, sctx->vs_shader->current->pm4);
                } else if (sctx->gs_shader) {
                        r = si_update_scratch_buffer(sctx, sctx->vs_shader);
                        if (r < 0)
                                return false;
                        if (r == 1)
                                si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);
                } else {
                        r = si_update_scratch_buffer(sctx, sctx->vs_shader);
                        if (r < 0)
                                return false;
                        if (r == 1)
                                si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
                }

                /* TES can be bound as ES or VS. */
                if (sctx->gs_shader) {
                        r = si_update_scratch_buffer(sctx, sctx->tes_shader);
                        if (r < 0)
                                return false;
                        if (r == 1)
                                si_pm4_bind_state(sctx, es, sctx->tes_shader->current->pm4);
                } else {
                        r = si_update_scratch_buffer(sctx, sctx->tes_shader);
                        if (r < 0)
                                return false;
                        if (r == 1)
                                si_pm4_bind_state(sctx, vs, sctx->tes_shader->current->pm4);
                }
        }

        /* The LLVM shader backend should be reporting aligned scratch_sizes. */
        assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
               "scratch size should already be aligned correctly.");

        sctx->spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
                                 S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
        return true;
}
static void si_init_tess_factor_ring(struct si_context *sctx)
{
        assert(!sctx->tf_ring);

        sctx->tf_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
                                           PIPE_USAGE_DEFAULT,
                                           32768 * sctx->screen->b.info.max_se);
        if (!sctx->tf_ring)
                return;

        assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);

        /* Append these registers to the init config state. */
        if (sctx->b.chip_class >= CIK) {
                si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
                               S_030938_SIZE(sctx->tf_ring->width0 / 4));
                si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
                               r600_resource(sctx->tf_ring)->gpu_address >> 8);
        } else {
                si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
                               S_008988_SIZE(sctx->tf_ring->width0 / 4));
                si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
                               r600_resource(sctx->tf_ring)->gpu_address >> 8);
        }

        /* Flush the context to re-emit the init_config state.
         * This is done only once in a lifetime of a context.
         */
        si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
        sctx->b.initial_gfx_cs_size = 0; /* force flush */
        si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

        si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_TESS_CTRL,
                           SI_RING_TESS_FACTOR, sctx->tf_ring, 0,
                           sctx->tf_ring->width0, false, false, 0, 0, 0);
}
/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
static void si_generate_fixed_func_tcs(struct si_context *sctx)
{
        struct ureg_src const0, const1;
        struct ureg_dst tessouter, tessinner;
        struct ureg_program *ureg = ureg_create(TGSI_PROCESSOR_TESS_CTRL);

        if (!ureg)
                return; /* if we get here, we're screwed */

        assert(!sctx->fixed_func_tcs_shader);

        ureg_DECL_constant2D(ureg, 0, 1, SI_DRIVER_STATE_CONST_BUF);
        const0 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 0),
                                    SI_DRIVER_STATE_CONST_BUF);
        const1 = ureg_src_dimension(ureg_src_register(TGSI_FILE_CONSTANT, 1),
                                    SI_DRIVER_STATE_CONST_BUF);

        tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
        tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

        ureg_MOV(ureg, tessouter, const0);
        ureg_MOV(ureg, tessinner, const1);
        ureg_END(ureg);

        sctx->fixed_func_tcs_shader =
                ureg_create_shader_and_destroy(ureg, &sctx->b.b);
}
static void si_update_vgt_shader_config(struct si_context *sctx)
{
        /* Calculate the index of the config.
         * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
        unsigned index = 2*!!sctx->tes_shader + !!sctx->gs_shader;
        struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];

        if (!*pm4) {
                uint32_t stages = 0;

                *pm4 = CALLOC_STRUCT(si_pm4_state);

                if (sctx->tes_shader) {
                        stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
                                  S_028B54_HS_EN(1);

                        if (sctx->gs_shader)
                                stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
                                          S_028B54_GS_EN(1) |
                                          S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
                        else
                                stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
                } else if (sctx->gs_shader) {
                        stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
                                  S_028B54_GS_EN(1) |
                                  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
                }

                si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
        }
        si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}
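
/* Each vertex stream owns a nibble of enabled_stream_buffers_mask, e.g.
 * an output going to buffer 1 on stream 2 sets (1 << 1) << (2 * 4) =
 * 0x200. */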
static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
{
        struct pipe_stream_output_info *so = &shader->so;
        uint32_t enabled_stream_buffers_mask = 0;
        int i;

        for (i = 0; i < so->num_outputs; i++)
                enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) <<
                                               (so->output[i].stream * 4);
        sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
        sctx->b.streamout.stride_in_dw = shader->so.stride;
}
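
/* How the API shaders map to hardware stages when bound below:
 *   VS only:      VS -> hw VS
 *   VS+GS:        VS -> hw ES, GS -> hw GS, GS copy shader -> hw VS
 *   VS+tess:      VS -> hw LS, TCS (or the fixed-func TCS) -> hw HS,
 *                 TES -> hw VS
 *   VS+tess+GS:   as above, but TES -> hw ES and the GS copy shader
 *                 becomes the hw VS. */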
bool si_update_shaders(struct si_context *sctx)
{
        struct pipe_context *ctx = (struct pipe_context *)sctx;
        struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
        int r;

        /* Update stages before GS. */
        if (sctx->tes_shader) {
                if (!sctx->tf_ring) {
                        si_init_tess_factor_ring(sctx);
                        if (!sctx->tf_ring)
                                return false;
                }

                /* VS as LS */
                r = si_shader_select(ctx, sctx->vs_shader);
                if (r)
                        return false;
                si_pm4_bind_state(sctx, ls, sctx->vs_shader->current->pm4);

                if (sctx->tcs_shader) {
                        r = si_shader_select(ctx, sctx->tcs_shader);
                        if (r)
                                return false;
                        si_pm4_bind_state(sctx, hs, sctx->tcs_shader->current->pm4);
                } else {
                        if (!sctx->fixed_func_tcs_shader) {
                                si_generate_fixed_func_tcs(sctx);
                                if (!sctx->fixed_func_tcs_shader)
                                        return false;
                        }

                        r = si_shader_select(ctx, sctx->fixed_func_tcs_shader);
                        if (r)
                                return false;
                        si_pm4_bind_state(sctx, hs,
                                          sctx->fixed_func_tcs_shader->current->pm4);
                }

                r = si_shader_select(ctx, sctx->tes_shader);
                if (r)
                        return false;

                if (sctx->gs_shader) {
                        /* TES as ES */
                        si_pm4_bind_state(sctx, es, sctx->tes_shader->current->pm4);
                } else {
                        /* TES as VS */
                        si_pm4_bind_state(sctx, vs, sctx->tes_shader->current->pm4);
                        si_update_so(sctx, sctx->tes_shader);
                }
        } else if (sctx->gs_shader) {
                /* VS as ES */
                r = si_shader_select(ctx, sctx->vs_shader);
                if (r)
                        return false;
                si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);
        } else {
                /* VS as VS */
                r = si_shader_select(ctx, sctx->vs_shader);
                if (r)
                        return false;
                si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
                si_update_so(sctx, sctx->vs_shader);
        }

        /* Update GS. */
        if (sctx->gs_shader) {
                r = si_shader_select(ctx, sctx->gs_shader);
                if (r)
                        return false;
                si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
                si_pm4_bind_state(sctx, vs, sctx->gs_shader->current->gs_copy_shader->pm4);
                si_update_so(sctx, sctx->gs_shader);

                if (!sctx->gsvs_ring) {
                        si_init_gs_rings(sctx);
                        if (!sctx->gsvs_ring)
                                return false;
                }

                si_update_gs_rings(sctx);
        } else {
                si_pm4_bind_state(sctx, gs, NULL);
                si_pm4_bind_state(sctx, es, NULL);
        }

        si_update_vgt_shader_config(sctx);

        r = si_shader_select(ctx, sctx->ps_shader);
        if (r)
                return false;
        si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);

        if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
            sctx->sprite_coord_enable != rs->sprite_coord_enable ||
            sctx->flatshade != rs->flatshade) {
                sctx->sprite_coord_enable = rs->sprite_coord_enable;
                sctx->flatshade = rs->flatshade;
                si_mark_atom_dirty(sctx, &sctx->spi_map);
        }

        if (si_pm4_state_changed(sctx, ps) ||
            sctx->force_persample_interp != rs->force_persample_interp) {
                sctx->force_persample_interp = rs->force_persample_interp;
                si_mark_atom_dirty(sctx, &sctx->spi_ps_input);
        }

        if (si_pm4_state_changed(sctx, ls) ||
            si_pm4_state_changed(sctx, hs) ||
            si_pm4_state_changed(sctx, es) ||
            si_pm4_state_changed(sctx, gs) ||
            si_pm4_state_changed(sctx, vs) ||
            si_pm4_state_changed(sctx, ps)) {
                if (!si_update_spi_tmpring_size(sctx))
                        return false;
        }

        if (sctx->ps_db_shader_control != sctx->ps_shader->current->db_shader_control) {
                sctx->ps_db_shader_control = sctx->ps_shader->current->db_shader_control;
                si_mark_atom_dirty(sctx, &sctx->db_render_state);
        }

        if (sctx->smoothing_enabled != sctx->ps_shader->current->key.ps.poly_line_smoothing) {
                sctx->smoothing_enabled = sctx->ps_shader->current->key.ps.poly_line_smoothing;
                si_mark_atom_dirty(sctx, &sctx->msaa_config);

                if (sctx->b.chip_class == SI)
                        si_mark_atom_dirty(sctx, &sctx->db_render_state);
        }
        return true;
}
void si_init_shader_functions(struct si_context *sctx)
{
        si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);
        si_init_atom(sctx, &sctx->spi_ps_input, &sctx->atoms.s.spi_ps_input, si_emit_spi_ps_input);

        sctx->b.b.create_vs_state = si_create_vs_state;
        sctx->b.b.create_tcs_state = si_create_tcs_state;
        sctx->b.b.create_tes_state = si_create_tes_state;
        sctx->b.b.create_gs_state = si_create_gs_state;
        sctx->b.b.create_fs_state = si_create_fs_state;

        sctx->b.b.bind_vs_state = si_bind_vs_shader;
        sctx->b.b.bind_tcs_state = si_bind_tcs_shader;
        sctx->b.b.bind_tes_state = si_bind_tes_shader;
        sctx->b.b.bind_gs_state = si_bind_gs_shader;
        sctx->b.b.bind_fs_state = si_bind_ps_shader;

        sctx->b.b.delete_vs_state = si_delete_vs_shader;
        sctx->b.b.delete_tcs_state = si_delete_tcs_shader;
        sctx->b.b.delete_tes_state = si_delete_tes_shader;
        sctx->b.b.delete_gs_state = si_delete_gs_shader;
        sctx->b.b.delete_fs_state = si_delete_ps_shader;
}