/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"

#include "tgsi/tgsi_parse.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"
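/* Build the pm4 state for a shader compiled as an ES (the export stage that
 * writes vertex outputs to the ESGS ring when a geometry shader is active). */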
static void si_shader_es(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;

	num_user_sgprs = SI_VS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B328_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs) |
		       S_00B32C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
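/* Build the pm4 state for a geometry shader: GS mode, ring item sizes,
 * instancing count and the GS program registers. */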
static void si_shader_gs(struct si_shader *shader)
{
	unsigned gs_vert_itemsize = shader->selector->info.num_outputs * (16 >> 2);
	unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
	unsigned gsvs_itemsize = gs_vert_itemsize * gs_max_vert_out;
	unsigned gs_num_invocations = shader->selector->gs_num_invocations;
	unsigned cut_mode;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(gsvs_itemsize < (1 << 15));

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (pm4 == NULL)
		return;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
		       S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		       S_028A40_CUT_MODE(cut_mode) |
		       S_028A40_ES_WRITE_OPTIMIZE(1) |
		       S_028A40_GS_WRITE_OPTIMIZE(1));

	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       util_bitcount64(shader->selector->gs_used_inputs) * (16 >> 2));
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize);

	si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
		       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
		       S_028B90_ENABLE(gs_num_invocations > 0));

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
	si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

	num_user_sgprs = SI_GS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
	if ((num_user_sgprs + 2) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
		       S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B228_SGPRS((num_sgprs - 1) / 8) |
		       S_00B228_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
		       S_00B22C_USER_SGPR(num_user_sgprs) |
		       S_00B22C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
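/* Build the pm4 state for a shader running as a hardware VS: parameter and
 * position export formats, streamout enables and the VS program registers.
 * This is used both for real vertex shaders and for the GS copy shader. */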
static void si_shader_vs(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, vgpr_comp_cnt;
	uint64_t va;
	unsigned window_space =
	   shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	if (shader->is_gs_copy_shader) {
		vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
		vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;
		num_user_sgprs = SI_VS_NUM_USER_SGPR;
	} else {
		assert(0);
	}

	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	/* VS is required to export at least one param. */
	nparams = MAX2(shader->nr_param_exports, 1);
	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B128_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
		       S_00B12C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));

	if (window_space)
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	else
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
}
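/* Build the pm4 state for a pixel shader: barycentric controls, SPI input
 * enables, export formats and the PS program registers. */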
static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned i, spi_ps_in_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0, spi_ps_input_ena;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);
	if (pm4 == NULL)
		return;

	for (i = 0; i < info->num_inputs; i++) {
		switch (info->input_semantic_name[i]) {
		case TGSI_SEMANTIC_POSITION:
			/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
			 * 0 -> Position = pixel center (default)
			 * 1 -> Position = pixel centroid
			 * 2 -> Position = at sample position
			 */
			switch (info->input_interpolate_loc[i]) {
			case TGSI_INTERPOLATE_LOC_CENTROID:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
				break;
			case TGSI_INTERPOLATE_LOC_SAMPLE:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
				break;
			}

			if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
			    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
			break;
		}
	}

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->nparam) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(1);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	spi_ps_input_ena = shader->spi_ps_input_ena;
	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8) |
		       S_00B028_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs) |
		       S_00B02C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
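/* (Re)build the pm4 state for a shader variant according to the hardware
 * stage it will run as. */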
static void si_shader_init_pm4_state(struct si_shader *shader)
{
	if (shader->pm4)
		si_pm4_free_state_simple(shader->pm4);

	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.vs.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(shader);
		si_shader_vs(shader->gs_copy_shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}
/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
					  struct si_shader_selector *sel,
					  union si_shader_key *key)
{
	struct si_context *sctx = (struct si_context *)ctx;
	unsigned i;

	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX:
		if (sctx->vertex_elements)
			for (i = 0; i < sctx->vertex_elements->count; ++i)
				key->vs.instance_divisors[i] =
					sctx->vertex_elements->elements[i].instance_divisor;

		if (sctx->gs_shader) {
			key->vs.as_es = 1;
			key->vs.gs_used_inputs = sctx->gs_shader->gs_used_inputs;
		}
		break;
	case PIPE_SHADER_GEOMETRY:
		break;
	case PIPE_SHADER_FRAGMENT: {
		struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

		if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
			key->ps.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
		key->ps.export_16bpc = sctx->framebuffer.export_16bpc;

		if (rs) {
			bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
					sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
				       sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
			bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;

			key->ps.color_two_side = rs->two_side;

			if (sctx->queued.named.blend) {
				key->ps.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
						       rs->multisample_enable &&
						       !sctx->framebuffer.cb0_is_integer;
			}

			key->ps.poly_stipple = rs->poly_stipple_enable && is_poly;
			key->ps.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
						       (is_line && rs->line_smooth)) &&
						      sctx->framebuffer.nr_samples <= 1;
		}

		key->ps.alpha_func = PIPE_FUNC_ALWAYS;
		/* Alpha-test should be disabled if colorbuffer 0 is integer. */
		if (sctx->queued.named.dsa &&
		    !sctx->framebuffer.cb0_is_integer)
			key->ps.alpha_func = sctx->queued.named.dsa->alpha_func;
		break;
	}
	default:
		assert(0);
	}
}
/* Select the hw shader variant depending on the current state. */
static int si_shader_select(struct pipe_context *ctx,
			    struct si_shader_selector *sel)
{
	struct si_context *sctx = (struct si_context *)ctx;
	union si_shader_key key;
	struct si_shader *shader = NULL;
	int r;

	si_shader_selector_key(ctx, sel, &key);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct si_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (shader) {
		shader->next_variant = sel->current;
		sel->current = shader;
	} else {
		/* create a new variant */
		shader = CALLOC(1, sizeof(struct si_shader));
		shader->selector = sel;
		shader->key = key;

		shader->next_variant = sel->current;
		sel->current = shader;
		r = si_shader_create((struct si_screen *)ctx->screen, sctx->tm,
				     shader);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}
		si_shader_init_pm4_state(shader);
		sel->num_shaders++;
	}

	return 0;
}
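/* Create a shader selector: the CSO that holds the TGSI tokens and from
 * which hw shader variants are compiled on demand. */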
static void *si_create_shader_state(struct pipe_context *ctx,
				    const struct pipe_shader_state *state,
				    unsigned pipe_shader_type)
{
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
	int i;

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(state->tokens);
	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);

	switch (pipe_shader_type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];

		for (i = 0; i < sel->info.num_inputs; i++) {
			unsigned name = sel->info.input_semantic_name[i];
			unsigned index = sel->info.input_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_PRIMID:
				break;
			default:
				sel->gs_used_inputs |=
					1llu << si_shader_io_get_unique_index(name, index);
			}
		}
	}

	if (sscreen->b.debug_flags & DBG_PRECOMPILE)
		si_shader_select(ctx, sel);

	return sel;
}
static void *si_create_fs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}
static void *si_create_gs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}
static void *si_create_vs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->vs_shader == sel || !sel)
		return;

	sctx->vs_shader = sel;
	sctx->clip_regs.dirty = true;
}
static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->gs_shader == sel)
		return;

	sctx->gs_shader = sel;
	sctx->clip_regs.dirty = true;
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */
}
static void si_make_dummy_ps(struct si_context *sctx)
{
	if (!sctx->dummy_pixel_shader) {
		sctx->dummy_pixel_shader =
			util_make_fragment_cloneinput_shader(&sctx->b.b, 0,
							     TGSI_SEMANTIC_GENERIC,
							     TGSI_INTERPOLATE_CONSTANT);
	}
}
static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	/* skip if supplied shader is one already in use */
	if (sctx->ps_shader == sel)
		return;

	/* use a dummy shader if binding a NULL shader */
	if (!sel) {
		si_make_dummy_ps(sctx);
		sel = sctx->dummy_pixel_shader;
	}

	sctx->ps_shader = sel;
}
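/* Free a shader selector and every variant compiled from it, deleting the
 * pm4 state of each variant first. */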
static void si_delete_shader_selector(struct pipe_context *ctx,
				      struct si_shader_selector *sel)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (p->key.vs.as_es)
				si_pm4_delete_state(sctx, es, p->pm4);
			else
				si_pm4_delete_state(sctx, vs, p->pm4);
			break;
		case PIPE_SHADER_GEOMETRY:
			si_pm4_delete_state(sctx, gs, p->pm4);
			si_pm4_delete_state(sctx, vs, p->gs_copy_shader->pm4);
			break;
		case PIPE_SHADER_FRAGMENT:
			si_pm4_delete_state(sctx, ps, p->pm4);
			break;
		}

		si_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}
static void si_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->vs_shader == sel) {
		sctx->vs_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}
static void si_delete_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->gs_shader == sel) {
		sctx->gs_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}
static void si_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->ps_shader == sel) {
		sctx->ps_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}
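/* Program SPI_PS_INPUT_CNTL_* so that each PS input is routed to the matching
 * VS output parameter, or to a default value if the VS doesn't write it. */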
static void si_update_spi_map(struct si_context *sctx)
{
	struct si_shader *ps = sctx->ps_shader->current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo = &ps->selector->info;
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
	unsigned i, j, tmp;

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];
		unsigned param_offset = ps->ps_input_param_offset[i];

		if (name == TGSI_SEMANTIC_POSITION ||
		    name == TGSI_SEMANTIC_FACE)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
			tmp |= S_028644_FLAT_SHADE(1);

		if (name == TGSI_SEMANTIC_PCOORD ||
		    (name == TGSI_SEMANTIC_TEXCOORD &&
		     sctx->sprite_coord_enable & (1 << index))) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vsinfo->num_outputs; j++) {
			if (name == vsinfo->output_semantic_name[j] &&
			    index == vsinfo->output_semantic_index[j]) {
				tmp |= S_028644_OFFSET(vs->vs_output_param_offset[j]);
				break;
			}
		}

		if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(tmp)) {
			/* No corresponding output found, load defaults into input.
			 * Don't set any other bits.
			 * (FLAT_SHADE=1 completely changes behavior) */
			tmp = S_028644_OFFSET(0x20);
		}

		si_pm4_set_reg(pm4,
			       R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
			       tmp);

		if (name == TGSI_SEMANTIC_COLOR &&
		    ps->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}

	si_pm4_set_state(sctx, spi, pm4);
}
/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
	unsigned esgs_ring_size = 128 * 1024;
	unsigned gsvs_ring_size = 64 * 1024 * 1024;

	assert(!sctx->gs_rings);
	sctx->gs_rings = CALLOC_STRUCT(si_pm4_state);

	sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, esgs_ring_size);

	sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, gsvs_ring_size);

	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(sctx->gs_rings, R_030900_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_030904_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	} else {
		si_pm4_set_reg(sctx->gs_rings, R_0088C8_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_0088CC_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	}
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   true, true, 4, 64);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   false, false, 0, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
			   sctx->gsvs_ring, 0, gsvs_ring_size,
			   false, false, 0, 0);
}
/**
 * @returns 1 if \p sel has been updated to use a new scratch buffer and 0
 *          otherwise.
 */
static unsigned si_update_scratch_buffer(struct si_context *sctx,
					 struct si_shader_selector *sel)
{
	struct si_shader *shader;
	uint64_t scratch_va = sctx->scratch_buffer->gpu_address;

	if (!sel)
		return 0;

	shader = sel->current;

	/* This shader doesn't need a scratch buffer */
	if (shader->scratch_bytes_per_wave == 0)
		return 0;

	/* This shader is already configured to use the current
	 * scratch buffer. */
	if (shader->scratch_bo == sctx->scratch_buffer)
		return 0;

	assert(sctx->scratch_buffer);

	si_shader_apply_scratch_relocs(sctx, shader, scratch_va);

	/* Replace the shader bo with a new bo that has the relocs applied. */
	si_shader_binary_upload(sctx->screen, shader);

	/* Update the shader state to use the new shader bo. */
	si_shader_init_pm4_state(shader);

	r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

	return 1;
}
static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
	if (!sctx->scratch_buffer)
		return 0;

	return sctx->scratch_buffer->b.b.width0;
}
static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_context *sctx,
						     struct si_shader_selector *sel)
{
	if (!sel)
		return 0;

	return sel->current->scratch_bytes_per_wave;
}
static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
	return MAX3(si_get_scratch_buffer_bytes_per_wave(sctx, sctx->ps_shader),
		    si_get_scratch_buffer_bytes_per_wave(sctx, sctx->gs_shader),
		    si_get_scratch_buffer_bytes_per_wave(sctx, sctx->vs_shader));
}
*sctx
)
809 unsigned current_scratch_buffer_size
=
810 si_get_current_scratch_buffer_size(sctx
);
811 unsigned scratch_bytes_per_wave
=
812 si_get_max_scratch_bytes_per_wave(sctx
);
813 unsigned scratch_needed_size
= scratch_bytes_per_wave
*
816 if (scratch_needed_size
> 0) {
818 if (scratch_needed_size
> current_scratch_buffer_size
) {
819 /* Create a bigger scratch buffer */
820 pipe_resource_reference(
821 (struct pipe_resource
**)&sctx
->scratch_buffer
,
824 sctx
->scratch_buffer
=
825 si_resource_create_custom(&sctx
->screen
->b
.b
,
826 PIPE_USAGE_DEFAULT
, scratch_needed_size
);
829 /* Update the shaders, so they are using the latest scratch. The
830 * scratch buffer may have been changed since these shaders were
831 * last used, so we still need to try to update them, even if
832 * they require scratch buffers smaller than the current size.
834 if (si_update_scratch_buffer(sctx
, sctx
->ps_shader
))
835 si_pm4_bind_state(sctx
, ps
, sctx
->ps_shader
->current
->pm4
);
836 if (si_update_scratch_buffer(sctx
, sctx
->gs_shader
))
837 si_pm4_bind_state(sctx
, gs
, sctx
->gs_shader
->current
->pm4
);
839 /* VS can be bound as ES or VS. */
840 if (sctx
->gs_shader
) {
841 if (si_update_scratch_buffer(sctx
, sctx
->vs_shader
))
842 si_pm4_bind_state(sctx
, es
, sctx
->vs_shader
->current
->pm4
);
844 if (si_update_scratch_buffer(sctx
, sctx
->vs_shader
))
845 si_pm4_bind_state(sctx
, vs
, sctx
->vs_shader
->current
->pm4
);
849 /* The LLVM shader backend should be reporting aligned scratch_sizes. */
850 assert((scratch_needed_size
& ~0x3FF) == scratch_needed_size
&&
851 "scratch size should already be aligned correctly.");
853 sctx
->spi_tmpring_size
= S_0286E8_WAVES(sctx
->scratch_waves
) |
854 S_0286E8_WAVESIZE(scratch_bytes_per_wave
>> 10);
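/* Select the current variant of each bound shader stage, bind the per-stage
 * pm4 states and update the derived state that depends on them (SPI input
 * mapping, scratch size, DB/MSAA dirtiness). */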
void si_update_shaders(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context *)sctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

	if (sctx->gs_shader) {
		si_shader_select(ctx, sctx->gs_shader);
		si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader->current->gs_copy_shader->pm4);

		sctx->b.streamout.stride_in_dw = sctx->gs_shader->so.stride;

		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);

		if (!sctx->gs_rings)
			si_init_gs_rings(sctx);
		if (sctx->emitted.named.gs_rings != sctx->gs_rings)
			sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
		si_pm4_bind_state(sctx, gs_rings, sctx->gs_rings);

		si_set_ring_buffer(ctx, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
				   sctx->gsvs_ring,
				   sctx->gs_shader->gs_max_out_vertices *
				   sctx->gs_shader->info.num_outputs * 16,
				   64, true, true, 4, 16);

		if (!sctx->gs_on) {
			sctx->gs_on = CALLOC_STRUCT(si_pm4_state);

			si_pm4_set_reg(sctx->gs_on, R_028B54_VGT_SHADER_STAGES_EN,
				       S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				       S_028B54_GS_EN(1) |
				       S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER));
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_on);
	} else {
		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);

		sctx->b.streamout.stride_in_dw = sctx->vs_shader->so.stride;

		if (!sctx->gs_off) {
			sctx->gs_off = CALLOC_STRUCT(si_pm4_state);

			si_pm4_set_reg(sctx->gs_off, R_028A40_VGT_GS_MODE, 0);
			si_pm4_set_reg(sctx->gs_off, R_028B54_VGT_SHADER_STAGES_EN, 0);
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_off);
		si_pm4_bind_state(sctx, gs_rings, NULL);
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_shader_select(ctx, sctx->ps_shader);

	if (!sctx->ps_shader->current) {
		struct si_shader_selector *sel;

		/* use a dummy shader if compiling the shader (variant) failed */
		si_make_dummy_ps(sctx);
		sel = sctx->dummy_pixel_shader;
		si_shader_select(ctx, sel);
		sctx->ps_shader->current = sel->current;
	}

	si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);

	if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
	    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
	    sctx->flatshade != rs->flatshade) {
		sctx->sprite_coord_enable = rs->sprite_coord_enable;
		sctx->flatshade = rs->flatshade;
		si_update_spi_map(sctx);
	}

	if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
	    si_pm4_state_changed(sctx, gs)) {
		si_update_spi_tmpring_size(sctx);
	}

	if (sctx->ps_db_shader_control != sctx->ps_shader->current->db_shader_control) {
		sctx->ps_db_shader_control = sctx->ps_shader->current->db_shader_control;
		sctx->db_render_state.dirty = true;
	}

	if (sctx->smoothing_enabled != sctx->ps_shader->current->key.ps.poly_line_smoothing) {
		sctx->smoothing_enabled = sctx->ps_shader->current->key.ps.poly_line_smoothing;
		sctx->msaa_config.dirty = true;

		if (sctx->b.chip_class == SI)
			sctx->db_render_state.dirty = true;
	}
}
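/* Hook the shader CSO callbacks into the pipe_context. */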
void si_init_shader_functions(struct si_context *sctx)
{
	sctx->b.b.create_vs_state = si_create_vs_state;
	sctx->b.b.create_gs_state = si_create_gs_state;
	sctx->b.b.create_fs_state = si_create_fs_state;

	sctx->b.b.bind_vs_state = si_bind_vs_shader;
	sctx->b.b.bind_gs_state = si_bind_gs_shader;
	sctx->b.b.bind_fs_state = si_bind_ps_shader;

	sctx->b.b.delete_vs_state = si_delete_vs_shader;
	sctx->b.b.delete_gs_state = si_delete_gs_shader;
	sctx->b.b.delete_fs_state = si_delete_ps_shader;
}