/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"

#include "tgsi/tgsi_parse.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"
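/* Build the pm4 state for a vertex shader compiled as an ES stage,
 * i.e. one that feeds the ESGS ring when a geometry shader is enabled. */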
static void si_shader_es(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;

	num_user_sgprs = SI_VS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B328_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs) |
		       S_00B32C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
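/* Build the pm4 state for a geometry shader: ring item sizes and the GS
 * program registers. */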
static void si_shader_gs(struct si_shader *shader)
{
	unsigned gs_vert_itemsize = shader->selector->info.num_outputs * (16 >> 2);
	unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
	unsigned gsvs_itemsize = gs_vert_itemsize * gs_max_vert_out;
	unsigned cut_mode;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(gsvs_itemsize < (1 << 15));

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
		       S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		       S_028A40_CUT_MODE(cut_mode) |
		       S_028A40_ES_WRITE_OPTIMIZE(1) |
		       S_028A40_GS_WRITE_OPTIMIZE(1));

	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       util_bitcount64(shader->selector->gs_used_inputs) * (16 >> 2));
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
	si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

	num_user_sgprs = SI_GS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
	if ((num_user_sgprs + 2) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
		       S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B228_SGPRS((num_sgprs - 1) / 8) |
		       S_00B228_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
		       S_00B22C_USER_SGPR(num_user_sgprs) |
		       S_00B22C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
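/* Build the pm4 state for the hardware VS stage (a regular VS or the GS
 * copy shader). */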
static void si_shader_vs(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, i, vgpr_comp_cnt;
	uint64_t va;
	unsigned window_space =
		shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	if (shader->is_gs_copy_shader) {
		vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
		vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;
		num_user_sgprs = SI_VS_NUM_USER_SGPR;
	}

	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	for (nparams = 0, i = 0 ; i < info->num_outputs; i++) {
		switch (info->output_semantic_name[i]) {
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_POSITION:
		case TGSI_SEMANTIC_PSIZE:
			break;
		default:
			nparams++;
		}
	}
	if (nparams < 1)
		nparams = 1;

	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B128_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
		       S_00B12C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));

	if (window_space)
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	else
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
}
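/* Build the pm4 state for a pixel shader. */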
static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned i, spi_ps_in_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0, spi_ps_input_ena;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	for (i = 0; i < info->num_inputs; i++) {
		switch (info->input_semantic_name[i]) {
		case TGSI_SEMANTIC_POSITION:
			/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
			 * 0 -> Position = pixel center (default)
			 * 1 -> Position = pixel centroid
			 * 2 -> Position = at sample position
			 */
			switch (info->input_interpolate_loc[i]) {
			case TGSI_INTERPOLATE_LOC_CENTROID:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
				break;
			case TGSI_INTERPOLATE_LOC_SAMPLE:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
				break;
			}

			if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
			    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
			break;
		}
	}

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->nparam) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(1);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	spi_ps_input_ena = shader->spi_ps_input_ena;
	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8) |
		       S_00B028_DX10_CLAMP(shader->dx10_clamp_mode));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs) |
		       S_00B02C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0));
}
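/* (Re)build the pm4 state of a shader variant for the hardware stage
 * selected by its selector type and key. */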
static void si_shader_init_pm4_state(struct si_shader *shader)
{
	if (shader->pm4)
		si_pm4_free_state_simple(shader->pm4);

	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.vs.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(shader);
		si_shader_vs(shader->gs_copy_shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}
/* Compute the key for the hw shader variant */
static INLINE void si_shader_selector_key(struct pipe_context *ctx,
					  struct si_shader_selector *sel,
					  union si_shader_key *key)
{
	struct si_context *sctx = (struct si_context *)ctx;
	memset(key, 0, sizeof(*key));

	if (sel->type == PIPE_SHADER_VERTEX) {
		unsigned i;

		if (!sctx->vertex_elements)
			return;

		for (i = 0; i < sctx->vertex_elements->count; ++i)
			key->vs.instance_divisors[i] = sctx->vertex_elements->elements[i].instance_divisor;

		if (sctx->gs_shader) {
			key->vs.as_es = 1;
			key->vs.gs_used_inputs = sctx->gs_shader->gs_used_inputs;
		}
	} else if (sel->type == PIPE_SHADER_FRAGMENT) {
		struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

		if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
			key->ps.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
		key->ps.export_16bpc = sctx->framebuffer.export_16bpc;

		if (rs) {
			key->ps.color_two_side = rs->two_side;

			if (sctx->queued.named.blend) {
				key->ps.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
						       rs->multisample_enable &&
						       !sctx->framebuffer.cb0_is_integer;
			}

			key->ps.poly_stipple = rs->poly_stipple_enable &&
					       ((sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
						 sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
						sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY);
		}

		key->ps.alpha_func = PIPE_FUNC_ALWAYS;

		/* Alpha-test should be disabled if colorbuffer 0 is integer. */
		if (sctx->queued.named.dsa &&
		    !sctx->framebuffer.cb0_is_integer)
			key->ps.alpha_func = sctx->queued.named.dsa->alpha_func;
	}
}
/* Select the hw shader variant depending on the current state. */
static int si_shader_select(struct pipe_context *ctx,
			    struct si_shader_selector *sel)
{
	union si_shader_key key;
	struct si_shader * shader = NULL;
	int r;

	si_shader_selector_key(ctx, sel, &key);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct si_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (shader) {
		shader->next_variant = sel->current;
		sel->current = shader;
	} else {
		shader = CALLOC(1, sizeof(struct si_shader));
		shader->selector = sel;
		shader->key = key;

		shader->next_variant = sel->current;
		sel->current = shader;
		r = si_shader_create((struct si_screen *)ctx->screen, shader);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}
		si_shader_init_pm4_state(shader);
		sel->num_shaders++;
	}

	return 0;
}
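/* Create a shader selector, the state object that owns every variant
 * compiled from one pipe_shader_state. */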
static void *si_create_shader_state(struct pipe_context *ctx,
				    const struct pipe_shader_state *state,
				    unsigned pipe_shader_type)
{
	struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
	int i;

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(state->tokens);
	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);

	switch (pipe_shader_type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];

		for (i = 0; i < sel->info.num_inputs; i++) {
			unsigned name = sel->info.input_semantic_name[i];
			unsigned index = sel->info.input_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_PRIMID:
				break;
			default:
				sel->gs_used_inputs |=
					1llu << si_shader_io_get_unique_index(name, index);
			}
		}
		break;
	}

	return sel;
}
static void *si_create_fs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *si_create_gs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}

static void *si_create_vs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->vs_shader == sel || !sel)
		return;

	sctx->vs_shader = sel;
	sctx->clip_regs.dirty = true;
}
static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->gs_shader == sel)
		return;

	sctx->gs_shader = sel;
	sctx->clip_regs.dirty = true;
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */
}
static void si_make_dummy_ps(struct si_context *sctx)
{
	if (!sctx->dummy_pixel_shader) {
		sctx->dummy_pixel_shader =
			util_make_fragment_cloneinput_shader(&sctx->b.b, 0,
							     TGSI_SEMANTIC_GENERIC,
							     TGSI_INTERPOLATE_CONSTANT);
	}
}
static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	/* skip if supplied shader is one already in use */
	if (sctx->ps_shader == sel)
		return;

	/* use a dummy shader if binding a NULL shader */
	if (!sel) {
		si_make_dummy_ps(sctx);
		sel = sctx->dummy_pixel_shader;
	}

	sctx->ps_shader = sel;
}
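/* Destroy a shader selector together with all of its variants. */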
static void si_delete_shader_selector(struct pipe_context *ctx,
				      struct si_shader_selector *sel)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		if (sel->type == PIPE_SHADER_GEOMETRY) {
			si_pm4_delete_state(sctx, gs, p->pm4);
			si_pm4_delete_state(sctx, vs, p->gs_copy_shader->pm4);
		} else if (sel->type == PIPE_SHADER_FRAGMENT)
			si_pm4_delete_state(sctx, ps, p->pm4);
		else if (p->key.vs.as_es)
			si_pm4_delete_state(sctx, es, p->pm4);
		else
			si_pm4_delete_state(sctx, vs, p->pm4);
		si_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}
static void si_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->vs_shader == sel) {
		sctx->vs_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}
static void si_delete_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->gs_shader == sel) {
		sctx->gs_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}
static void si_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->ps_shader == sel) {
		sctx->ps_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}
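/* Program the SPI_PS_INPUT_CNTL_* registers that map VS outputs to PS inputs. */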
static void si_update_spi_map(struct si_context *sctx)
{
	struct si_shader *ps = sctx->ps_shader->current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo = &ps->selector->info;
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
	unsigned i, j, tmp;

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];
		unsigned param_offset = ps->ps_input_param_offset[i];

		if (name == TGSI_SEMANTIC_POSITION ||
		    name == TGSI_SEMANTIC_FACE)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
			tmp |= S_028644_FLAT_SHADE(1);

		if (name == TGSI_SEMANTIC_GENERIC &&
		    sctx->sprite_coord_enable & (1 << index)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vsinfo->num_outputs; j++) {
			if (name == vsinfo->output_semantic_name[j] &&
			    index == vsinfo->output_semantic_index[j]) {
				tmp |= S_028644_OFFSET(vs->vs_output_param_offset[j]);
				break;
			}
		}

		if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(tmp)) {
			/* No corresponding output found, load defaults into input.
			 * Don't set any other bits.
			 * (FLAT_SHADE=1 completely changes behavior) */
			tmp = S_028644_OFFSET(0x20);
		}

		si_pm4_set_reg(pm4,
			       R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
			       tmp);

		if (name == TGSI_SEMANTIC_COLOR &&
		    ps->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}

	si_pm4_set_state(sctx, spi, pm4);
}
/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
	unsigned esgs_ring_size = 128 * 1024;
	unsigned gsvs_ring_size = 64 * 1024 * 1024;

	assert(!sctx->gs_rings);
	sctx->gs_rings = CALLOC_STRUCT(si_pm4_state);

	sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, esgs_ring_size);

	sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, gsvs_ring_size);

	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(sctx->gs_rings, R_030900_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_030904_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	} else {
		si_pm4_set_reg(sctx->gs_rings, R_0088C8_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_0088CC_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	}
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
			   sctx->gsvs_ring, 0, gsvs_ring_size,
}
/**
 * @returns 1 if \p sel has been updated to use a new scratch buffer and 0
 *          otherwise.
 */
static unsigned si_update_scratch_buffer(struct si_context *sctx,
					 struct si_shader_selector *sel)
{
	struct si_shader *shader;
	uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
	unsigned char *ptr;

	if (!sel)
		return 0;

	shader = sel->current;

	/* This shader doesn't need a scratch buffer */
	if (shader->scratch_bytes_per_wave == 0)
		return 0;

	/* This shader is already configured to use the current
	 * scratch buffer. */
	if (shader->scratch_bo == sctx->scratch_buffer)
		return 0;

	assert(sctx->scratch_buffer);

	si_shader_apply_scratch_relocs(sctx, shader, scratch_va);

	/* Replace the shader bo with a new bo that has the relocs applied. */
	r600_resource_reference(&shader->bo, NULL);
	shader->bo = si_resource_create_custom(&sctx->screen->b.b, PIPE_USAGE_IMMUTABLE,
					       shader->binary.code_size);
	ptr = sctx->screen->b.ws->buffer_map(shader->bo->cs_buf, NULL, PIPE_TRANSFER_WRITE);
	util_memcpy_cpu_to_le32(ptr, shader->binary.code, shader->binary.code_size);
	sctx->screen->b.ws->buffer_unmap(shader->bo->cs_buf);

	/* Update the shader state to use the new shader bo. */
	si_shader_init_pm4_state(shader);

	r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

	return 1;
}
static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
	if (!sctx->scratch_buffer)
		return 0;

	return sctx->scratch_buffer->b.b.width0;
}
static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_context *sctx,
						     struct si_shader_selector *sel)
{
	if (!sel)
		return 0;

	return sel->current->scratch_bytes_per_wave;
}
static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
	return MAX3(si_get_scratch_buffer_bytes_per_wave(sctx, sctx->ps_shader),
		    si_get_scratch_buffer_bytes_per_wave(sctx, sctx->gs_shader),
		    si_get_scratch_buffer_bytes_per_wave(sctx, sctx->vs_shader));
}
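/* Make sure the scratch buffer is big enough for all bound shaders and
 * compute the SPI_TMPRING_SIZE value. */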
static void si_update_spi_tmpring_size(struct si_context *sctx)
{
	unsigned current_scratch_buffer_size =
		si_get_current_scratch_buffer_size(sctx);
	unsigned scratch_bytes_per_wave =
		si_get_max_scratch_bytes_per_wave(sctx);
	unsigned scratch_needed_size = scratch_bytes_per_wave *
		sctx->scratch_waves;

	if (scratch_needed_size > 0) {
		if (scratch_needed_size > current_scratch_buffer_size) {
			/* Create a bigger scratch buffer */
			pipe_resource_reference(
				(struct pipe_resource **)&sctx->scratch_buffer,
				NULL);

			sctx->scratch_buffer =
				si_resource_create_custom(&sctx->screen->b.b,
							  PIPE_USAGE_DEFAULT, scratch_needed_size);
		}

		/* Update the shaders, so they are using the latest scratch. The
		 * scratch buffer may have been changed since these shaders were
		 * last used, so we still need to try to update them, even if
		 * they require scratch buffers smaller than the current size.
		 */
		if (si_update_scratch_buffer(sctx, sctx->ps_shader))
			si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);
		if (si_update_scratch_buffer(sctx, sctx->gs_shader))
			si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
		if (si_update_scratch_buffer(sctx, sctx->vs_shader))
			si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
	}

	/* The LLVM shader backend should be reporting aligned scratch_sizes. */
	assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
	       "scratch size should already be aligned correctly.");

	sctx->spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
				 S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
}
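/* Select the shader variants for the current state and bind all
 * shader-related pm4 states. */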
void si_update_shaders(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context *)sctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

	if (sctx->gs_shader) {
		si_shader_select(ctx, sctx->gs_shader);
		si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader->current->gs_copy_shader->pm4);

		sctx->b.streamout.stride_in_dw = sctx->gs_shader->so.stride;

		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);

		if (!sctx->gs_rings)
			si_init_gs_rings(sctx);
		if (sctx->emitted.named.gs_rings != sctx->gs_rings)
			sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
		si_pm4_bind_state(sctx, gs_rings, sctx->gs_rings);

		si_set_ring_buffer(ctx, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
				   sctx->gsvs_ring,
				   sctx->gs_shader->gs_max_out_vertices *
				   sctx->gs_shader->info.num_outputs * 16,
				   64, true, true, 4, 16);

		if (!sctx->gs_on) {
			sctx->gs_on = CALLOC_STRUCT(si_pm4_state);

			si_pm4_set_reg(sctx->gs_on, R_028B54_VGT_SHADER_STAGES_EN,
				       S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				       S_028B54_GS_EN(1) |
				       S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER));
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_on);
	} else {
		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);

		sctx->b.streamout.stride_in_dw = sctx->vs_shader->so.stride;

		if (!sctx->gs_off) {
			sctx->gs_off = CALLOC_STRUCT(si_pm4_state);

			si_pm4_set_reg(sctx->gs_off, R_028A40_VGT_GS_MODE, 0);
			si_pm4_set_reg(sctx->gs_off, R_028B54_VGT_SHADER_STAGES_EN, 0);
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_off);
		si_pm4_bind_state(sctx, gs_rings, NULL);
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_shader_select(ctx, sctx->ps_shader);

	if (!sctx->ps_shader->current) {
		struct si_shader_selector *sel;

		/* use a dummy shader if compiling the shader (variant) failed */
		si_make_dummy_ps(sctx);
		sel = sctx->dummy_pixel_shader;
		si_shader_select(ctx, sel);
		sctx->ps_shader->current = sel->current;
	}

	si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);

	if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
	    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
	    sctx->flatshade != rs->flatshade) {
		sctx->sprite_coord_enable = rs->sprite_coord_enable;
		sctx->flatshade = rs->flatshade;
		si_update_spi_map(sctx);
	}

	if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
	    si_pm4_state_changed(sctx, gs)) {
		si_update_spi_tmpring_size(sctx);
	}

	if (sctx->ps_db_shader_control != sctx->ps_shader->current->db_shader_control) {
		sctx->ps_db_shader_control = sctx->ps_shader->current->db_shader_control;
		sctx->db_render_state.dirty = true;
	}
}
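/* Register the shader state hooks in the pipe_context. */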
void si_init_shader_functions(struct si_context *sctx)
{
	sctx->b.b.create_vs_state = si_create_vs_state;
	sctx->b.b.create_gs_state = si_create_gs_state;
	sctx->b.b.create_fs_state = si_create_fs_state;

	sctx->b.b.bind_vs_state = si_bind_vs_shader;
	sctx->b.b.bind_gs_state = si_bind_gs_shader;
	sctx->b.b.bind_fs_state = si_bind_ps_shader;

	sctx->b.b.delete_vs_state = si_delete_vs_shader;
	sctx->b.b.delete_gs_state = si_delete_gs_shader;
	sctx->b.b.delete_fs_state = si_delete_ps_shader;
}