/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"

#include "tgsi/tgsi_parse.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"
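
/* Fill the PM4 state for a vertex shader variant compiled as an ES stage,
 * i.e. a VS whose outputs are written to the ESGS ring for a geometry
 * shader. The shader GPU address is 256-byte aligned, hence the va >> 8 /
 * va >> 40 split across PGM_LO/PGM_HI; VGPR and SGPR counts are programmed
 * in granularities of 4 and 8 respectively.
 */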
static void si_shader_es(struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;

	num_user_sgprs = SI_VS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs));
}
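
/* Fill the PM4 state for a geometry shader: the VGT cut mode is chosen from
 * the maximum number of output vertices, and the GSVS ring offsets and item
 * sizes are derived from the GS output layout.
 */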
static void si_shader_gs(struct si_shader *shader)
{
	unsigned gs_vert_itemsize = shader->selector->info.num_outputs * (16 >> 2);
	unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
	unsigned gsvs_itemsize = gs_vert_itemsize * gs_max_vert_out;
	unsigned cut_mode;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	uint64_t va;

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(gsvs_itemsize < (1 << 15));

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
		       S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		       S_028A40_CUT_MODE(cut_mode) |
		       S_028A40_ES_WRITE_OPTIMIZE(1) |
		       S_028A40_GS_WRITE_OPTIMIZE(1));

	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize);
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       util_bitcount64(shader->selector->gs_used_inputs) * (16 >> 2));
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
	si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

	num_user_sgprs = SI_GS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
	if ((num_user_sgprs + 2) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
		       S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B228_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
		       S_00B22C_USER_SGPR(num_user_sgprs));
}
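
/* Fill the PM4 state for a hardware VS. This is either a real vertex shader
 * or the GS copy shader, which reads the GSVS ring and performs the position
 * and parameter exports on behalf of the geometry shader.
 */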
static void si_shader_vs(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned num_sgprs, num_user_sgprs;
	unsigned nparams, i, vgpr_comp_cnt;
	uint64_t va;
	unsigned window_space =
	   shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	vgpr_comp_cnt = shader->uses_instanceid ? 3 : 0;

	if (shader->is_gs_copy_shader)
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	else
		num_user_sgprs = SI_VS_NUM_USER_SGPR;

	num_sgprs = shader->num_sgprs;
	if (num_user_sgprs > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 2;
	}
	assert(num_sgprs <= 104);

	/* Certain attributes (position, psize, etc.) don't count as params.
	 * VS is required to export at least one param and r600_shader_from_tgsi()
	 * takes care of adding a dummy export.
	 */
	for (nparams = 0, i = 0; i < info->num_outputs; i++) {
		switch (info->output_semantic_name[i]) {
		case TGSI_SEMANTIC_CLIPVERTEX:
		case TGSI_SEMANTIC_POSITION:
		case TGSI_SEMANTIC_PSIZE:
			break;
		default:
			nparams++;
		}
	}
	if (nparams < 1)
		nparams = 1;

	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs));

	if (window_space)
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	else
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
}
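
/* Fill the PM4 state for a pixel shader: barycentric controls, input
 * enables, export formats and the SPI_SHADER_PGM_* program registers.
 */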
static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned i, spi_ps_in_control;
	unsigned num_sgprs, num_user_sgprs;
	unsigned spi_baryc_cntl = 0, spi_ps_input_ena;
	uint64_t va;

	pm4 = shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (pm4 == NULL)
		return;

	for (i = 0; i < info->num_inputs; i++) {
		switch (info->input_semantic_name[i]) {
		case TGSI_SEMANTIC_POSITION:
			/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
			 * Possible values:
			 * 0 -> Position = pixel center (default)
			 * 1 -> Position = pixel centroid
			 * 2 -> Position = at sample position
			 */
			switch (info->input_interpolate_loc[i]) {
			case TGSI_INTERPOLATE_LOC_CENTROID:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
				break;
			case TGSI_INTERPOLATE_LOC_SAMPLE:
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
				break;
			}

			if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
			    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
				spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
			break;
		}
	}

	spi_ps_in_control = S_0286D8_NUM_INTERP(shader->nparam) |
			    S_0286D8_BC_OPTIMIZE_DISABLE(1);

	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	spi_ps_input_ena = shader->spi_ps_input_ena;
	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, shader->spi_shader_z_format);
	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
		       shader->spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	num_user_sgprs = SI_PS_NUM_USER_SGPR;
	num_sgprs = shader->num_sgprs;
	/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
	if ((num_user_sgprs + 1) > num_sgprs) {
		/* Last 2 reserved SGPRs are used for VCC */
		num_sgprs = num_user_sgprs + 1 + 2;
	}
	assert(num_sgprs <= 104);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((num_sgprs - 1) / 8));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
		       S_00B02C_USER_SGPR(num_user_sgprs));
}
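
/* Dispatch to the stage-specific PM4 setup above, based on the shader type
 * and, for vertex shaders, on whether the variant is used as an ES stage.
 */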
static void si_shader_init_pm4_state(struct si_shader *shader)
{
	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.vs.as_es)
			si_shader_es(shader);
		else
			si_shader_vs(shader);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(shader);
		si_shader_vs(shader->gs_copy_shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}

/* Compute the key for the hw shader variant */
static INLINE void si_shader_selector_key(struct pipe_context *ctx,
					  struct si_shader_selector *sel,
					  union si_shader_key *key)
{
	struct si_context *sctx = (struct si_context *)ctx;
	memset(key, 0, sizeof(*key));

	if (sel->type == PIPE_SHADER_VERTEX) {
		unsigned i;
		if (!sctx->vertex_elements)
			return;

		for (i = 0; i < sctx->vertex_elements->count; ++i)
			key->vs.instance_divisors[i] = sctx->vertex_elements->elements[i].instance_divisor;

		if (sctx->gs_shader) {
			key->vs.as_es = 1;
			key->vs.gs_used_inputs = sctx->gs_shader->gs_used_inputs;
		}
	} else if (sel->type == PIPE_SHADER_FRAGMENT) {
		if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
			key->ps.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
		key->ps.export_16bpc = sctx->framebuffer.export_16bpc;

		if (sctx->queued.named.rasterizer) {
			key->ps.color_two_side = sctx->queued.named.rasterizer->two_side;
			key->ps.flatshade = sctx->queued.named.rasterizer->flatshade;

			if (sctx->queued.named.blend) {
				key->ps.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
						       sctx->queued.named.rasterizer->multisample_enable &&
						       !sctx->framebuffer.cb0_is_integer;
			}
		}
		if (sctx->queued.named.dsa) {
			key->ps.alpha_func = sctx->queued.named.dsa->alpha_func;

			/* Alpha-test should be disabled if colorbuffer 0 is integer. */
			if (sctx->framebuffer.cb0_is_integer)
				key->ps.alpha_func = PIPE_FUNC_ALWAYS;
		} else {
			key->ps.alpha_func = PIPE_FUNC_ALWAYS;
		}
	}
}

/* Select the hw shader variant depending on the current state. */
static int si_shader_select(struct pipe_context *ctx,
			    struct si_shader_selector *sel)
{
	union si_shader_key key;
	struct si_shader * shader = NULL;
	int r;

	si_shader_selector_key(ctx, sel, &key);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct si_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (shader) {
		shader->next_variant = sel->current;
		sel->current = shader;
	} else {
		shader = CALLOC(1, sizeof(struct si_shader));
		shader->selector = sel;
		shader->key = key;

		shader->next_variant = sel->current;
		sel->current = shader;
		r = si_shader_create((struct si_screen *)ctx->screen, shader);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}
		si_shader_init_pm4_state(shader);
		sel->num_shaders++;
	}

	return 0;
}
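
/* Common create hook for all shader types: duplicate the TGSI tokens, scan
 * them into sel->info and record GS-specific properties. Actual compilation
 * of variants is deferred to si_shader_select().
 */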
static void *si_create_shader_state(struct pipe_context *ctx,
				    const struct pipe_shader_state *state,
				    unsigned pipe_shader_type)
{
	struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
	int i;

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(state->tokens);
	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);

	switch (pipe_shader_type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];

		for (i = 0; i < sel->info.num_inputs; i++) {
			unsigned name = sel->info.input_semantic_name[i];
			unsigned index = sel->info.input_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_PRIMID:
				break;
			default:
				sel->gs_used_inputs |=
					1llu << si_shader_io_get_unique_index(name, index);
			}
		}
		break;
	}

	return sel;
}

static void *si_create_fs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *si_create_gs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}

static void *si_create_vs_state(struct pipe_context *ctx,
				const struct pipe_shader_state *state)
{
	return si_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->vs_shader == sel || !sel)
		return;

	sctx->vs_shader = sel;
}

static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->gs_shader == sel)
		return;

	sctx->gs_shader = sel;
}
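
/* Lazily create a trivial pixel shader that copies one input to the output;
 * used when a NULL shader is bound or when compiling a variant fails.
 */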
static void si_make_dummy_ps(struct si_context *sctx)
{
	if (!sctx->dummy_pixel_shader) {
		sctx->dummy_pixel_shader =
			util_make_fragment_cloneinput_shader(&sctx->b.b, 0,
							     TGSI_SEMANTIC_GENERIC,
							     TGSI_INTERPOLATE_CONSTANT);
	}
}

static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	/* skip if supplied shader is one already in use */
	if (sctx->ps_shader == sel)
		return;

	/* use a dummy shader if binding a NULL shader */
	if (!sel) {
		si_make_dummy_ps(sctx);
		sel = sctx->dummy_pixel_shader;
	}

	sctx->ps_shader = sel;
}
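
/* Free a selector and all its compiled variants, deleting the PM4 state
 * matching the hardware stage each variant was built for.
 */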
static void si_delete_shader_selector(struct pipe_context *ctx,
				      struct si_shader_selector *sel)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		if (sel->type == PIPE_SHADER_GEOMETRY) {
			si_pm4_delete_state(sctx, gs, p->pm4);
			si_pm4_delete_state(sctx, vs, p->gs_copy_shader->pm4);
		} else if (sel->type == PIPE_SHADER_FRAGMENT)
			si_pm4_delete_state(sctx, ps, p->pm4);
		else if (p->key.vs.as_es)
			si_pm4_delete_state(sctx, es, p->pm4);
		else
			si_pm4_delete_state(sctx, vs, p->pm4);
		si_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}

static void si_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->vs_shader == sel) {
		sctx->vs_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}

static void si_delete_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->gs_shader == sel) {
		sctx->gs_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}

static void si_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	if (sctx->ps_shader == sel) {
		sctx->ps_shader = NULL;
	}

	si_delete_shader_selector(ctx, sel);
}
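
/* Rebuild the SPI_PS_INPUT_CNTL_* mapping so that each PS input reads the
 * VS output parameter with the same semantic name/index; inputs without a
 * matching VS output are loaded with defaults (OFFSET 0x20).
 */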
static void si_update_spi_map(struct si_context *sctx)
{
	struct si_shader *ps = sctx->ps_shader->current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo = &ps->selector->info;
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
	unsigned i, j, tmp;

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];
		unsigned param_offset = ps->ps_input_param_offset[i];

		if (name == TGSI_SEMANTIC_POSITION)
			/* Read from preloaded VGPRs, not parameters */
			continue;

bcolor:
		tmp = 0;

		if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
		    (interpolate == TGSI_INTERPOLATE_COLOR &&
		     ps->key.ps.flatshade)) {
			tmp |= S_028644_FLAT_SHADE(1);
		}

		if (name == TGSI_SEMANTIC_GENERIC &&
		    sctx->sprite_coord_enable & (1 << index)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		for (j = 0; j < vsinfo->num_outputs; j++) {
			if (name == vsinfo->output_semantic_name[j] &&
			    index == vsinfo->output_semantic_index[j]) {
				tmp |= S_028644_OFFSET(vs->vs_output_param_offset[j]);
				break;
			}
		}

		if (j == vsinfo->num_outputs) {
			/* No corresponding output found, load defaults into input */
			tmp |= S_028644_OFFSET(0x20);
		}

		si_pm4_set_reg(pm4,
			       R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
			       tmp);

		if (name == TGSI_SEMANTIC_COLOR &&
		    ps->key.ps.color_two_side) {
			name = TGSI_SEMANTIC_BCOLOR;
			param_offset++;
			goto bcolor;
		}
	}

	si_pm4_set_state(sctx, spi, pm4);
}

/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
	unsigned esgs_ring_size = 128 * 1024;
	unsigned gsvs_ring_size = 64 * 1024 * 1024;

	assert(!sctx->gs_rings);
	sctx->gs_rings = CALLOC_STRUCT(si_pm4_state);

	sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, esgs_ring_size);

	sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
					     PIPE_USAGE_DEFAULT, gsvs_ring_size);

	if (sctx->b.chip_class >= CIK) {
		si_pm4_set_reg(sctx->gs_rings, R_030900_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_030904_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	} else {
		si_pm4_set_reg(sctx->gs_rings, R_0088C8_VGT_ESGS_RING_SIZE,
			       esgs_ring_size / 256);
		si_pm4_set_reg(sctx->gs_rings, R_0088CC_VGT_GSVS_RING_SIZE,
			       gsvs_ring_size / 256);
	}

	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   true, true, 4, 64);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, SI_RING_ESGS,
			   sctx->esgs_ring, 0, esgs_ring_size,
			   false, false, 0, 0);
	si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, SI_RING_GSVS,
			   sctx->gsvs_ring, 0, gsvs_ring_size,
			   false, false, 0, 0);
}
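
/* Called before each draw: select (compiling if necessary) the shader
 * variants for the current state, bind their PM4 states, and enable or
 * disable the GS stage and its ring buffers accordingly.
 */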
void si_update_shaders(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context *)sctx;

	if (sctx->gs_shader) {
		si_shader_select(ctx, sctx->gs_shader);
		si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader->current->gs_copy_shader->pm4);

		sctx->b.streamout.stride_in_dw = sctx->gs_shader->so.stride;

		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);

		if (!sctx->gs_rings)
			si_init_gs_rings(sctx);
		if (sctx->emitted.named.gs_rings != sctx->gs_rings)
			sctx->b.flags |= R600_CONTEXT_VGT_FLUSH;
		si_pm4_bind_state(sctx, gs_rings, sctx->gs_rings);

		si_set_ring_buffer(ctx, PIPE_SHADER_GEOMETRY, SI_RING_GSVS,
				   sctx->gsvs_ring,
				   sctx->gs_shader->gs_max_out_vertices *
				   sctx->gs_shader->info.num_outputs * 16,
				   64, true, true, 4, 16);

		if (!sctx->gs_on) {
			sctx->gs_on = CALLOC_STRUCT(si_pm4_state);

			si_pm4_set_reg(sctx->gs_on, R_028B54_VGT_SHADER_STAGES_EN,
				       S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				       S_028B54_GS_EN(1) |
				       S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER));
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_on);
	} else {
		si_shader_select(ctx, sctx->vs_shader);
		si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);

		sctx->b.streamout.stride_in_dw = sctx->vs_shader->so.stride;

		if (!sctx->gs_off) {
			sctx->gs_off = CALLOC_STRUCT(si_pm4_state);

			si_pm4_set_reg(sctx->gs_off, R_028A40_VGT_GS_MODE, 0);
			si_pm4_set_reg(sctx->gs_off, R_028B54_VGT_SHADER_STAGES_EN, 0);
		}
		si_pm4_bind_state(sctx, gs_onoff, sctx->gs_off);
		si_pm4_bind_state(sctx, gs_rings, NULL);
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_shader_select(ctx, sctx->ps_shader);

	if (!sctx->ps_shader->current) {
		struct si_shader_selector *sel;

		/* use a dummy shader if compiling the shader (variant) failed */
		si_make_dummy_ps(sctx);
		sel = sctx->dummy_pixel_shader;
		si_shader_select(ctx, sel);
		sctx->ps_shader->current = sel->current;
	}

	si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);

	if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs))
		si_update_spi_map(sctx);

	if (sctx->ps_db_shader_control != sctx->ps_shader->current->db_shader_control) {
		sctx->ps_db_shader_control = sctx->ps_shader->current->db_shader_control;
		sctx->db_render_state.dirty = true;
	}
}

void si_init_shader_functions(struct si_context *sctx)
{
	sctx->b.b.create_vs_state = si_create_vs_state;
	sctx->b.b.create_gs_state = si_create_gs_state;
	sctx->b.b.create_fs_state = si_create_fs_state;

	sctx->b.b.bind_vs_state = si_bind_vs_shader;
	sctx->b.b.bind_gs_state = si_bind_gs_shader;
	sctx->b.b.bind_fs_state = si_bind_ps_shader;

	sctx->b.b.delete_vs_state = si_delete_vs_shader;
	sctx->b.b.delete_gs_state = si_delete_gs_shader;
	sctx->b.b.delete_fs_state = si_delete_ps_shader;
}