/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 *	Marek Olšák <maraeo@gmail.com>
 */
31 #include "radeon/r600_cs.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "tgsi/tgsi_ureg.h"
35 #include "util/hash_table.h"
36 #include "util/crc32.h"
37 #include "util/u_memory.h"
38 #include "util/u_prim.h"
40 #include "util/disk_cache.h"
41 #include "util/mesa-sha1.h"
42 #include "ac_exp_param.h"
/**
 * Return the TGSI binary in a buffer. The first 4 bytes contain its size as
 * integer.
 */
static void *si_get_tgsi_binary(struct si_shader_selector *sel)
{
	unsigned tgsi_size = tgsi_num_tokens(sel->tokens) *
			     sizeof(struct tgsi_token);
	unsigned size = 4 + tgsi_size + sizeof(sel->so);
	char *result = (char*)MALLOC(size);

	if (!result)
		return NULL;

	*((uint32_t*)result) = size;
	memcpy(result + 4, sel->tokens, tgsi_size);
	memcpy(result + 4 + tgsi_size, &sel->so, sizeof(sel->so));
	return result;
}
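/* The resulting layout is [uint32 total size | TGSI tokens | stream-output
 * state]. The leading size field counts its own 4 bytes as well, which is
 * what the cache hash/compare callbacks further below rely on.
 */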
66 /** Copy "data" to "ptr" and return the next dword following copied data. */
67 static uint32_t *write_data(uint32_t *ptr
, const void *data
, unsigned size
)
69 /* data may be NULL if size == 0 */
71 memcpy(ptr
, data
, size
);
72 ptr
+= DIV_ROUND_UP(size
, 4);
/** Read data from "ptr". Return the next dword following the data. */
static uint32_t *read_data(uint32_t *ptr, void *data, unsigned size)
{
	memcpy(data, ptr, size);
	ptr += DIV_ROUND_UP(size, 4);
	return ptr;
}
/**
 * Write the size as uint followed by the data. Return the next dword
 * following the copied data.
 */
static uint32_t *write_chunk(uint32_t *ptr, const void *data, unsigned size)
{
	*ptr++ = size;
	return write_data(ptr, data, size);
}
/**
 * Read the size as uint followed by the data. Return both via parameters.
 * Return the next dword following the data.
 */
static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size)
{
	*size = *ptr++;
	assert(*data == NULL);
	if (!*size)
		return ptr;
	*data = malloc(*size);
	return read_data(ptr, *data, *size);
}
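/* A serialized chunk is [uint32 size | data padded to a dword boundary].
 * For example, write_chunk(ptr, data, 10) advances ptr by
 * 1 + DIV_ROUND_UP(10, 4) = 4 dwords: one size dword plus three dwords
 * holding the 10 payload bytes.
 */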
/**
 * Return the shader binary in a buffer. The first 4 bytes contain its size
 * as integer.
 */
static void *si_get_shader_binary(struct si_shader *shader)
{
	/* There is always a size of data followed by the data itself. */
	unsigned relocs_size = shader->binary.reloc_count *
			       sizeof(shader->binary.relocs[0]);
	unsigned disasm_size = shader->binary.disasm_string ?
			       strlen(shader->binary.disasm_string) + 1 : 0;
	unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
				strlen(shader->binary.llvm_ir_string) + 1 : 0;
	unsigned size =
		4 + /* total size */
		4 + /* CRC32 of the data below */
		align(sizeof(shader->config), 4) +
		align(sizeof(shader->info), 4) +
		4 + align(shader->binary.code_size, 4) +
		4 + align(shader->binary.rodata_size, 4) +
		4 + align(relocs_size, 4) +
		4 + align(disasm_size, 4) +
		4 + align(llvm_ir_size, 4);
	void *buffer = CALLOC(1, size);
	uint32_t *ptr = (uint32_t*)buffer;

	if (!buffer)
		return NULL;

	*ptr++ = size;
	ptr++; /* CRC32 is calculated at the end. */

	ptr = write_data(ptr, &shader->config, sizeof(shader->config));
	ptr = write_data(ptr, &shader->info, sizeof(shader->info));
	ptr = write_chunk(ptr, shader->binary.code, shader->binary.code_size);
	ptr = write_chunk(ptr, shader->binary.rodata, shader->binary.rodata_size);
	ptr = write_chunk(ptr, shader->binary.relocs, relocs_size);
	ptr = write_chunk(ptr, shader->binary.disasm_string, disasm_size);
	ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size);
	assert((char *)ptr - (char *)buffer == size);

	/* Compute CRC32. */
	ptr = (uint32_t*)buffer;
	ptr++;
	*ptr = util_hash_crc32(ptr + 1, size - 8);

	return buffer;
}
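/* The serialized layout is therefore:
 *   [total size | CRC32 | config | info | code | rodata | relocs |
 *    disasm | llvm_ir]
 * where everything after "info" is a size-prefixed chunk. The CRC32 is
 * computed over size - 8 bytes because the first two dwords (the total size
 * and the CRC32 itself) are excluded from the checksum.
 */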
static bool si_load_shader_binary(struct si_shader *shader, void *binary)
{
	uint32_t *ptr = (uint32_t*)binary;
	uint32_t size = *ptr++;
	uint32_t crc32 = *ptr++;
	unsigned chunk_size;

	if (util_hash_crc32(ptr, size - 8) != crc32) {
		fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n");
		return false;
	}

	ptr = read_data(ptr, &shader->config, sizeof(shader->config));
	ptr = read_data(ptr, &shader->info, sizeof(shader->info));
	ptr = read_chunk(ptr, (void**)&shader->binary.code,
			 &shader->binary.code_size);
	ptr = read_chunk(ptr, (void**)&shader->binary.rodata,
			 &shader->binary.rodata_size);
	ptr = read_chunk(ptr, (void**)&shader->binary.relocs, &chunk_size);
	shader->binary.reloc_count = chunk_size / sizeof(shader->binary.relocs[0]);
	ptr = read_chunk(ptr, (void**)&shader->binary.disasm_string, &chunk_size);
	ptr = read_chunk(ptr, (void**)&shader->binary.llvm_ir_string, &chunk_size);

	return true;
}
/**
 * Insert a shader into the cache. It's assumed the shader is not in the cache.
 * Use si_shader_cache_load_shader before calling this.
 *
 * Returns false on failure, in which case the tgsi_binary should be freed.
 */
static bool si_shader_cache_insert_shader(struct si_screen *sscreen,
					  void *tgsi_binary,
					  struct si_shader *shader,
					  bool insert_into_disk_cache)
{
	void *hw_binary;
	struct hash_entry *entry;
	uint8_t key[CACHE_KEY_SIZE];

	entry = _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
	if (entry)
		return false; /* already added */

	hw_binary = si_get_shader_binary(shader);
	if (!hw_binary)
		return false;

	if (_mesa_hash_table_insert(sscreen->shader_cache, tgsi_binary,
				    hw_binary) == NULL) {
		FREE(hw_binary);
		return false;
	}

	if (sscreen->b.disk_shader_cache && insert_into_disk_cache) {
		disk_cache_compute_key(sscreen->b.disk_shader_cache, tgsi_binary,
				       *((uint32_t *)tgsi_binary), key);
		disk_cache_put(sscreen->b.disk_shader_cache, key, hw_binary,
			       *((uint32_t *) hw_binary));
	}

	return true;
}
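/* On success, the in-memory hash table owns both tgsi_binary (the key) and
 * hw_binary (the value); si_destroy_shader_cache_entry frees them when the
 * cache is torn down.
 */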
static bool si_shader_cache_load_shader(struct si_screen *sscreen,
					void *tgsi_binary,
					struct si_shader *shader)
{
	struct hash_entry *entry =
		_mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);

	if (!entry) {
		if (sscreen->b.disk_shader_cache) {
			unsigned char sha1[CACHE_KEY_SIZE];
			size_t tg_size = *((uint32_t *) tgsi_binary);

			disk_cache_compute_key(sscreen->b.disk_shader_cache,
					       tgsi_binary, tg_size, sha1);

			size_t binary_size;
			uint8_t *buffer =
				disk_cache_get(sscreen->b.disk_shader_cache,
					       sha1, &binary_size);
			if (!buffer)
				return false;

			if (binary_size < sizeof(uint32_t) ||
			    *((uint32_t*)buffer) != binary_size) {
				/* Something has gone wrong; discard the item
				 * from the cache and rebuild/link from
				 * source.
				 */
				assert(!"Invalid radeonsi shader disk cache "
				       "item!");

				disk_cache_remove(sscreen->b.disk_shader_cache,
						  sha1);
				free(buffer);
				return false;
			}

			if (!si_load_shader_binary(shader, buffer)) {
				free(buffer);
				return false;
			}
			free(buffer);

			if (!si_shader_cache_insert_shader(sscreen, tgsi_binary,
							   shader, false))
				FREE(tgsi_binary);
		} else {
			return false;
		}
	} else {
		if (si_load_shader_binary(shader, entry->data))
			FREE(tgsi_binary);
		else
			return false;
	}

	p_atomic_inc(&sscreen->b.num_shader_cache_hits);
	return true;
}
static uint32_t si_shader_cache_key_hash(const void *key)
{
	/* The first dword is the key size. */
	return util_hash_crc32(key, *(uint32_t*)key);
}
static bool si_shader_cache_key_equals(const void *a, const void *b)
{
	uint32_t *keya = (uint32_t*)a;
	uint32_t *keyb = (uint32_t*)b;

	/* The first dword is the key size. */
	if (*keya != *keyb)
		return false;

	return memcmp(keya, keyb, *keya) == 0;
}
static void si_destroy_shader_cache_entry(struct hash_entry *entry)
{
	FREE((void*)entry->key);
	FREE(entry->data);
}
bool si_init_shader_cache(struct si_screen *sscreen)
{
	(void) mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
	sscreen->shader_cache =
		_mesa_hash_table_create(NULL,
					si_shader_cache_key_hash,
					si_shader_cache_key_equals);

	return sscreen->shader_cache != NULL;
}
void si_destroy_shader_cache(struct si_screen *sscreen)
{
	if (sscreen->shader_cache)
		_mesa_hash_table_destroy(sscreen->shader_cache,
					 si_destroy_shader_cache_entry);
	mtx_destroy(&sscreen->shader_cache_mutex);
}
static void si_set_tesseval_regs(struct si_screen *sscreen,
				 struct si_shader_selector *tes,
				 struct si_pm4_state *pm4)
{
	struct tgsi_shader_info *info = &tes->info;
	unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
	unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
	bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
	bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
	unsigned type, partitioning, topology, distribution_mode;

	switch (tes_prim_mode) {
	case PIPE_PRIM_LINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	case PIPE_PRIM_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case PIPE_PRIM_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	default:
		assert(0);
		return;
	}

	switch (tes_spacing) {
	case PIPE_TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	case PIPE_TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	default:
		assert(0);
		return;
	}

	if (tes_point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes_prim_mode == PIPE_PRIM_LINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (tes_vertex_order_cw)
		/* for some reason, this must be the other way around */
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	if (sscreen->has_distributed_tess) {
		if (sscreen->b.family == CHIP_FIJI ||
		    sscreen->b.family >= CHIP_POLARIS10)
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
		else
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
	} else
		distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;

	si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
		       S_028B6C_TYPE(type) |
		       S_028B6C_PARTITIONING(partitioning) |
		       S_028B6C_TOPOLOGY(topology) |
		       S_028B6C_DISTRIBUTION_MODE(distribution_mode));
}
/* Polaris needs different VTX_REUSE_DEPTH settings depending on
 * whether the "fractional odd" tessellation spacing is used.
 *
 * Possible VGT configurations and which state should set the register:
 *
 *   Reg set in | VGT shader configuration   | Value
 * ------------------------------------------------------
 *     VS as VS | VS                         | 30
 *     VS as ES | ES -> GS -> VS             | 30
 *    TES as VS | LS -> HS -> VS             | 14 or 30
 *    TES as ES | LS -> HS -> ES -> GS -> VS | 14 or 30
 *
 * If "shader" is NULL, it's assumed it's not LS or GS copy shader.
 */
static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen,
					 struct si_shader_selector *sel,
					 struct si_shader *shader,
					 struct si_pm4_state *pm4)
{
	unsigned type = sel->type;

	if (sscreen->b.family < CHIP_POLARIS10)
		return;

	/* VS as VS, or VS as ES: */
	if ((type == PIPE_SHADER_VERTEX &&
	     (!shader ||
	      (!shader->key.as_ls && !shader->is_gs_copy_shader))) ||
	    /* TES as VS, or TES as ES: */
	    type == PIPE_SHADER_TESS_EVAL) {
		unsigned vtx_reuse_depth = 30;

		if (type == PIPE_SHADER_TESS_EVAL &&
		    sel->info.properties[TGSI_PROPERTY_TES_SPACING] ==
		    PIPE_TESS_SPACING_FRACTIONAL_ODD)
			vtx_reuse_depth = 14;

		si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
			       vtx_reuse_depth);
	}
}
static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
{
	if (shader->pm4)
		si_pm4_clear_state(shader->pm4);
	else
		shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	return shader->pm4;
}
static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	assert(sscreen->b.chip_class <= VI);

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	/* We need at least 2 components for LS.
	 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
	 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
	 */
	vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;

	si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
	si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);

	shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
			   S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
			   S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
			   S_00B528_DX10_CLAMP(1) |
			   S_00B528_FLOAT_MODE(shader->config.float_mode);
	shader->config.rsrc2 = S_00B52C_USER_SGPR(SI_VS_NUM_USER_SGPR) |
			   S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
}
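/* Note the encoding granularity above: the VGPRS field stores
 * (num_vgprs - 1) / 4 and SGPRS stores (num_sgprs - 1) / 8, so allocation
 * is rounded up to multiples of 4 VGPRs and 8 SGPRs. E.g. num_vgprs = 23
 * encodes as (23 - 1) / 4 = 5, i.e. 24 VGPRs are reserved per lane.
 */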
static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	uint64_t va;
	unsigned ls_vgpr_comp_cnt = 0;

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (sscreen->b.chip_class >= GFX9) {
		si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
		si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, va >> 40);

		/* We need at least 2 components for LS.
		 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
		 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
		 */
		ls_vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;

		shader->config.rsrc2 =
			S_00B42C_USER_SGPR(GFX9_TCS_NUM_USER_SGPR) |
			S_00B42C_USER_SGPR_MSB(GFX9_TCS_NUM_USER_SGPR >> 5) |
			S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
	} else {
		si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
		si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);

		shader->config.rsrc2 =
			S_00B42C_USER_SGPR(GFX6_TCS_NUM_USER_SGPR) |
			S_00B42C_OC_LDS_EN(1) |
			S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
	}

	si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
		       S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B428_DX10_CLAMP(1) |
		       S_00B428_FLOAT_MODE(shader->config.float_mode) |
		       S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));

	if (sscreen->b.chip_class <= VI) {
		si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
			       shader->config.rsrc2);
	}
}
static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;
	unsigned oc_lds_en;

	assert(sscreen->b.chip_class <= VI);

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (shader->selector->type == PIPE_SHADER_VERTEX) {
		/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
		vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
		num_user_sgprs = SI_VS_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       shader->selector->esgs_itemsize / 4);
	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B328_DX10_CLAMP(1) |
		       S_00B328_FLOAT_MODE(shader->config.float_mode));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs) |
		       S_00B32C_OC_LDS_EN(oc_lds_en) |
		       S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(sscreen, shader->selector, pm4);

	polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
/**
 * Calculate the appropriate setting of VGT_GS_MODE when \p shader is a
 * geometry shader.
 */
static uint32_t si_vgt_gs_mode(struct si_shader_selector *sel)
{
	enum chip_class chip_class = sel->screen->b.chip_class;
	unsigned gs_max_vert_out = sel->gs_max_out_vertices;
	unsigned cut_mode;

	if (gs_max_vert_out <= 128) {
		cut_mode = V_028A40_GS_CUT_128;
	} else if (gs_max_vert_out <= 256) {
		cut_mode = V_028A40_GS_CUT_256;
	} else if (gs_max_vert_out <= 512) {
		cut_mode = V_028A40_GS_CUT_512;
	} else {
		assert(gs_max_vert_out <= 1024);
		cut_mode = V_028A40_GS_CUT_1024;
	}

	return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
	       S_028A40_CUT_MODE(cut_mode) |
	       S_028A40_ES_WRITE_OPTIMIZE(chip_class <= VI) |
	       S_028A40_GS_WRITE_OPTIMIZE(1) |
	       S_028A40_ONCHIP(chip_class >= GFX9 ? 1 : 0);
}
struct gfx9_gs_info {
	unsigned es_verts_per_subgroup;
	unsigned gs_prims_per_subgroup;
	unsigned gs_inst_prims_in_subgroup;
	unsigned max_prims_per_subgroup;
	unsigned lds_size;
};
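/* The first four fields are per-subgroup counts; lds_size is the ESGS LDS
 * allocation expressed in 128-dword units (see the align(esgs_lds_size, 128)
 * / 128 computation in gfx9_get_gs_info below), matching the granularity of
 * the LDS_SIZE field it is programmed into.
 */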
static void gfx9_get_gs_info(struct si_shader_selector *es,
			     struct si_shader_selector *gs,
			     struct gfx9_gs_info *out)
{
	unsigned gs_num_invocations = MAX2(gs->gs_num_invocations, 1);
	unsigned input_prim = gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
	bool uses_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
			      input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space. */
	const unsigned max_lds_size = 8 * 1024;
	const unsigned esgs_itemsize = es->esgs_itemsize / 4;
	unsigned esgs_lds_size;

	/* All these are per subgroup: */
	const unsigned max_out_prims = 32 * 1024;
	const unsigned max_es_verts = 255;
	const unsigned ideal_gs_prims = 64;
	unsigned max_gs_prims, gs_prims;
	unsigned min_es_verts, es_verts, worst_case_es_verts;

	assert(gs_num_invocations <= 32); /* GL maximum */

	if (uses_adjacency || gs_num_invocations > 1)
		max_gs_prims = 127 / gs_num_invocations;
	else
		max_gs_prims = 255;

	/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
	 * Make sure we don't go over the maximum value.
	 */
	max_gs_prims = MIN2(max_gs_prims,
			    max_out_prims /
			    (gs->gs_max_out_vertices * gs_num_invocations));
	assert(max_gs_prims > 0);

	/* If the primitive has adjacency, halve the number of vertices
	 * that will be reused in multiple primitives.
	 */
	min_es_verts = gs->gs_input_verts_per_prim / (uses_adjacency ? 2 : 1);

	gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
	worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

	/* Compute ESGS LDS size based on the worst case number of ES vertices
	 * needed to create the target number of GS prims per subgroup.
	 */
	esgs_lds_size = esgs_itemsize * worst_case_es_verts;

	/* If total LDS usage is too big, refactor partitions based on ratio
	 * of ESGS item sizes.
	 */
	if (esgs_lds_size > max_lds_size) {
		/* Our target GS Prims Per Subgroup was too large. Calculate
		 * the maximum number of GS Prims Per Subgroup that will fit
		 * into LDS, capped by the maximum that the hardware can support.
		 */
		gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
				max_gs_prims);
		assert(gs_prims > 0);
		worst_case_es_verts = MIN2(min_es_verts * gs_prims,
					   max_es_verts);

		esgs_lds_size = esgs_itemsize * worst_case_es_verts;
		assert(esgs_lds_size <= max_lds_size);
	}

	/* Now calculate remaining ESGS information. */
	if (esgs_lds_size)
		es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
	else
		es_verts = max_es_verts;

	/* Vertices for adjacency primitives are not always reused, so restore
	 * it for ES_VERTS_PER_SUBGRP.
	 */
	min_es_verts = gs->gs_input_verts_per_prim;

	/* For normal primitives, the VGT only checks if they are past the ES
	 * verts per subgroup after allocating a full GS primitive and if they
	 * are, kick off a new subgroup. But if those additional ES verts are
	 * unique (e.g. not reused) we need to make sure there is enough LDS
	 * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
	 */
	es_verts -= min_es_verts - 1;

	out->es_verts_per_subgroup = es_verts;
	out->gs_prims_per_subgroup = gs_prims;
	out->gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
	out->max_prims_per_subgroup = out->gs_inst_prims_in_subgroup *
				      gs->gs_max_out_vertices;
	out->lds_size = align(esgs_lds_size, 128) / 128;

	assert(out->max_prims_per_subgroup <= max_out_prims);
}
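/* Worked example with illustrative inputs: triangle input
 * (min_es_verts = 3), one invocation, max_vert_out = 4, and a 4-dword ESGS
 * item give max_gs_prims = MIN2(255, 32768 / 4) = 255, gs_prims = 64,
 * worst_case_es_verts = MIN2(3 * 64, 255) = 192 and esgs_lds_size =
 * 4 * 192 = 768 dwords, which fits in the 8K budget. The outputs are then
 * es_verts = 192 - (3 - 1) = 190, gs_prims = 64, max_prims = 64 * 4 = 256
 * and lds_size = align(768, 128) / 128 = 6.
 */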
static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_shader_selector *sel = shader->selector;
	const ubyte *num_components = sel->info.num_stream_output_components;
	unsigned gs_num_invocations = sel->gs_num_invocations;
	struct si_pm4_state *pm4;
	uint64_t va;
	unsigned max_stream = sel->max_gs_stream;
	unsigned offset;

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	offset = num_components[0] * sel->gs_max_out_vertices;
	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, offset);
	if (max_stream >= 1)
		offset += num_components[1] * sel->gs_max_out_vertices;
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, offset);
	if (max_stream >= 2)
		offset += num_components[2] * sel->gs_max_out_vertices;
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, offset);
	if (max_stream >= 3)
		offset += num_components[3] * sel->gs_max_out_vertices;
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(offset < (1 << 15));

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, sel->gs_max_out_vertices);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, num_components[0]);
	si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? num_components[1] : 0);
	si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? num_components[2] : 0);
	si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? num_components[3] : 0);

	si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
		       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
		       S_028B90_ENABLE(gs_num_invocations > 0));

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (sscreen->b.chip_class >= GFX9) {
		unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
		unsigned es_type = shader->key.part.gs.es->type;
		unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
		struct gfx9_gs_info gs_info;

		if (es_type == PIPE_SHADER_VERTEX)
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			es_vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
		else if (es_type == PIPE_SHADER_TESS_EVAL)
			es_vgpr_comp_cnt = shader->key.part.gs.es->info.uses_primid ? 3 : 2;
		else
			unreachable("invalid shader selector type");

		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
		 * VGPR[0:4] are always loaded.
		 */
		if (sel->info.uses_invocationid)
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		else if (sel->info.uses_primid)
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		else if (input_prim >= PIPE_PRIM_TRIANGLES)
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		else
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */

		gfx9_get_gs_info(shader->key.part.gs.es, sel, &gs_info);

		si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
		si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, va >> 40);

		si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
			       S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
			       S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
			       S_00B228_DX10_CLAMP(1) |
			       S_00B228_FLOAT_MODE(shader->config.float_mode) |
			       S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
		si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
			       S_00B22C_USER_SGPR(GFX9_GS_NUM_USER_SGPR) |
			       S_00B22C_USER_SGPR_MSB(GFX9_GS_NUM_USER_SGPR >> 5) |
			       S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
			       S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
			       S_00B22C_LDS_SIZE(gs_info.lds_size) |
			       S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

		si_pm4_set_reg(pm4, R_028A44_VGT_GS_ONCHIP_CNTL,
			       S_028A44_ES_VERTS_PER_SUBGRP(gs_info.es_verts_per_subgroup) |
			       S_028A44_GS_PRIMS_PER_SUBGRP(gs_info.gs_prims_per_subgroup) |
			       S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_info.gs_inst_prims_in_subgroup));
		si_pm4_set_reg(pm4, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
			       S_028A94_MAX_PRIMS_PER_SUBGROUP(gs_info.max_prims_per_subgroup));
		si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       shader->key.part.gs.es->esgs_itemsize / 4);

		if (es_type == PIPE_SHADER_TESS_EVAL)
			si_set_tesseval_regs(sscreen, shader->key.part.gs.es, pm4);

		polaris_set_vgt_vertex_reuse(sscreen, shader->key.part.gs.es,
					     NULL, pm4);
	} else {
		si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
		si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

		si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
			       S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
			       S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
			       S_00B228_DX10_CLAMP(1) |
			       S_00B228_FLOAT_MODE(shader->config.float_mode));
		si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
			       S_00B22C_USER_SGPR(GFX6_GS_NUM_USER_SGPR) |
			       S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
	}
}
/**
 * Compute the state for \p shader, which will run as a vertex shader on the
 * hardware.
 *
 * If \p gs is non-NULL, it points to the geometry shader for which this shader
 * is the copy shader.
 */
static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
			 struct si_shader_selector *gs)
{
	struct si_pm4_state *pm4;
	unsigned num_user_sgprs;
	unsigned nparams, vgpr_comp_cnt;
	uint64_t va;
	unsigned oc_lds_en;
	unsigned window_space =
	   shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
	bool enable_prim_id = shader->key.mono.vs_export_prim_id || shader->selector->info.uses_primid;

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	/* We always write VGT_GS_MODE in the VS state, because every switch
	 * between different shader pipelines involving a different GS or no
	 * GS at all involves a switch of the VS (different GS use different
	 * copy shaders). On the other hand, when the API switches from a GS to
	 * no GS and then back to the same GS used originally, the GS state is
	 * not sent again.
	 */
	if (!gs) {
		unsigned mode = V_028A40_GS_OFF;

		/* PrimID needs GS scenario A.
		 * GFX9 also needs it when ViewportIndex is enabled.
		 */
		if (enable_prim_id ||
		    (sscreen->b.chip_class >= GFX9 &&
		     shader->selector->info.writes_viewport_index))
			mode = V_028A40_GS_SCENARIO_A;

		si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, S_028A40_MODE(mode));
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
	} else {
		si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(gs));
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
	}

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (gs) {
		vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
		/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
		 * If PrimID is disabled, InstanceID / StepRate1 is loaded instead.
		 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
		 */
		vgpr_comp_cnt = enable_prim_id ? 2 : (shader->info.uses_instanceid ? 1 : 0);
		num_user_sgprs = SI_VS_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = enable_prim_id ? 3 : 2;
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	/* VS is required to export at least one param. */
	nparams = MAX2(shader->info.nr_param_exports, 1);
	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B128_DX10_CLAMP(1) |
		       S_00B128_FLOAT_MODE(shader->config.float_mode));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_OC_LDS_EN(oc_lds_en) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
		       S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

	if (window_space)
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	else
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(sscreen, shader->selector, pm4);

	polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
static unsigned si_get_ps_num_interp(struct si_shader *ps)
{
	struct tgsi_shader_info *info = &ps->selector->info;
	unsigned num_colors = !!(info->colors_read & 0x0f) +
			      !!(info->colors_read & 0xf0);
	unsigned num_interp = ps->selector->info.num_inputs +
			      (ps->key.part.ps.prolog.color_two_side ? num_colors : 0);

	assert(num_interp <= 32);
	return MIN2(num_interp, 32);
}
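/* When color_two_side is enabled, each read color needs an extra back-face
 * color interpolant: e.g. a PS reading both COLOR0 and COLOR1 gets
 * num_colors = 2 added on top of num_inputs.
 */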
static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
{
	unsigned value = shader->key.part.ps.epilog.spi_shader_col_format;
	unsigned i, num_targets = (util_last_bit(value) + 3) / 4;

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	for (i = 0; i < num_targets; i++)
		if (!(value & (0xf << (i * 4))))
			value |= V_028714_SPI_SHADER_32_R << (i * 4);

	return value;
}
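/* E.g. if only MRT1 exports FP16_ABGR, num_targets is 2 and the loop fills
 * the empty MRT0 nibble with 32_R, so every slot below the last used target
 * stays non-zero.
 */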
static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
{
	unsigned i, cb_shader_mask = 0;

	for (i = 0; i < 8; i++) {
		switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
		case V_028714_SPI_SHADER_ZERO:
			break;
		case V_028714_SPI_SHADER_32_R:
			cb_shader_mask |= 0x1 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_GR:
			cb_shader_mask |= 0x3 << (i * 4);
			break;
		case V_028714_SPI_SHADER_32_AR:
			cb_shader_mask |= 0x9 << (i * 4);
			break;
		case V_028714_SPI_SHADER_FP16_ABGR:
		case V_028714_SPI_SHADER_UNORM16_ABGR:
		case V_028714_SPI_SHADER_SNORM16_ABGR:
		case V_028714_SPI_SHADER_UINT16_ABGR:
		case V_028714_SPI_SHADER_SINT16_ABGR:
		case V_028714_SPI_SHADER_32_ABGR:
			cb_shader_mask |= 0xf << (i * 4);
			break;
		default:
			assert(0);
		}
	}
	return cb_shader_mask;
}
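/* Each nibble of cb_shader_mask enables the written channels of one MRT.
 * E.g. FP16_ABGR on MRT0 plus 32_R on MRT1 yields
 * 0xf | (0x1 << 4) = 0x1f.
 */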
static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned spi_ps_in_control, spi_shader_col_format, cb_shader_mask;
	unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	uint64_t va;
	unsigned input_ena = shader->config.spi_ps_input_ena;

	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));
	/* POS_W_FLOAT_ENA requires one of the perspective weights. */
	assert(!G_0286CC_POS_W_FLOAT_ENA(input_ena) ||
	       G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(input_ena));

	/* Validate interpolation optimization flags (read as implications). */
	assert(!shader->key.part.ps.prolog.bc_optimize_for_persp ||
	       (G_0286CC_PERSP_CENTER_ENA(input_ena) &&
		G_0286CC_PERSP_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.bc_optimize_for_linear ||
	       (G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
		G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_persp_center_interp ||
	       (!G_0286CC_PERSP_SAMPLE_ENA(input_ena) &&
		!G_0286CC_PERSP_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_linear_center_interp ||
	       (!G_0286CC_LINEAR_SAMPLE_ENA(input_ena) &&
		!G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_persp_sample_interp ||
	       (!G_0286CC_PERSP_CENTER_ENA(input_ena) &&
		!G_0286CC_PERSP_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_linear_sample_interp ||
	       (!G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
		!G_0286CC_LINEAR_CENTROID_ENA(input_ena)));

	/* Validate cases when the optimizations are off (read as implications). */
	assert(shader->key.part.ps.prolog.bc_optimize_for_persp ||
	       !G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       !G_0286CC_PERSP_CENTROID_ENA(input_ena));
	assert(shader->key.part.ps.prolog.bc_optimize_for_linear ||
	       !G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
	       !G_0286CC_LINEAR_CENTROID_ENA(input_ena));

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
	 * Possible values:
	 * 0 -> Position = pixel center
	 * 1 -> Position = pixel centroid
	 * 2 -> Position = at sample position
	 *
	 * From GLSL 4.5 specification, section 7.1:
	 *   "The variable gl_FragCoord is available as an input variable from
	 *    within fragment shaders and it holds the window relative coordinates
	 *    (x, y, z, 1/w) values for the fragment. If multi-sampling, this
	 *    value can be for any location within the pixel, or one of the
	 *    fragment samples. The use of centroid does not further restrict
	 *    this value to be inside the current primitive."
	 *
	 * Meaning that centroid has no effect and we can return anything within
	 * the pixel. Thus, return the value at sample position, because that's
	 * the most accurate one shaders can get.
	 */
	spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);

	if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
	    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
		spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);

	spi_shader_col_format = si_get_spi_shader_col_format(shader);
	cb_shader_mask = si_get_cb_shader_mask(spi_shader_col_format);

	/* Ensure that some export memory is always allocated, for two reasons:
	 *
	 * 1) Correctness: The hardware ignores the EXEC mask if no export
	 *    memory is allocated, so KILL and alpha test do not work correctly
	 *    without this.
	 * 2) Performance: Every shader needs at least a NULL export, even when
	 *    it writes no color/depth output. The NULL export instruction
	 *    stalls without this setting.
	 *
	 * Don't add this to CB_SHADER_MASK.
	 */
	if (!spi_shader_col_format &&
	    !info->writes_z && !info->writes_stencil && !info->writes_samplemask)
		spi_shader_col_format = V_028714_SPI_SHADER_32_R;

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR,
		       shader->config.spi_ps_input_addr);

	/* Set interpolation controls. */
	spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader));

	/* Set registers. */
	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
		       si_get_spi_shader_z_format(info->writes_z,
						  info->writes_stencil,
						  info->writes_samplemask));

	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT, spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B028_DX10_CLAMP(1) |
		       S_00B028_FLOAT_MODE(shader->config.float_mode));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
		       S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
		       S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}
static void si_shader_init_pm4_state(struct si_screen *sscreen,
				     struct si_shader *shader)
{
	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.as_ls)
			si_shader_ls(sscreen, shader);
		else if (shader->key.as_es)
			si_shader_es(sscreen, shader);
		else
			si_shader_vs(sscreen, shader, NULL);
		break;
	case PIPE_SHADER_TESS_CTRL:
		si_shader_hs(sscreen, shader);
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (shader->key.as_es)
			si_shader_es(sscreen, shader);
		else
			si_shader_vs(sscreen, shader, NULL);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(sscreen, shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}
static unsigned si_get_alpha_test_func(struct si_context *sctx)
{
	/* Alpha-test should be disabled if colorbuffer 0 is integer. */
	if (sctx->queued.named.dsa)
		return sctx->queued.named.dsa->alpha_func;

	return PIPE_FUNC_ALWAYS;
}
static void si_shader_selector_key_vs(struct si_context *sctx,
				      struct si_shader_selector *vs,
				      struct si_shader_key *key,
				      struct si_vs_prolog_bits *prolog_key)
{
	if (!sctx->vertex_elements)
		return;

	unsigned count = MIN2(vs->info.num_inputs,
			      sctx->vertex_elements->count);
	for (unsigned i = 0; i < count; ++i) {
		prolog_key->instance_divisors[i] =
			sctx->vertex_elements->elements[i].instance_divisor;
	}

	memcpy(key->mono.vs_fix_fetch, sctx->vertex_elements->fix_fetch, count);
}
static void si_shader_selector_key_hw_vs(struct si_context *sctx,
					 struct si_shader_selector *vs,
					 struct si_shader_key *key)
{
	struct si_shader_selector *ps = sctx->ps_shader.cso;

	key->opt.hw_vs.clip_disable =
		sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
		(vs->info.clipdist_writemask ||
		 vs->info.writes_clipvertex) &&
		!vs->info.culldist_writemask;

	/* Find out if PS is disabled. */
	bool ps_disabled = true;
	if (ps) {
		bool ps_modifies_zs = ps->info.uses_kill ||
				      ps->info.writes_z ||
				      ps->info.writes_stencil ||
				      ps->info.writes_samplemask ||
				      si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS;

		unsigned ps_colormask = sctx->framebuffer.colorbuf_enabled_4bit &
					sctx->queued.named.blend->cb_target_mask;
		if (!ps->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
			ps_colormask &= ps->colors_written_4bit;

		ps_disabled = sctx->queued.named.rasterizer->rasterizer_discard ||
			      (!ps_colormask &&
			       !ps_modifies_zs &&
			       !ps->info.writes_memory);
	}

	/* Find out which VS outputs aren't used by the PS. */
	uint64_t outputs_written = vs->outputs_written;
	uint64_t inputs_read = 0;

	outputs_written &= ~0x3; /* ignore POSITION, PSIZE */

	if (!ps_disabled)
		inputs_read = ps->inputs_read;

	uint64_t linked = outputs_written & inputs_read;

	key->opt.hw_vs.kill_outputs = ~linked & outputs_written;
}
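/* kill_outputs marks the VS outputs that the bound PS never reads so the
 * optimized variant can drop their param exports. POSITION and PSIZE are
 * cleared from outputs_written up front (the ~0x3 mask), so they can never
 * be killed.
 */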
/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
					  struct si_shader_selector *sel,
					  struct si_shader_key *key)
{
	struct si_context *sctx = (struct si_context *)ctx;

	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX:
		si_shader_selector_key_vs(sctx, sel, key, &key->part.vs.prolog);

		if (sctx->tes_shader.cso)
			key->as_ls = 1;
		else if (sctx->gs_shader.cso)
			key->as_es = 1;
		else {
			si_shader_selector_key_hw_vs(sctx, sel, key);

			if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
				key->mono.vs_export_prim_id = 1;
		}
		break;
	case PIPE_SHADER_TESS_CTRL:
		if (sctx->b.chip_class >= GFX9) {
			si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
						  key, &key->part.tcs.ls_prolog);
			key->part.tcs.ls = sctx->vs_shader.cso;
		}

		key->part.tcs.epilog.prim_mode =
			sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		key->part.tcs.epilog.tes_reads_tess_factors =
			sctx->tes_shader.cso->info.reads_tess_factors;

		if (sel == sctx->fixed_func_tcs_shader.cso)
			key->mono.ff_tcs_inputs_to_copy = sctx->vs_shader.cso->outputs_written;
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (sctx->gs_shader.cso)
			key->as_es = 1;
		else {
			si_shader_selector_key_hw_vs(sctx, sel, key);

			if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
				key->mono.vs_export_prim_id = 1;
		}
		break;
	case PIPE_SHADER_GEOMETRY:
		if (sctx->b.chip_class >= GFX9) {
			if (sctx->tes_shader.cso) {
				key->part.gs.es = sctx->tes_shader.cso;
			} else {
				si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
							  key, &key->part.gs.vs_prolog);
				key->part.gs.es = sctx->vs_shader.cso;
			}

			/* Merged ES-GS can have unbalanced wave usage.
			 *
			 * ES threads are per-vertex, while GS threads are
			 * per-primitive. So without any amplification, there
			 * are fewer GS threads than ES threads, which can result
			 * in empty (no-op) GS waves. With too much amplification,
			 * there are more GS threads than ES threads, which
			 * can result in empty (no-op) ES waves.
			 *
			 * Non-monolithic shaders are implemented by setting EXEC
			 * at the beginning of shader parts, and don't jump to
			 * the end if EXEC is 0.
			 *
			 * Monolithic shaders use conditional blocks, so they can
			 * jump and skip empty waves of ES or GS. So set this to
			 * always use optimized variants, which are monolithic.
			 */
			key->opt.prefer_mono = 1;
		}
		key->part.gs.prolog.tri_strip_adj_fix = sctx->gs_tri_strip_adj_fix;
		break;
	case PIPE_SHADER_FRAGMENT: {
		struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
		struct si_state_blend *blend = sctx->queued.named.blend;

		if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
		    sel->info.colors_written == 0x1)
			key->part.ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;

		if (blend) {
			/* Select the shader color format based on whether
			 * blending or alpha are needed.
			 */
			key->part.ps.epilog.spi_shader_col_format =
				(blend->blend_enable_4bit & blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format_blend_alpha) |
				(blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format_blend) |
				(~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format_alpha) |
				(~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format);

			/* The output for dual source blending should have
			 * the same format as the first output.
			 */
			if (blend->dual_src_blend)
				key->part.ps.epilog.spi_shader_col_format |=
					(key->part.ps.epilog.spi_shader_col_format & 0xf) << 4;
		} else
			key->part.ps.epilog.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format;

		/* If alpha-to-coverage is enabled, we have to export alpha
		 * even if there is no color buffer.
		 */
		if (!(key->part.ps.epilog.spi_shader_col_format & 0xf) &&
		    blend && blend->alpha_to_coverage)
			key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;

		/* On SI and CIK except Hawaii, the CB doesn't clamp outputs
		 * to the range supported by the type if a channel has less
		 * than 16 bits and the export format is 16_ABGR.
		 */
		if (sctx->b.chip_class <= CIK && sctx->b.family != CHIP_HAWAII) {
			key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
			key->part.ps.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
		}

		/* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
		if (!key->part.ps.epilog.last_cbuf) {
			key->part.ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
			key->part.ps.epilog.color_is_int8 &= sel->info.colors_written;
			key->part.ps.epilog.color_is_int10 &= sel->info.colors_written;
		}

		bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
				sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
			       sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
		bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;

		key->part.ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
		key->part.ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;

		if (sctx->queued.named.blend) {
			key->part.ps.epilog.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
							   rs->multisample_enable;
		}

		key->part.ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
		key->part.ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
							   (is_line && rs->line_smooth)) &&
							  sctx->framebuffer.nr_samples <= 1;
		key->part.ps.epilog.clamp_color = rs->clamp_fragment_color;

		if (rs->force_persample_interp &&
		    rs->multisample_enable &&
		    sctx->framebuffer.nr_samples > 1 &&
		    sctx->ps_iter_samples > 1) {
			key->part.ps.prolog.force_persp_sample_interp =
				sel->info.uses_persp_center ||
				sel->info.uses_persp_centroid;

			key->part.ps.prolog.force_linear_sample_interp =
				sel->info.uses_linear_center ||
				sel->info.uses_linear_centroid;
		} else if (rs->multisample_enable &&
			   sctx->framebuffer.nr_samples > 1) {
			key->part.ps.prolog.bc_optimize_for_persp =
				sel->info.uses_persp_center &&
				sel->info.uses_persp_centroid;
			key->part.ps.prolog.bc_optimize_for_linear =
				sel->info.uses_linear_center &&
				sel->info.uses_linear_centroid;
		} else {
			/* Make sure SPI doesn't compute more than 1 pair
			 * of (i,j), which is the optimization here. */
			key->part.ps.prolog.force_persp_center_interp =
				sel->info.uses_persp_center +
				sel->info.uses_persp_centroid +
				sel->info.uses_persp_sample > 1;

			key->part.ps.prolog.force_linear_center_interp =
				sel->info.uses_linear_center +
				sel->info.uses_linear_centroid +
				sel->info.uses_linear_sample > 1;
		}

		key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
		break;
	}
	default:
		assert(0);
	}
}
static void si_build_shader_variant(void *job, int thread_index)
{
	struct si_shader *shader = (struct si_shader *)job;
	struct si_shader_selector *sel = shader->selector;
	struct si_screen *sscreen = sel->screen;
	LLVMTargetMachineRef tm;
	struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug;
	int r;

	if (thread_index >= 0) {
		assert(thread_index < ARRAY_SIZE(sscreen->tm));
		tm = sscreen->tm[thread_index];
		if (!debug->async)
			debug = NULL;
	} else {
		tm = shader->compiler_ctx_state.tm;
	}

	r = si_shader_create(sscreen, tm, shader, debug);
	if (unlikely(r)) {
		R600_ERR("Failed to build shader variant (type=%u) %d\n",
			 sel->type, r);
		shader->compilation_failed = true;
		return;
	}

	if (shader->compiler_ctx_state.is_debug_context) {
		FILE *f = open_memstream(&shader->shader_log,
					 &shader->shader_log_size);
		if (f) {
			si_shader_dump(sscreen, shader, NULL, sel->type, f, false);
			fclose(f);
		}
	}

	si_shader_init_pm4_state(sscreen, shader);
}
static const struct si_shader_key zeroed;
static bool si_check_missing_main_part(struct si_screen *sscreen,
				       struct si_shader_selector *sel,
				       struct si_compiler_ctx_state *compiler_state,
				       struct si_shader_key *key)
{
	struct si_shader **mainp = si_get_main_shader_part(sel, key);

	if (!*mainp) {
		struct si_shader *main_part = CALLOC_STRUCT(si_shader);

		if (!main_part)
			return false;

		main_part->selector = sel;
		main_part->key.as_es = key->as_es;
		main_part->key.as_ls = key->as_ls;

		if (si_compile_tgsi_shader(sscreen, compiler_state->tm,
					   main_part, false,
					   &compiler_state->debug) != 0) {
			FREE(main_part);
			return false;
		}
		*mainp = main_part;
	}
	return true;
}
static void si_destroy_shader_selector(struct si_context *sctx,
				       struct si_shader_selector *sel);
static void si_shader_selector_reference(struct si_context *sctx,
					 struct si_shader_selector **dst,
					 struct si_shader_selector *src)
{
	if (pipe_reference(&(*dst)->reference, &src->reference))
		si_destroy_shader_selector(sctx, *dst);

	*dst = src;
}
/* Select the hw shader variant depending on the current state. */
static int si_shader_select_with_key(struct si_screen *sscreen,
				     struct si_shader_ctx_state *state,
				     struct si_compiler_ctx_state *compiler_state,
				     struct si_shader_key *key,
				     int thread_index)
{
	struct si_shader_selector *sel = state->cso;
	struct si_shader_selector *previous_stage_sel = NULL;
	struct si_shader *current = state->current;
	struct si_shader *iter, *shader = NULL;

	if (unlikely(sscreen->b.debug_flags & DBG_NO_OPT_VARIANT)) {
		memset(&key->opt, 0, sizeof(key->opt));
	}

again:
	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(current &&
		   memcmp(&current->key, key, sizeof(*key)) == 0 &&
		   (!current->is_optimized ||
		    util_queue_fence_is_signalled(&current->optimized_ready))))
		return current->compilation_failed ? -1 : 0;

	/* This must be done before the mutex is locked, because async GS
	 * compilation calls this function too, and therefore must enter
	 * the mutex first.
	 *
	 * Only wait if we are in a draw call. Don't wait if we are
	 * in a compiler thread.
	 */
	if (thread_index < 0)
		util_queue_fence_wait(&sel->ready);

	mtx_lock(&sel->mutex);

	/* Find the shader variant. */
	for (iter = sel->first_variant; iter; iter = iter->next_variant) {
		/* Don't check the "current" shader. We checked it above. */
		if (current != iter &&
		    memcmp(&iter->key, key, sizeof(*key)) == 0) {
			/* If it's an optimized shader and its compilation has
			 * been started but isn't done, use the unoptimized
			 * shader so as not to cause a stall due to compilation.
			 */
			if (iter->is_optimized &&
			    !util_queue_fence_is_signalled(&iter->optimized_ready)) {
				memset(&key->opt, 0, sizeof(key->opt));
				mtx_unlock(&sel->mutex);
				goto again;
			}

			if (iter->compilation_failed) {
				mtx_unlock(&sel->mutex);
				return -1; /* skip the draw call */
			}

			state->current = iter;
			mtx_unlock(&sel->mutex);
			return 0;
		}
	}

	/* Build a new shader. */
	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		mtx_unlock(&sel->mutex);
		return -ENOMEM;
	}
	shader->selector = sel;
	shader->key = *key;
	shader->compiler_ctx_state = *compiler_state;

	/* If this is a merged shader, get the first shader's selector. */
	if (sscreen->b.chip_class >= GFX9) {
		if (sel->type == PIPE_SHADER_TESS_CTRL)
			previous_stage_sel = key->part.tcs.ls;
		else if (sel->type == PIPE_SHADER_GEOMETRY)
			previous_stage_sel = key->part.gs.es;
	}

	/* Compile the main shader part if it doesn't exist. This can happen
	 * if the initial guess was wrong. */
	bool is_pure_monolithic =
		sscreen->use_monolithic_shaders ||
		memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;

	if (!is_pure_monolithic) {
		bool ok;

		/* Make sure the main shader part is present. This is needed
		 * for shaders that can be compiled as VS, LS, or ES, and only
		 * one of them is compiled at creation.
		 *
		 * For merged shaders, check that the starting shader's main
		 * part is present.
		 */
		if (previous_stage_sel) {
			struct si_shader_key shader1_key = zeroed;

			if (sel->type == PIPE_SHADER_TESS_CTRL)
				shader1_key.as_ls = 1;
			else if (sel->type == PIPE_SHADER_GEOMETRY)
				shader1_key.as_es = 1;
			else
				assert(0);

			ok = si_check_missing_main_part(sscreen,
							previous_stage_sel,
							compiler_state, &shader1_key);
		} else {
			ok = si_check_missing_main_part(sscreen, sel,
							compiler_state, key);
		}
		if (!ok) {
			FREE(shader);
			mtx_unlock(&sel->mutex);
			return -ENOMEM; /* skip the draw call */
		}
	}

	/* Keep the reference to the 1st shader of merged shaders, so that
	 * Gallium can't destroy it before we destroy the 2nd shader.
	 *
	 * Set sctx = NULL, because it's unused if we're not releasing
	 * the shader, and we don't have any sctx here.
	 */
	si_shader_selector_reference(NULL, &shader->previous_stage_sel,
				     previous_stage_sel);

	/* Monolithic-only shaders don't make a distinction between optimized
	 * and unoptimized. */
	shader->is_monolithic =
		is_pure_monolithic ||
		memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;

	shader->is_optimized =
		!is_pure_monolithic &&
		memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
	if (shader->is_optimized)
		util_queue_fence_init(&shader->optimized_ready);

	if (!sel->last_variant) {
		sel->first_variant = shader;
		sel->last_variant = shader;
	} else {
		sel->last_variant->next_variant = shader;
		sel->last_variant = shader;
	}

	/* If it's an optimized shader, compile it asynchronously. */
	if (shader->is_optimized &&
	    !is_pure_monolithic &&
	    thread_index < 0) {
		/* Compile it asynchronously. */
		util_queue_add_job(&sscreen->shader_compiler_queue,
				   shader, &shader->optimized_ready,
				   si_build_shader_variant, NULL);

		/* Use the default (unoptimized) shader for now. */
		memset(&key->opt, 0, sizeof(key->opt));
		mtx_unlock(&sel->mutex);
		goto again;
	}

	assert(!shader->is_optimized);
	si_build_shader_variant(shader, thread_index);

	if (!shader->compilation_failed)
		state->current = shader;

	mtx_unlock(&sel->mutex);
	return shader->compilation_failed ? -1 : 0;
}
static int si_shader_select(struct pipe_context *ctx,
			    struct si_shader_ctx_state *state,
			    struct si_compiler_ctx_state *compiler_state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_key key;

	si_shader_selector_key(ctx, state->cso, &key);
	return si_shader_select_with_key(sctx->screen, state, compiler_state,
					 &key, -1);
}
static void si_parse_next_shader_property(const struct tgsi_shader_info *info,
					  struct si_shader_key *key)
{
	unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];

	switch (info->processor) {
	case PIPE_SHADER_VERTEX:
		switch (next_shader) {
		case PIPE_SHADER_GEOMETRY:
			key->as_es = 1;
			break;
		case PIPE_SHADER_TESS_CTRL:
		case PIPE_SHADER_TESS_EVAL:
			key->as_ls = 1;
			break;
		default:
			/* If POSITION isn't written, it can't be a HW VS.
			 * Assume that it's a HW LS. (the next shader is TCS)
			 * This heuristic is needed for separate shader objects.
			 */
			if (!info->writes_position)
				key->as_ls = 1;
		}
		break;

	case PIPE_SHADER_TESS_EVAL:
		if (next_shader == PIPE_SHADER_GEOMETRY ||
		    !info->writes_position)
			key->as_es = 1;
		break;
	}
}
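
/* Example: a vertex shader declaring TGSI_PROPERTY_NEXT_SHADER =
 * PIPE_SHADER_TESS_CTRL gets key->as_ls = 1 and is compiled as a HW LS,
 * while NEXT_SHADER = PIPE_SHADER_GEOMETRY gets key->as_es = 1 and is
 * compiled as a HW ES.
 */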
/**
 * Compile the main shader part or the monolithic shader as part of
 * si_shader_selector initialization. Since it can be done asynchronously,
 * there is no way to report compile failures to applications.
 */
void si_init_shader_selector_async(void *job, int thread_index)
{
	struct si_shader_selector *sel = (struct si_shader_selector *)job;
	struct si_screen *sscreen = sel->screen;
	LLVMTargetMachineRef tm;
	struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
	unsigned i;

	if (thread_index >= 0) {
		assert(thread_index < ARRAY_SIZE(sscreen->tm));
		tm = sscreen->tm[thread_index];
		if (!debug->async)
			debug = NULL;
	} else {
		tm = sel->compiler_ctx_state.tm;
	}

	/* Compile the main shader part for use with a prolog and/or epilog.
	 * If this fails, the driver will try to compile a monolithic shader
	 * on demand.
	 */
	if (!sscreen->use_monolithic_shaders) {
		struct si_shader *shader = CALLOC_STRUCT(si_shader);
		void *tgsi_binary;

		if (!shader) {
			fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
			return;
		}

		shader->selector = sel;
		si_parse_next_shader_property(&sel->info, &shader->key);

		tgsi_binary = si_get_tgsi_binary(sel);

		/* Try to load the shader from the shader cache. */
		mtx_lock(&sscreen->shader_cache_mutex);

		if (tgsi_binary &&
		    si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
			mtx_unlock(&sscreen->shader_cache_mutex);
		} else {
			mtx_unlock(&sscreen->shader_cache_mutex);

			/* Compile the shader if it hasn't been loaded from the cache. */
			if (si_compile_tgsi_shader(sscreen, tm, shader, false,
						   debug) != 0) {
				FREE(shader);
				FREE(tgsi_binary);
				fprintf(stderr, "radeonsi: can't compile a main shader part\n");
				return;
			}

			if (tgsi_binary) {
				mtx_lock(&sscreen->shader_cache_mutex);
				if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader, true))
					FREE(tgsi_binary);
				mtx_unlock(&sscreen->shader_cache_mutex);
			}
		}

		*si_get_main_shader_part(sel, &shader->key) = shader;

		/* Unset "outputs_written" flags for outputs converted to
		 * DEFAULT_VAL, so that later inter-shader optimizations don't
		 * try to eliminate outputs that don't exist in the final
		 * shader.
		 *
		 * This is only done if non-monolithic shaders are enabled.
		 */
		if ((sel->type == PIPE_SHADER_VERTEX ||
		     sel->type == PIPE_SHADER_TESS_EVAL) &&
		    !shader->key.as_ls &&
		    !shader->key.as_es) {
			unsigned i;

			for (i = 0; i < sel->info.num_outputs; i++) {
				unsigned offset = shader->info.vs_output_param_offset[i];

				if (offset <= AC_EXP_PARAM_OFFSET_31)
					continue;

				unsigned name = sel->info.output_semantic_name[i];
				unsigned index = sel->info.output_semantic_index[i];
				unsigned id;

				switch (name) {
				case TGSI_SEMANTIC_GENERIC:
					/* don't process indices the function can't handle */
					if (index >= SI_MAX_IO_GENERIC)
						break;
					/* fall through */
				default:
					id = si_shader_io_get_unique_index(name, index);
					sel->outputs_written &= ~(1ull << id);
					break;
				case TGSI_SEMANTIC_POSITION: /* ignore these */
				case TGSI_SEMANTIC_PSIZE:
				case TGSI_SEMANTIC_CLIPVERTEX:
				case TGSI_SEMANTIC_EDGEFLAG:
					break;
				}
			}
		}
	}

	/* Pre-compilation. */
	if (sscreen->b.debug_flags & DBG_PRECOMPILE) {
		struct si_shader_ctx_state state = {sel};
		struct si_shader_key key;

		memset(&key, 0, sizeof(key));
		si_parse_next_shader_property(&sel->info, &key);

		/* Set reasonable defaults, so that the shader key doesn't
		 * cause any code to be eliminated.
		 */
		switch (sel->type) {
		case PIPE_SHADER_TESS_CTRL:
			key.part.tcs.epilog.prim_mode = PIPE_PRIM_TRIANGLES;
			break;
		case PIPE_SHADER_FRAGMENT:
			key.part.ps.prolog.bc_optimize_for_persp =
				sel->info.uses_persp_center &&
				sel->info.uses_persp_centroid;
			key.part.ps.prolog.bc_optimize_for_linear =
				sel->info.uses_linear_center &&
				sel->info.uses_linear_centroid;
			key.part.ps.epilog.alpha_func = PIPE_FUNC_ALWAYS;
			for (i = 0; i < 8; i++)
				if (sel->info.colors_written & (1 << i))
					key.part.ps.epilog.spi_shader_col_format |=
						V_028710_SPI_SHADER_FP16_ABGR << (i * 4);
			break;
		}

		if (si_shader_select_with_key(sscreen, &state,
					      &sel->compiler_ctx_state, &key,
					      thread_index))
			fprintf(stderr, "radeonsi: can't create a monolithic shader\n");
	}

	/* The GS copy shader is always pre-compiled. */
	if (sel->type == PIPE_SHADER_GEOMETRY) {
		sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, tm, sel, debug);
		if (!sel->gs_copy_shader) {
			fprintf(stderr, "radeonsi: can't create GS copy shader\n");
			return;
		}

		si_shader_vs(sscreen, sel->gs_copy_shader, sel);
	}
}
static void *si_create_shader_selector(struct pipe_context *ctx,
				       const struct pipe_shader_state *state)
{
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
	int i;

	if (!sel)
		return NULL;

	pipe_reference_init(&sel->reference, 1);
	sel->screen = sscreen;
	sel->compiler_ctx_state.tm = sctx->tm;
	sel->compiler_ctx_state.debug = sctx->b.debug;
	sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
	sel->tokens = tgsi_dup_tokens(state->tokens);
	if (!sel->tokens) {
		FREE(sel);
		return NULL;
	}

	sel->so = state->stream_output;
	tgsi_scan_shader(state->tokens, &sel->info);
	sel->type = sel->info.processor;
	p_atomic_inc(&sscreen->b.num_shaders_created);

	/* The prolog is a no-op if there are no inputs. */
	sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
			       sel->info.num_inputs;

	/* Set which opcode uses which (i,j) pair. */
	if (sel->info.uses_persp_opcode_interp_centroid)
		sel->info.uses_persp_centroid = true;

	if (sel->info.uses_linear_opcode_interp_centroid)
		sel->info.uses_linear_centroid = true;

	if (sel->info.uses_persp_opcode_interp_offset ||
	    sel->info.uses_persp_opcode_interp_sample)
		sel->info.uses_persp_center = true;

	if (sel->info.uses_linear_opcode_interp_offset ||
	    sel->info.uses_linear_opcode_interp_sample)
		sel->info.uses_linear_center = true;

	switch (sel->type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		sel->gsvs_vertex_size = sel->info.num_outputs * 16;
		sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
					  sel->gs_max_out_vertices;

		sel->max_gs_stream = 0;
		for (i = 0; i < sel->so.num_outputs; i++)
			sel->max_gs_stream = MAX2(sel->max_gs_stream,
						  sel->so.output[i].stream);

		sel->gs_input_verts_per_prim =
			u_vertices_per_prim(sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* Always reserve space for these. */
		sel->patch_outputs_written |=
			(1llu << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0)) |
			(1llu << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0));
		/* fall through */
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_EVAL:
		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->patch_outputs_written |=
					1llu << si_shader_io_get_unique_index_patch(name, index);
				break;

			case TGSI_SEMANTIC_GENERIC:
				/* don't process indices the function can't handle */
				if (index >= SI_MAX_IO_GENERIC)
					break;
				/* fall through */
			default:
				sel->outputs_written |=
					1llu << si_shader_io_get_unique_index(name, index);
				break;
			case TGSI_SEMANTIC_CLIPVERTEX: /* ignore these */
			case TGSI_SEMANTIC_EDGEFLAG:
				break;
			}
		}
		sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;

		/* For the ESGS ring in LDS, add 1 dword to reduce LDS bank
		 * conflicts, i.e. each vertex will start at a different bank.
		 */
		if (sctx->b.chip_class >= GFX9)
			sel->esgs_itemsize += 4;
		break;

	case PIPE_SHADER_FRAGMENT:
		for (i = 0; i < sel->info.num_inputs; i++) {
			unsigned name = sel->info.input_semantic_name[i];
			unsigned index = sel->info.input_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_GENERIC:
				/* don't process indices the function can't handle */
				if (index >= SI_MAX_IO_GENERIC)
					break;
				/* fall through */
			default:
				sel->inputs_read |=
					1llu << si_shader_io_get_unique_index(name, index);
				break;
			case TGSI_SEMANTIC_PCOORD: /* ignore this */
				break;
			}
		}

		for (i = 0; i < 8; i++)
			if (sel->info.colors_written & (1 << i))
				sel->colors_written_4bit |= 0xf << (4 * i);

		for (i = 0; i < sel->info.num_inputs; i++) {
			if (sel->info.input_semantic_name[i] == TGSI_SEMANTIC_COLOR) {
				int index = sel->info.input_semantic_index[i];
				sel->color_attr_index[index] = i;
			}
		}
		break;
	}

	/* DB_SHADER_CONTROL */
	sel->db_shader_control =
		S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
		S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(sel->info.writes_stencil) |
		S_02880C_MASK_EXPORT_ENABLE(sel->info.writes_samplemask) |
		S_02880C_KILL_ENABLE(sel->info.uses_kill);

	switch (sel->info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT]) {
	case TGSI_FS_DEPTH_LAYOUT_GREATER:
		sel->db_shader_control |=
			S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
		break;
	case TGSI_FS_DEPTH_LAYOUT_LESS:
		sel->db_shader_control |=
			S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
		break;
	}

	/* Z_ORDER, EXEC_ON_HIER_FAIL and EXEC_ON_NOOP should be set as following:
	 *
	 *   | early Z/S | writes_mem | allow_ReZ? |      Z_ORDER      | EXEC_ON_HIER_FAIL | EXEC_ON_NOOP
	 * --|-----------|------------|------------|-------------------|-------------------|-------------
	 * 1a|   false   |   false    |    true    |  EarlyZ_Then_ReZ  |         0         |      0
	 * 1b|   false   |   false    |   false    | EarlyZ_Then_LateZ |         0         |      0
	 * 2 |   false   |   true     |    n/a     |       LateZ       |         1         |      0
	 * 3 |   true    |   false    |    n/a     | EarlyZ_Then_LateZ |         0         |      0
	 * 4 |   true    |   true     |    n/a     | EarlyZ_Then_LateZ |         0         |      1
	 *
	 * In cases 3 and 4, HW will force Z_ORDER to EarlyZ regardless of what's set in the register.
	 * In case 2, NOOP_CULL is a don't care field. In case 2, 3 and 4, ReZ doesn't make sense.
	 *
	 * Don't use ReZ without profiling !!!
	 *
	 * ReZ decreases performance by 15% in DiRT: Showdown on Ultra settings, which has pretty complex
	 * shaders.
	 */
	if (sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]) {
		/* Cases 3, 4. */
		sel->db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) |
					  S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
					  S_02880C_EXEC_ON_NOOP(sel->info.writes_memory);
	} else if (sel->info.writes_memory) {
		/* Case 2. */
		sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z) |
					  S_02880C_EXEC_ON_HIER_FAIL(1);
	} else {
		/* Case 1. */
		sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
	}

	(void) mtx_init(&sel->mutex, mtx_plain);
	util_queue_fence_init(&sel->ready);

	if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
	    sctx->is_debug ||
	    r600_can_dump_shader(&sscreen->b, sel->info.processor))
		si_init_shader_selector_async(sel, -1);
	else
		util_queue_add_job(&sscreen->shader_compiler_queue, sel,
				   &sel->ready, si_init_shader_selector_async,
				   NULL);

	return sel;
}
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->vs_shader.cso == sel)
		return;

	sctx->vs_shader.cso = sel;
	sctx->vs_shader.current = sel ? sel->first_variant : NULL;
	sctx->do_update_shaders = true;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}
static void si_update_tess_uses_prim_id(struct si_context *sctx)
{
	sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id =
		(sctx->tes_shader.cso &&
		 sctx->tes_shader.cso->info.uses_primid) ||
		(sctx->tcs_shader.cso &&
		 sctx->tcs_shader.cso->info.uses_primid) ||
		(sctx->gs_shader.cso &&
		 sctx->gs_shader.cso->info.uses_primid) ||
		(sctx->ps_shader.cso && !sctx->gs_shader.cso &&
		 sctx->ps_shader.cso->info.uses_primid);
}
static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->gs_shader.cso != !!sel;

	if (sctx->gs_shader.cso == sel)
		return;

	sctx->gs_shader.cso = sel;
	sctx->gs_shader.current = sel ? sel->first_variant : NULL;
	sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;
	sctx->do_update_shaders = true;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed) {
		si_shader_change_notify(sctx);
		if (sctx->ia_multi_vgt_param_key.u.uses_tess)
			si_update_tess_uses_prim_id(sctx);
	}
	r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}
static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tcs_shader.cso != !!sel;

	if (sctx->tcs_shader.cso == sel)
		return;

	sctx->tcs_shader.cso = sel;
	sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
	si_update_tess_uses_prim_id(sctx);
	sctx->do_update_shaders = true;

	if (enable_changed)
		sctx->last_tcs = NULL; /* invalidate derived tess state */
}
static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tes_shader.cso != !!sel;

	if (sctx->tes_shader.cso == sel)
		return;

	sctx->tes_shader.cso = sel;
	sctx->tes_shader.current = sel ? sel->first_variant : NULL;
	sctx->ia_multi_vgt_param_key.u.uses_tess = sel != NULL;
	si_update_tess_uses_prim_id(sctx);
	sctx->do_update_shaders = true;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed) {
		si_shader_change_notify(sctx);
		sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
	}
	r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}
static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	/* skip if supplied shader is one already in use */
	if (sctx->ps_shader.cso == sel)
		return;

	sctx->ps_shader.cso = sel;
	sctx->ps_shader.current = sel ? sel->first_variant : NULL;
	sctx->do_update_shaders = true;
	if (sel && sctx->ia_multi_vgt_param_key.u.uses_tess)
		si_update_tess_uses_prim_id(sctx);
	si_mark_atom_dirty(sctx, &sctx->cb_render_state);
}
static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
{
	if (shader->is_optimized) {
		util_queue_fence_wait(&shader->optimized_ready);
		util_queue_fence_destroy(&shader->optimized_ready);
	}

	if (shader->pm4) {
		switch (shader->selector->type) {
		case PIPE_SHADER_VERTEX:
			if (shader->key.as_ls) {
				assert(sctx->b.chip_class <= VI);
				si_pm4_delete_state(sctx, ls, shader->pm4);
			} else if (shader->key.as_es) {
				assert(sctx->b.chip_class <= VI);
				si_pm4_delete_state(sctx, es, shader->pm4);
			} else {
				si_pm4_delete_state(sctx, vs, shader->pm4);
			}
			break;
		case PIPE_SHADER_TESS_CTRL:
			si_pm4_delete_state(sctx, hs, shader->pm4);
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (shader->key.as_es) {
				assert(sctx->b.chip_class <= VI);
				si_pm4_delete_state(sctx, es, shader->pm4);
			} else {
				si_pm4_delete_state(sctx, vs, shader->pm4);
			}
			break;
		case PIPE_SHADER_GEOMETRY:
			if (shader->is_gs_copy_shader)
				si_pm4_delete_state(sctx, vs, shader->pm4);
			else
				si_pm4_delete_state(sctx, gs, shader->pm4);
			break;
		case PIPE_SHADER_FRAGMENT:
			si_pm4_delete_state(sctx, ps, shader->pm4);
			break;
		}
	}

	si_shader_selector_reference(sctx, &shader->previous_stage_sel, NULL);
	si_shader_destroy(shader);
	free(shader);
}
static void si_destroy_shader_selector(struct si_context *sctx,
				       struct si_shader_selector *sel)
{
	struct si_shader *p = sel->first_variant, *c;
	struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
		[PIPE_SHADER_VERTEX] = &sctx->vs_shader,
		[PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
		[PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
		[PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
		[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
	};

	util_queue_fence_wait(&sel->ready);

	if (current_shader[sel->type]->cso == sel) {
		current_shader[sel->type]->cso = NULL;
		current_shader[sel->type]->current = NULL;
	}

	while (p) {
		c = p->next_variant;
		si_delete_shader(sctx, p);
		p = c;
	}

	if (sel->main_shader_part)
		si_delete_shader(sctx, sel->main_shader_part);
	if (sel->main_shader_part_ls)
		si_delete_shader(sctx, sel->main_shader_part_ls);
	if (sel->main_shader_part_es)
		si_delete_shader(sctx, sel->main_shader_part_es);
	if (sel->gs_copy_shader)
		si_delete_shader(sctx, sel->gs_copy_shader);

	util_queue_fence_destroy(&sel->ready);
	mtx_destroy(&sel->mutex);
	free(sel->tokens);
	free(sel);
}
static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	si_shader_selector_reference(sctx, &sel, NULL);
}
static unsigned si_get_ps_input_cntl(struct si_context *sctx,
				     struct si_shader *vs, unsigned name,
				     unsigned index, unsigned interpolate)
{
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	unsigned j, offset, ps_input_cntl = 0;

	if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
	    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
		ps_input_cntl |= S_028644_FLAT_SHADE(1);

	if (name == TGSI_SEMANTIC_PCOORD ||
	    (name == TGSI_SEMANTIC_TEXCOORD &&
	     sctx->sprite_coord_enable & (1 << index))) {
		ps_input_cntl |= S_028644_PT_SPRITE_TEX(1);
	}

	for (j = 0; j < vsinfo->num_outputs; j++) {
		if (name == vsinfo->output_semantic_name[j] &&
		    index == vsinfo->output_semantic_index[j]) {
			offset = vs->info.vs_output_param_offset[j];

			if (offset <= AC_EXP_PARAM_OFFSET_31) {
				/* The input is loaded from parameter memory. */
				ps_input_cntl |= S_028644_OFFSET(offset);
			} else if (!G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
				if (offset == AC_EXP_PARAM_UNDEFINED) {
					/* This can happen with depth-only rendering. */
					offset = 0;
				} else {
					/* The input is a DEFAULT_VAL constant. */
					assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
					       offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
					offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
				}

				ps_input_cntl = S_028644_OFFSET(0x20) |
						S_028644_DEFAULT_VAL(offset);
			}
			break;
		}
	}

	if (name == TGSI_SEMANTIC_PRIMID)
		/* PrimID is written after the last output. */
		ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[vsinfo->num_outputs]);
	else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
		/* No corresponding output found, load defaults into input.
		 * Don't set any other bits.
		 * (FLAT_SHADE=1 completely changes behavior) */
		ps_input_cntl = S_028644_OFFSET(0x20);
		/* D3D 9 behaviour. GL is undefined */
		if (name == TGSI_SEMANTIC_COLOR && index == 0)
			ps_input_cntl |= S_028644_DEFAULT_VAL(3);
	}
	return ps_input_cntl;
}
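
/* Example: a flat-shaded input matching a VS output exported at parameter
 * offset 5 yields S_028644_FLAT_SHADE(1) | S_028644_OFFSET(5). An input with
 * no matching VS output yields S_028644_OFFSET(0x20), which makes the HW load
 * DEFAULT_VAL instead of a parameter; given the AC_EXP_PARAM_DEFAULT_VAL_0000
 * .. _1111 ordering above, DEFAULT_VAL(3) selects (1,1,1,1) for the D3D9
 * COLOR0 case.
 */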
static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader *ps = sctx->ps_shader.current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo = ps ? &ps->selector->info : NULL;
	unsigned i, num_interp, num_written = 0, bcol_interp[2];

	if (!ps || !ps->selector->info.num_inputs)
		return;

	num_interp = si_get_ps_num_interp(ps);
	assert(num_interp > 0);
	radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, num_interp);

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];

		radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, name, index,
						     interpolate));
		num_written++;

		if (name == TGSI_SEMANTIC_COLOR) {
			assert(index < ARRAY_SIZE(bcol_interp));
			bcol_interp[index] = interpolate;
		}
	}

	if (ps->key.part.ps.prolog.color_two_side) {
		unsigned bcol = TGSI_SEMANTIC_BCOLOR;

		for (i = 0; i < 2; i++) {
			if (!(psinfo->colors_read & (0xf << (i * 4))))
				continue;

			radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, bcol,
							     i, bcol_interp[i]));
			num_written++;
		}
	}
	assert(num_interp == num_written);
}
/**
 * Writing CONFIG or UCONFIG VGT registers requires VGT_FLUSH before that.
 */
static void si_init_config_add_vgt_flush(struct si_context *sctx)
{
	if (sctx->init_config_has_vgt_flush)
		return;

	/* Done by Vulkan before VGT_FLUSH. */
	si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
	si_pm4_cmd_add(sctx->init_config,
		       EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	si_pm4_cmd_end(sctx->init_config, false);

	/* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
	si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
	si_pm4_cmd_add(sctx->init_config, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	si_pm4_cmd_end(sctx->init_config, false);
	sctx->init_config_has_vgt_flush = true;
}
/* Initialize state related to ESGS / GSVS ring buffers */
static bool si_update_gs_ring_buffers(struct si_context *sctx)
{
	struct si_shader_selector *es =
		sctx->tes_shader.cso ? sctx->tes_shader.cso : sctx->vs_shader.cso;
	struct si_shader_selector *gs = sctx->gs_shader.cso;
	struct si_pm4_state *pm4;

	/* Chip constants. */
	unsigned num_se = sctx->screen->b.info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	/* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
	 * On VI+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
	 */
	unsigned gs_vertex_reuse = (sctx->b.chip_class >= VI ? 32 : 16) * num_se;
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;

	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(es->esgs_itemsize * gs_vertex_reuse *
					    wave_size, alignment);

	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
				  es->esgs_itemsize * gs->gs_input_verts_per_prim;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
				  gs->max_gsvs_emit_size;

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
	gsvs_ring_size = MIN2(gsvs_ring_size, max_size);

	/* Some rings don't have to be allocated if shaders don't use them.
	 * (e.g. no varyings between ES and GS or GS and VS)
	 *
	 * GFX9 doesn't have the ESGS ring.
	 */
	bool update_esgs = sctx->b.chip_class <= VI &&
			   esgs_ring_size &&
			   (!sctx->esgs_ring ||
			    sctx->esgs_ring->width0 < esgs_ring_size);
	bool update_gsvs = gsvs_ring_size &&
			   (!sctx->gsvs_ring ||
			    sctx->gsvs_ring->width0 < gsvs_ring_size);

	if (!update_esgs && !update_gsvs)
		return true;

	if (update_esgs) {
		pipe_resource_reference(&sctx->esgs_ring, NULL);
		sctx->esgs_ring =
			r600_aligned_buffer_create(sctx->b.b.screen,
						   R600_RESOURCE_FLAG_UNMAPPABLE,
						   PIPE_USAGE_DEFAULT,
						   esgs_ring_size, alignment);
		if (!sctx->esgs_ring)
			return false;
	}

	if (update_gsvs) {
		pipe_resource_reference(&sctx->gsvs_ring, NULL);
		sctx->gsvs_ring =
			r600_aligned_buffer_create(sctx->b.b.screen,
						   R600_RESOURCE_FLAG_UNMAPPABLE,
						   PIPE_USAGE_DEFAULT,
						   gsvs_ring_size, alignment);
		if (!sctx->gsvs_ring)
			return false;
	}

	/* Create the "init_config_gs_rings" state. */
	pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return false;

	if (sctx->b.chip_class >= CIK) {
		if (sctx->esgs_ring) {
			assert(sctx->b.chip_class <= VI);
			si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
				       sctx->esgs_ring->width0 / 256);
		}
		if (sctx->gsvs_ring)
			si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE,
				       sctx->gsvs_ring->width0 / 256);
	} else {
		if (sctx->esgs_ring)
			si_pm4_set_reg(pm4, R_0088C8_VGT_ESGS_RING_SIZE,
				       sctx->esgs_ring->width0 / 256);
		if (sctx->gsvs_ring)
			si_pm4_set_reg(pm4, R_0088CC_VGT_GSVS_RING_SIZE,
				       sctx->gsvs_ring->width0 / 256);
	}

	/* Set the state. */
	if (sctx->init_config_gs_rings)
		si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
	sctx->init_config_gs_rings = pm4;

	if (!sctx->init_config_has_vgt_flush) {
		si_init_config_add_vgt_flush(sctx);
		si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	}

	/* Flush the context to re-emit both init_config states. */
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	/* Set ring bindings. */
	if (sctx->esgs_ring) {
		assert(sctx->b.chip_class <= VI);
		si_set_ring_buffer(&sctx->b.b, SI_ES_RING_ESGS,
				   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
				   true, true, 4, 64, 0);
		si_set_ring_buffer(&sctx->b.b, SI_GS_RING_ESGS,
				   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
				   false, false, 0, 0, 0);
	}
	if (sctx->gsvs_ring) {
		si_set_ring_buffer(&sctx->b.b, SI_RING_GSVS,
				   sctx->gsvs_ring, 0, sctx->gsvs_ring->width0,
				   false, false, 0, 0, 0);
	}

	return true;
}
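
/* Example sizing with the formulas above: on a 4-SE VI part, num_se = 4,
 * max_gs_waves = 128 and alignment = 1024; with es->esgs_itemsize = 64 and
 * triangle input (gs_input_verts_per_prim = 3), the recommended ESGS ring
 * size is 128 * 2 * 64 * 64 * 3 = 3 MiB before alignment and clamping.
 */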
static void si_shader_lock(struct si_shader *shader)
{
	mtx_lock(&shader->selector->mutex);
	if (shader->previous_stage_sel) {
		assert(shader->previous_stage_sel != shader->selector);
		mtx_lock(&shader->previous_stage_sel->mutex);
	}
}

static void si_shader_unlock(struct si_shader *shader)
{
	if (shader->previous_stage_sel)
		mtx_unlock(&shader->previous_stage_sel->mutex);
	mtx_unlock(&shader->selector->mutex);
}
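
/* Note the lock order: the shader's own selector mutex is always taken
 * before the merged previous stage's mutex, and si_shader_unlock releases
 * them in reverse order, so paired lock/unlock calls can't deadlock.
 */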
/**
 * @returns 1 if \p sel has been updated to use a new scratch buffer
 *          0 if not
 *          < 0 if there was a failure
 */
static int si_update_scratch_buffer(struct si_context *sctx,
				    struct si_shader *shader)
{
	uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
	int r;

	if (!shader)
		return 0;

	/* This shader doesn't need a scratch buffer */
	if (shader->config.scratch_bytes_per_wave == 0)
		return 0;

	/* Prevent race conditions when updating:
	 * - si_shader::scratch_bo
	 * - si_shader::binary::code
	 * - si_shader::previous_stage::binary::code.
	 */
	si_shader_lock(shader);

	/* This shader is already configured to use the current
	 * scratch buffer. */
	if (shader->scratch_bo == sctx->scratch_buffer) {
		si_shader_unlock(shader);
		return 0;
	}

	assert(sctx->scratch_buffer);

	if (shader->previous_stage)
		si_shader_apply_scratch_relocs(shader->previous_stage, scratch_va);

	si_shader_apply_scratch_relocs(shader, scratch_va);

	/* Replace the shader bo with a new bo that has the relocs applied. */
	r = si_shader_binary_upload(sctx->screen, shader);
	if (r) {
		si_shader_unlock(shader);
		return r;
	}

	/* Update the shader state to use the new shader bo. */
	si_shader_init_pm4_state(sctx->screen, shader);

	r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

	si_shader_unlock(shader);
	return 1;
}
static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
	return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
}

static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
{
	return shader ? shader->config.scratch_bytes_per_wave : 0;
}

static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
	unsigned bytes = 0;

	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tcs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
	return bytes;
}
static bool si_update_scratch_relocs(struct si_context *sctx)
{
	int r;

	/* Update the shaders, so that they are using the latest scratch.
	 * The scratch buffer may have been changed since these shaders were
	 * last used, so we still need to try to update them, even if they
	 * require scratch buffers smaller than the current size.
	 */
	r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
	if (r < 0)
		return false;
	if (r == 1)
		si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

	r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
	if (r < 0)
		return false;
	if (r == 1)
		si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);

	r = si_update_scratch_buffer(sctx, sctx->tcs_shader.current);
	if (r < 0)
		return false;
	if (r == 1)
		si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);

	/* VS can be bound as LS, ES, or VS. */
	r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
	if (r < 0)
		return false;
	if (r == 1) {
		if (sctx->tes_shader.current)
			si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
		else if (sctx->gs_shader.current)
			si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
		else
			si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
	}

	/* TES can be bound as ES or VS. */
	r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
	if (r < 0)
		return false;
	if (r == 1) {
		if (sctx->gs_shader.current)
			si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
		else
			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
	}

	return true;
}
static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
	unsigned current_scratch_buffer_size =
		si_get_current_scratch_buffer_size(sctx);
	unsigned scratch_bytes_per_wave =
		si_get_max_scratch_bytes_per_wave(sctx);
	unsigned scratch_needed_size = scratch_bytes_per_wave *
		sctx->scratch_waves;
	unsigned spi_tmpring_size;

	if (scratch_needed_size > 0) {
		if (scratch_needed_size > current_scratch_buffer_size) {
			/* Create a bigger scratch buffer */
			r600_resource_reference(&sctx->scratch_buffer, NULL);

			sctx->scratch_buffer = (struct r600_resource*)
				r600_aligned_buffer_create(&sctx->screen->b.b,
							   R600_RESOURCE_FLAG_UNMAPPABLE,
							   PIPE_USAGE_DEFAULT,
							   scratch_needed_size, 256);
			if (!sctx->scratch_buffer)
				return false;

			si_mark_atom_dirty(sctx, &sctx->scratch_state);
			r600_context_add_resource_size(&sctx->b.b,
						       &sctx->scratch_buffer->b.b);
		}

		if (!si_update_scratch_relocs(sctx))
			return false;
	}

	/* The LLVM shader backend should be reporting aligned scratch_sizes. */
	assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
	       "scratch size should already be aligned correctly.");

	spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
			   S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
	if (spi_tmpring_size != sctx->spi_tmpring_size) {
		sctx->spi_tmpring_size = spi_tmpring_size;
		si_mark_atom_dirty(sctx, &sctx->scratch_state);
	}
	return true;
}
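
/* Example encoding: with scratch_waves = 32 and scratch_bytes_per_wave =
 * 16384, SPI_TMPRING_SIZE gets WAVES = 32 and WAVESIZE = 16384 >> 10 = 16,
 * i.e. the wave size field is in 1024-byte units, which is also why the
 * assertion above requires the needed size to be a multiple of 0x400.
 */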
static void si_init_tess_factor_ring(struct si_context *sctx)
{
	bool double_offchip_buffers = sctx->b.chip_class >= CIK &&
				      sctx->b.family != CHIP_CARRIZO &&
				      sctx->b.family != CHIP_STONEY;
	unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
	unsigned max_offchip_buffers = max_offchip_buffers_per_se *
				       sctx->screen->b.info.max_se;
	unsigned offchip_granularity;

	switch (sctx->screen->tess_offchip_block_dw_size) {
	default:
		assert(0);
		/* fall through */
	case 8192:
		offchip_granularity = V_03093C_X_8K_DWORDS;
		break;
	case 4096:
		offchip_granularity = V_03093C_X_4K_DWORDS;
		break;
	}

	switch (sctx->b.chip_class) {
	case SI:
		max_offchip_buffers = MIN2(max_offchip_buffers, 126);
		break;
	case CIK:
	case VI:
	case GFX9:
		max_offchip_buffers = MIN2(max_offchip_buffers, 508);
		break;
	default:
		assert(0);
		return;
	}

	assert(!sctx->tf_ring);
	/* Use 64K alignment for both rings, so that we can pass the address
	 * to shaders as one SGPR containing bits [16:47].
	 */
	sctx->tf_ring = r600_aligned_buffer_create(sctx->b.b.screen,
						   R600_RESOURCE_FLAG_UNMAPPABLE,
						   PIPE_USAGE_DEFAULT,
						   32768 * sctx->screen->b.info.max_se,
						   64 * 1024);
	if (!sctx->tf_ring)
		return;

	assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);

	sctx->tess_offchip_ring =
		r600_aligned_buffer_create(sctx->b.b.screen,
					   R600_RESOURCE_FLAG_UNMAPPABLE,
					   PIPE_USAGE_DEFAULT,
					   max_offchip_buffers *
					   sctx->screen->tess_offchip_block_dw_size * 4,
					   64 * 1024);
	if (!sctx->tess_offchip_ring)
		return;

	si_init_config_add_vgt_flush(sctx);

	uint64_t offchip_va = r600_resource(sctx->tess_offchip_ring)->gpu_address;
	uint64_t factor_va = r600_resource(sctx->tf_ring)->gpu_address;
	assert((offchip_va & 0xffff) == 0);
	assert((factor_va & 0xffff) == 0);

	si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tess_offchip_ring),
		      RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
	si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tf_ring),
		      RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);

	/* Append these registers to the init config state. */
	if (sctx->b.chip_class >= CIK) {
		if (sctx->b.chip_class >= VI)
			--max_offchip_buffers;

		si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
			       S_030938_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
			       factor_va >> 8);
		if (sctx->b.chip_class >= GFX9)
			si_pm4_set_reg(sctx->init_config, R_030944_VGT_TF_MEMORY_BASE_HI,
				       factor_va >> 40);
		si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
			       S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
			       S_03093C_OFFCHIP_GRANULARITY(offchip_granularity));
	} else {
		assert(offchip_granularity == V_03093C_X_8K_DWORDS);
		si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
			       S_008988_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
			       factor_va >> 8);
		si_pm4_set_reg(sctx->init_config, R_0089B0_VGT_HS_OFFCHIP_PARAM,
			       S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers));
	}

	if (sctx->b.chip_class >= GFX9) {
		si_pm4_set_reg(sctx->init_config,
			       R_00B430_SPI_SHADER_USER_DATA_LS_0 +
			       GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K * 4,
			       offchip_va >> 16);
		si_pm4_set_reg(sctx->init_config,
			       R_00B430_SPI_SHADER_USER_DATA_LS_0 +
			       GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
			       factor_va >> 16);
	} else {
		si_pm4_set_reg(sctx->init_config,
			       R_00B430_SPI_SHADER_USER_DATA_HS_0 +
			       GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K * 4,
			       offchip_va >> 16);
		si_pm4_set_reg(sctx->init_config,
			       R_00B430_SPI_SHADER_USER_DATA_HS_0 +
			       GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
			       factor_va >> 16);
	}

	/* Flush the context to re-emit the init_config state.
	 * This is done only once in a lifetime of a context.
	 */
	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
}
/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
static void si_generate_fixed_func_tcs(struct si_context *sctx)
{
	struct ureg_src outer, inner;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!sctx->fixed_func_tcs_shader.cso);

	outer = ureg_DECL_system_value(ureg,
				       TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI, 0);
	inner = ureg_DECL_system_value(ureg,
				       TGSI_SEMANTIC_DEFAULT_TESSINNER_SI, 0);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, outer);
	ureg_MOV(ureg, tessinner, inner);
	ureg_END(ureg);

	sctx->fixed_func_tcs_shader.cso =
		ureg_create_shader_and_destroy(ureg, &sctx->b.b);
}
static void si_update_vgt_shader_config(struct si_context *sctx)
{
	/* Calculate the index of the config.
	 * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
	unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso;
	struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];

	if (!*pm4) {
		uint32_t stages = 0;

		*pm4 = CALLOC_STRUCT(si_pm4_state);

		if (sctx->tes_shader.cso) {
			stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
				  S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);

			if (sctx->gs_shader.cso)
				stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
					  S_028B54_GS_EN(1) |
					  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
			else
				stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
		} else if (sctx->gs_shader.cso) {
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				  S_028B54_GS_EN(1) |
				  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
		}

		if (sctx->b.chip_class >= GFX9)
			stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);

		si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
	}
	si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}
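
/* Example: with both tessellation and a GS bound, index = 2*1 + 1 = 3 and
 * the cached config enables the LS, HS, ES(=DS) and GS stages plus the
 * copy-shader VS stage.
 */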
static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
{
	struct pipe_stream_output_info *so = &shader->so;
	uint32_t enabled_stream_buffers_mask = 0;
	int i;

	for (i = 0; i < so->num_outputs; i++)
		enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << (so->output[i].stream * 4);
	sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
	sctx->b.streamout.stride_in_dw = shader->so.stride;
}
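
/* Example: a streamout output written to buffer 2 on stream 1 sets bit
 * (1 << 2) << (1 * 4) = 0x40, i.e. each stream owns a 4-bit nibble of
 * enabled buffers in enabled_stream_buffers_mask.
 */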
bool si_update_shaders(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context *)sctx;
	struct si_compiler_ctx_state compiler_state;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct si_shader *old_vs = si_get_vs_state(sctx);
	bool old_clip_disable = old_vs ? old_vs->key.opt.hw_vs.clip_disable : false;
	int r;

	compiler_state.tm = sctx->tm;
	compiler_state.debug = sctx->b.debug;
	compiler_state.is_debug_context = sctx->is_debug;

	/* Update stages before GS. */
	if (sctx->tes_shader.cso) {
		if (!sctx->tf_ring) {
			si_init_tess_factor_ring(sctx);
			if (!sctx->tf_ring)
				return false;
		}

		/* VS as LS */
		if (sctx->b.chip_class <= VI) {
			r = si_shader_select(ctx, &sctx->vs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
		}

		if (sctx->tcs_shader.cso) {
			r = si_shader_select(ctx, &sctx->tcs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
		} else {
			if (!sctx->fixed_func_tcs_shader.cso) {
				si_generate_fixed_func_tcs(sctx);
				if (!sctx->fixed_func_tcs_shader.cso)
					return false;
			}

			r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs,
					  sctx->fixed_func_tcs_shader.current->pm4);
		}

		if (sctx->gs_shader.cso) {
			/* TES as ES */
			if (sctx->b.chip_class <= VI) {
				r = si_shader_select(ctx, &sctx->tes_shader,
						     &compiler_state);
				if (r)
					return false;
				si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
			}
		} else {
			/* TES as VS */
			r = si_shader_select(ctx, &sctx->tes_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
			si_update_so(sctx, sctx->tes_shader.cso);
		}
	} else if (sctx->gs_shader.cso) {
		if (sctx->b.chip_class <= VI) {
			/* VS as ES */
			r = si_shader_select(ctx, &sctx->vs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);

			si_pm4_bind_state(sctx, ls, NULL);
			si_pm4_bind_state(sctx, hs, NULL);
		}
	} else {
		/* VS as VS */
		r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		si_update_so(sctx, sctx->vs_shader.cso);

		si_pm4_bind_state(sctx, ls, NULL);
		si_pm4_bind_state(sctx, hs, NULL);
	}

	/* Update GS. */
	if (sctx->gs_shader.cso) {
		r = si_shader_select(ctx, &sctx->gs_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4);
		si_update_so(sctx, sctx->gs_shader.cso);

		if (!si_update_gs_ring_buffers(sctx))
			return false;
	} else {
		si_pm4_bind_state(sctx, gs, NULL);
		if (sctx->b.chip_class <= VI)
			si_pm4_bind_state(sctx, es, NULL);
	}

	si_update_vgt_shader_config(sctx);

	if (old_clip_disable != si_get_vs_state(sctx)->key.opt.hw_vs.clip_disable)
		si_mark_atom_dirty(sctx, &sctx->clip_regs);

	if (sctx->ps_shader.cso) {
		unsigned db_shader_control;

		r = si_shader_select(ctx, &sctx->ps_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		db_shader_control =
			sctx->ps_shader.cso->db_shader_control |
			S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS);

		if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
		    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
		    sctx->flatshade != rs->flatshade) {
			sctx->sprite_coord_enable = rs->sprite_coord_enable;
			sctx->flatshade = rs->flatshade;
			si_mark_atom_dirty(sctx, &sctx->spi_map);
		}

		if (sctx->screen->b.rbplus_allowed && si_pm4_state_changed(sctx, ps))
			si_mark_atom_dirty(sctx, &sctx->cb_render_state);

		if (sctx->ps_db_shader_control != db_shader_control) {
			sctx->ps_db_shader_control = db_shader_control;
			si_mark_atom_dirty(sctx, &sctx->db_render_state);
		}

		if (sctx->smoothing_enabled != sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing) {
			sctx->smoothing_enabled = sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing;
			si_mark_atom_dirty(sctx, &sctx->msaa_config);

			if (sctx->b.chip_class == SI)
				si_mark_atom_dirty(sctx, &sctx->db_render_state);

			if (sctx->framebuffer.nr_samples <= 1)
				si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
		}
	}

	if (si_pm4_state_changed(sctx, ls) ||
	    si_pm4_state_changed(sctx, hs) ||
	    si_pm4_state_changed(sctx, es) ||
	    si_pm4_state_changed(sctx, gs) ||
	    si_pm4_state_changed(sctx, vs) ||
	    si_pm4_state_changed(sctx, ps)) {
		if (!si_update_spi_tmpring_size(sctx))
			return false;
	}

	if (sctx->b.chip_class >= CIK)
		si_mark_atom_dirty(sctx, &sctx->prefetch_L2);

	sctx->do_update_shaders = false;
	return true;
}
static void si_emit_scratch_state(struct si_context *sctx,
				  struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
			       sctx->spi_tmpring_size);

	if (sctx->scratch_buffer) {
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  sctx->scratch_buffer, RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SCRATCH_BUFFER);
	}
}
void si_init_shader_functions(struct si_context *sctx)
{
	si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);
	si_init_atom(sctx, &sctx->scratch_state, &sctx->atoms.s.scratch_state,
		     si_emit_scratch_state);

	sctx->b.b.create_vs_state = si_create_shader_selector;
	sctx->b.b.create_tcs_state = si_create_shader_selector;
	sctx->b.b.create_tes_state = si_create_shader_selector;
	sctx->b.b.create_gs_state = si_create_shader_selector;
	sctx->b.b.create_fs_state = si_create_shader_selector;

	sctx->b.b.bind_vs_state = si_bind_vs_shader;
	sctx->b.b.bind_tcs_state = si_bind_tcs_shader;
	sctx->b.b.bind_tes_state = si_bind_tes_shader;
	sctx->b.b.bind_gs_state = si_bind_gs_shader;
	sctx->b.b.bind_fs_state = si_bind_ps_shader;

	sctx->b.b.delete_vs_state = si_delete_shader_selector;
	sctx->b.b.delete_tcs_state = si_delete_shader_selector;
	sctx->b.b.delete_tes_state = si_delete_shader_selector;
	sctx->b.b.delete_gs_state = si_delete_shader_selector;
	sctx->b.b.delete_fs_state = si_delete_shader_selector;
}