/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "radeon/r600_cs.h"

#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "util/hash_table.h"
#include "util/crc32.h"
#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_prim.h"

#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"
/**
 * Return the TGSI binary in a buffer. The first 4 bytes contain its size as
 * integer.
 */
static void *si_get_tgsi_binary(struct si_shader_selector *sel)
{
	unsigned tgsi_size = tgsi_num_tokens(sel->tokens) *
			     sizeof(struct tgsi_token);
	unsigned size = 4 + tgsi_size + sizeof(sel->so);
	char *result = (char*)MALLOC(size);

	if (!result)
		return NULL;

	*((uint32_t*)result) = size;
	memcpy(result + 4, sel->tokens, tgsi_size);
	memcpy(result + 4 + tgsi_size, &sel->so, sizeof(sel->so));
	return result;
}

/** Copy "data" to "ptr" and return the next dword following copied data. */
static uint32_t *write_data(uint32_t *ptr, const void *data, unsigned size)
{
	/* data may be NULL if size == 0 */
	if (size)
		memcpy(ptr, data, size);
	ptr += DIV_ROUND_UP(size, 4);
	return ptr;
}

/** Read data from "ptr". Return the next dword following the data. */
static uint32_t *read_data(uint32_t *ptr, void *data, unsigned size)
{
	memcpy(data, ptr, size);
	ptr += DIV_ROUND_UP(size, 4);
	return ptr;
}

/**
 * Write the size as uint followed by the data. Return the next dword
 * following the copied data.
 */
static uint32_t *write_chunk(uint32_t *ptr, const void *data, unsigned size)
{
	*ptr++ = size;
	return write_data(ptr, data, size);
}

/**
 * Read the size as uint followed by the data. Return both via parameters.
 * Return the next dword following the data.
 */
static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size)
{
	*size = *ptr++;
	assert(*data == NULL);
	if (!*size)
		return ptr;
	*data = malloc(*size);
	return read_data(ptr, *data, *size);
}
/**
 * Return the shader binary in a buffer. The first 4 bytes contain its size
 * as integer.
 */
static void *si_get_shader_binary(struct si_shader *shader)
{
	/* There is always a size of data followed by the data itself. */
	unsigned relocs_size = shader->binary.reloc_count *
			       sizeof(shader->binary.relocs[0]);
	unsigned disasm_size = shader->binary.disasm_string ?
			       strlen(shader->binary.disasm_string) + 1 : 0;
	unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
				strlen(shader->binary.llvm_ir_string) + 1 : 0;
	unsigned size =
		4 + /* total size */
		4 + /* CRC32 of the data below */
		align(sizeof(shader->config), 4) +
		align(sizeof(shader->info), 4) +
		4 + align(shader->binary.code_size, 4) +
		4 + align(shader->binary.rodata_size, 4) +
		4 + align(relocs_size, 4) +
		4 + align(disasm_size, 4) +
		4 + align(llvm_ir_size, 4);
	void *buffer = CALLOC(1, size);
	uint32_t *ptr = (uint32_t*)buffer;

	if (!buffer)
		return NULL;

	*ptr++ = size;
	ptr++; /* CRC32 is calculated at the end. */

	ptr = write_data(ptr, &shader->config, sizeof(shader->config));
	ptr = write_data(ptr, &shader->info, sizeof(shader->info));
	ptr = write_chunk(ptr, shader->binary.code, shader->binary.code_size);
	ptr = write_chunk(ptr, shader->binary.rodata, shader->binary.rodata_size);
	ptr = write_chunk(ptr, shader->binary.relocs, relocs_size);
	ptr = write_chunk(ptr, shader->binary.disasm_string, disasm_size);
	ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size);
	assert((char *)ptr - (char *)buffer == size);

	/* Compute CRC32. */
	ptr = (uint32_t*)buffer;
	ptr++;
	*ptr = util_hash_crc32(ptr + 1, size - 8);

	return buffer;
}
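/*
 * Sketch of the resulting layout (illustrative, reconstructed from the code
 * above rather than taken from the original comments):
 *
 *   dword 0         total size in bytes
 *   dword 1         CRC32 of everything that follows
 *   config, info    raw structs, each padded to a dword boundary
 *   5 chunks        code, rodata, relocs, disasm, llvm_ir, each stored as a
 *                   size dword followed by dword-padded data
 *
 * The CRC is computed over "size - 8" bytes because the first two dwords
 * (the size and the CRC itself) are excluded.
 */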
static bool si_load_shader_binary(struct si_shader *shader, void *binary)
{
	uint32_t *ptr = (uint32_t*)binary;
	uint32_t size = *ptr++;
	uint32_t crc32 = *ptr++;
	unsigned chunk_size;

	if (util_hash_crc32(ptr, size - 8) != crc32) {
		fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n");
		return false;
	}

	ptr = read_data(ptr, &shader->config, sizeof(shader->config));
	ptr = read_data(ptr, &shader->info, sizeof(shader->info));
	ptr = read_chunk(ptr, (void**)&shader->binary.code,
			 &shader->binary.code_size);
	ptr = read_chunk(ptr, (void**)&shader->binary.rodata,
			 &shader->binary.rodata_size);
	ptr = read_chunk(ptr, (void**)&shader->binary.relocs, &chunk_size);
	shader->binary.reloc_count = chunk_size / sizeof(shader->binary.relocs[0]);
	ptr = read_chunk(ptr, (void**)&shader->binary.disasm_string, &chunk_size);
	ptr = read_chunk(ptr, (void**)&shader->binary.llvm_ir_string, &chunk_size);

	return true;
}
/**
 * Insert a shader into the cache. It's assumed the shader is not in the cache.
 * Use si_shader_cache_load_shader before calling this.
 *
 * Returns false on failure, in which case the tgsi_binary should be freed.
 */
static bool si_shader_cache_insert_shader(struct si_screen *sscreen,
					  void *tgsi_binary,
					  struct si_shader *shader,
					  bool insert_into_disk_cache)
{
	void *hw_binary;
	struct hash_entry *entry;
	uint8_t key[CACHE_KEY_SIZE];

	entry = _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
	if (entry)
		return false; /* already added */

	hw_binary = si_get_shader_binary(shader);
	if (!hw_binary)
		return false;

	if (_mesa_hash_table_insert(sscreen->shader_cache, tgsi_binary,
				    hw_binary) == NULL) {
		FREE(hw_binary);
		return false;
	}

	if (sscreen->disk_shader_cache && insert_into_disk_cache) {
		disk_cache_compute_key(sscreen->disk_shader_cache, tgsi_binary,
				       *((uint32_t *)tgsi_binary), key);
		disk_cache_put(sscreen->disk_shader_cache, key, hw_binary,
			       *((uint32_t *) hw_binary), NULL);
	}

	return true;
}

static bool si_shader_cache_load_shader(struct si_screen *sscreen,
					void *tgsi_binary,
					struct si_shader *shader)
{
	struct hash_entry *entry =
		_mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
	if (!entry) {
		if (sscreen->disk_shader_cache) {
			unsigned char sha1[CACHE_KEY_SIZE];
			size_t tg_size = *((uint32_t *) tgsi_binary);

			disk_cache_compute_key(sscreen->disk_shader_cache,
					       tgsi_binary, tg_size, sha1);

			size_t binary_size;
			uint8_t *buffer =
				disk_cache_get(sscreen->disk_shader_cache,
					       sha1, &binary_size);
			if (!buffer)
				return false;

			if (binary_size < sizeof(uint32_t) ||
			    *((uint32_t*)buffer) != binary_size) {
				/* Something has gone wrong discard the item
				 * from the cache and rebuild/link from
				 * source.
				 */
				assert(!"Invalid radeonsi shader disk cache "
				       "item!");

				disk_cache_remove(sscreen->disk_shader_cache,
						  sha1);
				free(buffer);

				return false;
			}

			if (!si_load_shader_binary(shader, buffer)) {
				free(buffer);
				return false;
			}
			free(buffer);

			if (!si_shader_cache_insert_shader(sscreen, tgsi_binary,
							   shader, false))
				FREE(tgsi_binary);
		} else {
			return false;
		}
	} else {
		if (si_load_shader_binary(shader, entry->data))
			FREE(tgsi_binary);
		else
			return false;
	}

	p_atomic_inc(&sscreen->num_shader_cache_hits);
	return true;
}
static uint32_t si_shader_cache_key_hash(const void *key)
{
	/* The first dword is the key size. */
	return util_hash_crc32(key, *(uint32_t*)key);
}

static bool si_shader_cache_key_equals(const void *a, const void *b)
{
	uint32_t *keya = (uint32_t*)a;
	uint32_t *keyb = (uint32_t*)b;

	/* The first dword is the key size. */
	if (*keya != *keyb)
		return false;

	return memcmp(keya, keyb, *keya) == 0;
}
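/*
 * Illustrative note (not from the original source): the hash table key is
 * the size-prefixed TGSI blob produced by si_get_tgsi_binary().  A key whose
 * first dword is e.g. 0x58 is therefore hashed and compared over its first
 * 0x58 bytes, so two selectors with identical tokens and streamout state
 * collapse to the same cache entry.
 */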
static void si_destroy_shader_cache_entry(struct hash_entry *entry)
{
	FREE((void*)entry->key);
	FREE(entry->data);
}

bool si_init_shader_cache(struct si_screen *sscreen)
{
	(void) mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
	sscreen->shader_cache =
		_mesa_hash_table_create(NULL,
					si_shader_cache_key_hash,
					si_shader_cache_key_equals);

	return sscreen->shader_cache != NULL;
}

void si_destroy_shader_cache(struct si_screen *sscreen)
{
	if (sscreen->shader_cache)
		_mesa_hash_table_destroy(sscreen->shader_cache,
					 si_destroy_shader_cache_entry);
	mtx_destroy(&sscreen->shader_cache_mutex);
}
static void si_set_tesseval_regs(struct si_screen *sscreen,
				 struct si_shader_selector *tes,
				 struct si_pm4_state *pm4)
{
	struct tgsi_shader_info *info = &tes->info;
	unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
	unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
	bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
	bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
	unsigned type, partitioning, topology, distribution_mode;

	switch (tes_prim_mode) {
	case PIPE_PRIM_LINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	case PIPE_PRIM_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case PIPE_PRIM_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	default:
		assert(0);
		return;
	}

	switch (tes_spacing) {
	case PIPE_TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	case PIPE_TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	default:
		assert(0);
		return;
	}

	if (tes_point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes_prim_mode == PIPE_PRIM_LINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (tes_vertex_order_cw)
		/* for some reason, this must be the other way around */
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	if (sscreen->has_distributed_tess) {
		if (sscreen->info.family == CHIP_FIJI ||
		    sscreen->info.family >= CHIP_POLARIS10)
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
		else
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
	} else
		distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;

	si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
		       S_028B6C_TYPE(type) |
		       S_028B6C_PARTITIONING(partitioning) |
		       S_028B6C_TOPOLOGY(topology) |
		       S_028B6C_DISTRIBUTION_MODE(distribution_mode));
}
/* Polaris needs different VTX_REUSE_DEPTH settings depending on
 * whether the "fractional odd" tessellation spacing is used.
 *
 * Possible VGT configurations and which state should set the register:
 *
 *   Reg set in | VGT shader configuration   | Value
 * ------------------------------------------------------
 *     VS as VS | VS                         | 30
 *     VS as ES | ES -> GS -> VS             | 30
 *    TES as VS | LS -> HS -> VS             | 14 or 30
 *    TES as ES | LS -> HS -> ES -> GS -> VS | 14 or 30
 *
 * If "shader" is NULL, it's assumed it's not LS or GS copy shader.
 */
static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen,
					 struct si_shader_selector *sel,
					 struct si_shader *shader,
					 struct si_pm4_state *pm4)
{
	unsigned type = sel->type;

	if (sscreen->info.family < CHIP_POLARIS10)
		return;

	/* VS as VS, or VS as ES: */
	if ((type == PIPE_SHADER_VERTEX &&
	     (!shader ||
	      (!shader->key.as_ls && !shader->is_gs_copy_shader))) ||
	    /* TES as VS, or TES as ES: */
	    type == PIPE_SHADER_TESS_EVAL) {
		unsigned vtx_reuse_depth = 30;

		if (type == PIPE_SHADER_TESS_EVAL &&
		    sel->info.properties[TGSI_PROPERTY_TES_SPACING] ==
		    PIPE_TESS_SPACING_FRACTIONAL_ODD)
			vtx_reuse_depth = 14;

		si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
			       vtx_reuse_depth);
	}
}
static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
{
	if (shader->pm4)
		si_pm4_clear_state(shader->pm4);
	else
		shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	return shader->pm4;
}
static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	assert(sscreen->info.chip_class <= VI);

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	/* We need at least 2 components for LS.
	 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
	 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
	 */
	vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;

	si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
	si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);

	shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
			       S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
			       S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
			       S_00B528_DX10_CLAMP(1) |
			       S_00B528_FLOAT_MODE(shader->config.float_mode);
	shader->config.rsrc2 = S_00B52C_USER_SGPR(SI_VS_NUM_USER_SGPR) |
			       S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
}
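/*
 * Illustrative note (not from the original source): the VGPRS/SGPRS fields
 * above are encoded in allocation granules, not raw register counts.  For
 * example, with num_vgprs = 37 and num_sgprs = 24 the encoding is
 * S_00B528_VGPRS((37 - 1) / 4) = 9, i.e. 40 VGPRs allocated, and
 * S_00B528_SGPRS((24 - 1) / 8) = 2, i.e. 24 SGPRs allocated, since the
 * hardware allocates VGPRs in blocks of 4 and SGPRs in blocks of 8.
 */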
static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	uint64_t va;
	unsigned ls_vgpr_comp_cnt = 0;

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (sscreen->info.chip_class >= GFX9) {
		si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
		si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, va >> 40);

		/* We need at least 2 components for LS.
		 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
		 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
		 */
		ls_vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;

		shader->config.rsrc2 =
			S_00B42C_USER_SGPR(GFX9_TCS_NUM_USER_SGPR) |
			S_00B42C_USER_SGPR_MSB(GFX9_TCS_NUM_USER_SGPR >> 5) |
			S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
	} else {
		si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
		si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);

		shader->config.rsrc2 =
			S_00B42C_USER_SGPR(GFX6_TCS_NUM_USER_SGPR) |
			S_00B42C_OC_LDS_EN(1) |
			S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
	}

	si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
		       S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B428_DX10_CLAMP(1) |
		       S_00B428_FLOAT_MODE(shader->config.float_mode) |
		       S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));

	if (sscreen->info.chip_class <= VI) {
		si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
			       shader->config.rsrc2);
	}
}
static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;
	unsigned oc_lds_en;

	assert(sscreen->info.chip_class <= VI);

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (shader->selector->type == PIPE_SHADER_VERTEX) {
		/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
		vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
		num_user_sgprs = SI_VS_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

	si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
		       shader->selector->esgs_itemsize / 4);
	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B328_DX10_CLAMP(1) |
		       S_00B328_FLOAT_MODE(shader->config.float_mode));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs) |
		       S_00B32C_OC_LDS_EN(oc_lds_en) |
		       S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(sscreen, shader->selector, pm4);

	polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
struct gfx9_gs_info {
	unsigned es_verts_per_subgroup;
	unsigned gs_prims_per_subgroup;
	unsigned gs_inst_prims_in_subgroup;
	unsigned max_prims_per_subgroup;
	unsigned lds_size;
};

static void gfx9_get_gs_info(struct si_shader_selector *es,
			     struct si_shader_selector *gs,
			     struct gfx9_gs_info *out)
{
	unsigned gs_num_invocations = MAX2(gs->gs_num_invocations, 1);
	unsigned input_prim = gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
	bool uses_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
			      input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space. */
	const unsigned max_lds_size = 8 * 1024;
	const unsigned esgs_itemsize = es->esgs_itemsize / 4;
	unsigned esgs_lds_size;

	/* All these are per subgroup: */
	const unsigned max_out_prims = 32 * 1024;
	const unsigned max_es_verts = 255;
	const unsigned ideal_gs_prims = 64;
	unsigned max_gs_prims, gs_prims;
	unsigned min_es_verts, es_verts, worst_case_es_verts;

	assert(gs_num_invocations <= 32); /* GL maximum */

	if (uses_adjacency || gs_num_invocations > 1)
		max_gs_prims = 127 / gs_num_invocations;
	else
		max_gs_prims = 255;

	/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
	 * Make sure we don't go over the maximum value.
	 */
	if (gs->gs_max_out_vertices > 0) {
		max_gs_prims = MIN2(max_gs_prims,
				    max_out_prims /
				    (gs->gs_max_out_vertices * gs_num_invocations));
	}
	assert(max_gs_prims > 0);

	/* If the primitive has adjacency, halve the number of vertices
	 * that will be reused in multiple primitives.
	 */
	min_es_verts = gs->gs_input_verts_per_prim / (uses_adjacency ? 2 : 1);

	gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
	worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

	/* Compute ESGS LDS size based on the worst case number of ES vertices
	 * needed to create the target number of GS prims per subgroup.
	 */
	esgs_lds_size = esgs_itemsize * worst_case_es_verts;

	/* If total LDS usage is too big, refactor partitions based on ratio
	 * of ESGS item sizes.
	 */
	if (esgs_lds_size > max_lds_size) {
		/* Our target GS Prims Per Subgroup was too large. Calculate
		 * the maximum number of GS Prims Per Subgroup that will fit
		 * into LDS, capped by the maximum that the hardware can support.
		 */
		gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
				max_gs_prims);
		assert(gs_prims > 0);
		worst_case_es_verts = MIN2(min_es_verts * gs_prims,
					   max_es_verts);

		esgs_lds_size = esgs_itemsize * worst_case_es_verts;
		assert(esgs_lds_size <= max_lds_size);
	}

	/* Now calculate remaining ESGS information. */
	if (esgs_lds_size)
		es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
	else
		es_verts = max_es_verts;

	/* Vertices for adjacency primitives are not always reused, so restore
	 * it for ES_VERTS_PER_SUBGRP.
	 */
	min_es_verts = gs->gs_input_verts_per_prim;

	/* For normal primitives, the VGT only checks if they are past the ES
	 * verts per subgroup after allocating a full GS primitive and if they
	 * are, kick off a new subgroup. But if those additional ES verts are
	 * unique (e.g. not reused) we need to make sure there is enough LDS
	 * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
	 */
	es_verts -= min_es_verts - 1;

	out->es_verts_per_subgroup = es_verts;
	out->gs_prims_per_subgroup = gs_prims;
	out->gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
	out->max_prims_per_subgroup = out->gs_inst_prims_in_subgroup *
				      gs->gs_max_out_vertices;
	out->lds_size = align(esgs_lds_size, 128) / 128;

	assert(out->max_prims_per_subgroup <= max_out_prims);
}
static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_shader_selector *sel = shader->selector;
	const ubyte *num_components = sel->info.num_stream_output_components;
	unsigned gs_num_invocations = sel->gs_num_invocations;
	struct si_pm4_state *pm4;
	uint64_t va;
	unsigned max_stream = sel->max_gs_stream;
	unsigned offset;

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	offset = num_components[0] * sel->gs_max_out_vertices;
	si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, offset);
	if (max_stream >= 1)
		offset += num_components[1] * sel->gs_max_out_vertices;
	si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, offset);
	if (max_stream >= 2)
		offset += num_components[2] * sel->gs_max_out_vertices;
	si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, offset);
	if (max_stream >= 3)
		offset += num_components[3] * sel->gs_max_out_vertices;
	si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(offset < (1 << 15));

	si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, sel->gs_max_out_vertices);

	si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, num_components[0]);
	si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? num_components[1] : 0);
	si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? num_components[2] : 0);
	si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? num_components[3] : 0);

	si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
		       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
		       S_028B90_ENABLE(gs_num_invocations > 0));

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (sscreen->info.chip_class >= GFX9) {
		unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
		unsigned es_type = shader->key.part.gs.es->type;
		unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
		struct gfx9_gs_info gs_info;

		if (es_type == PIPE_SHADER_VERTEX)
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			es_vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
		else if (es_type == PIPE_SHADER_TESS_EVAL)
			es_vgpr_comp_cnt = shader->key.part.gs.es->info.uses_primid ? 3 : 2;
		else
			unreachable("invalid shader selector type");

		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
		 * VGPR[0:4] are always loaded.
		 */
		if (sel->info.uses_invocationid)
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		else if (sel->info.uses_primid)
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		else if (input_prim >= PIPE_PRIM_TRIANGLES)
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		else
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */

		gfx9_get_gs_info(shader->key.part.gs.es, sel, &gs_info);

		si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
		si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, va >> 40);

		si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
			       S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
			       S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
			       S_00B228_DX10_CLAMP(1) |
			       S_00B228_FLOAT_MODE(shader->config.float_mode) |
			       S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
		si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
			       S_00B22C_USER_SGPR(GFX9_GS_NUM_USER_SGPR) |
			       S_00B22C_USER_SGPR_MSB(GFX9_GS_NUM_USER_SGPR >> 5) |
			       S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
			       S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
			       S_00B22C_LDS_SIZE(gs_info.lds_size) |
			       S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

		si_pm4_set_reg(pm4, R_028A44_VGT_GS_ONCHIP_CNTL,
			       S_028A44_ES_VERTS_PER_SUBGRP(gs_info.es_verts_per_subgroup) |
			       S_028A44_GS_PRIMS_PER_SUBGRP(gs_info.gs_prims_per_subgroup) |
			       S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_info.gs_inst_prims_in_subgroup));
		si_pm4_set_reg(pm4, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
			       S_028A94_MAX_PRIMS_PER_SUBGROUP(gs_info.max_prims_per_subgroup));
		si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
			       shader->key.part.gs.es->esgs_itemsize / 4);

		if (es_type == PIPE_SHADER_TESS_EVAL)
			si_set_tesseval_regs(sscreen, shader->key.part.gs.es, pm4);

		polaris_set_vgt_vertex_reuse(sscreen, shader->key.part.gs.es,
					     NULL, pm4);
	} else {
		si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
		si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

		si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
			       S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
			       S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
			       S_00B228_DX10_CLAMP(1) |
			       S_00B228_FLOAT_MODE(shader->config.float_mode));
		si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
			       S_00B22C_USER_SGPR(GFX6_GS_NUM_USER_SGPR) |
			       S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
	}
}
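/*
 * Illustrative note (not from the original source): the GSVS ring offsets
 * written at the top of si_shader_gs() are cumulative per-stream item
 * sizes.  For example, with gs_max_out_vertices = 4, two streams in use and
 * num_components = {8, 4, 0, 0}, OFFSET_1 is 8 * 4 = 32, OFFSET_2 adds
 * stream 1 for 32 + 4 * 4 = 48, and OFFSET_3 as well as the final
 * GSVS_RING_ITEMSIZE stay at 48 because streams 2 and 3 write nothing.
 */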
/**
 * Compute the state for \p shader, which will run as a vertex shader on the
 * hardware.
 *
 * If \p gs is non-NULL, it points to the geometry shader for which this shader
 * is the copy shader.
 */
static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
			 struct si_shader_selector *gs)
{
	const struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned num_user_sgprs;
	unsigned nparams, vgpr_comp_cnt;
	uint64_t va;
	unsigned oc_lds_en;
	unsigned window_space =
		info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
	bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || info->uses_primid;

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	/* We always write VGT_GS_MODE in the VS state, because every switch
	 * between different shader pipelines involving a different GS or no
	 * GS at all involves a switch of the VS (different GS use different
	 * copy shaders). On the other hand, when the API switches from a GS to
	 * no GS and then back to the same GS used originally, the GS state is
	 * not sent again.
	 */
	if (!gs) {
		unsigned mode = V_028A40_GS_OFF;

		/* PrimID needs GS scenario A. */
		if (enable_prim_id)
			mode = V_028A40_GS_SCENARIO_A;

		si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, S_028A40_MODE(mode));
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
	} else {
		si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
			       ac_vgt_gs_mode(gs->gs_max_out_vertices,
					      sscreen->info.chip_class));
		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
	}

	if (sscreen->info.chip_class <= VI) {
		/* Reuse needs to be set off if we write oViewport. */
		si_pm4_set_reg(pm4, R_028AB4_VGT_REUSE_OFF,
			       S_028AB4_REUSE_OFF(info->writes_viewport_index));
	}

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (gs) {
		vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
		/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
		 * If PrimID is disabled. InstanceID / StepRate1 is loaded instead.
		 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
		 */
		vgpr_comp_cnt = enable_prim_id ? 2 : (shader->info.uses_instanceid ? 1 : 0);

		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
			num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
					 info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
		} else {
			num_user_sgprs = SI_VS_NUM_USER_SGPR;
		}
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = enable_prim_id ? 3 : 2;
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	/* VS is required to export at least one param. */
	nparams = MAX2(shader->info.nr_param_exports, 1);
	si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
		       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

	si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
		       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
		       S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE) |
		       S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
						   V_02870C_SPI_SHADER_4COMP :
						   V_02870C_SPI_SHADER_NONE));

	oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B128_DX10_CLAMP(1) |
		       S_00B128_FLOAT_MODE(shader->config.float_mode));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_OC_LDS_EN(oc_lds_en) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
		       S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

	if (window_space)
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
	else
		si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(sscreen, shader->selector, pm4);

	polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
static unsigned si_get_ps_num_interp(struct si_shader *ps)
{
	struct tgsi_shader_info *info = &ps->selector->info;
	unsigned num_colors = !!(info->colors_read & 0x0f) +
			      !!(info->colors_read & 0xf0);
	unsigned num_interp = ps->selector->info.num_inputs +
			      (ps->key.part.ps.prolog.color_two_side ? num_colors : 0);

	assert(num_interp <= 32);
	return MIN2(num_interp, 32);
}

static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
{
	unsigned value = shader->key.part.ps.epilog.spi_shader_col_format;
	unsigned i, num_targets = (util_last_bit(value) + 3) / 4;

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	for (i = 0; i < num_targets; i++)
		if (!(value & (0xf << (i * 4))))
			value |= V_028714_SPI_SHADER_32_R << (i * 4);

	return value;
}
static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned spi_ps_in_control, spi_shader_col_format, cb_shader_mask;
	unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	uint64_t va;
	unsigned input_ena = shader->config.spi_ps_input_ena;

	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));
	/* POS_W_FLOAT_ENA requires one of the perspective weights. */
	assert(!G_0286CC_POS_W_FLOAT_ENA(input_ena) ||
	       G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(input_ena));

	/* Validate interpolation optimization flags (read as implications). */
	assert(!shader->key.part.ps.prolog.bc_optimize_for_persp ||
	       (G_0286CC_PERSP_CENTER_ENA(input_ena) &&
		G_0286CC_PERSP_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.bc_optimize_for_linear ||
	       (G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
		G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_persp_center_interp ||
	       (!G_0286CC_PERSP_SAMPLE_ENA(input_ena) &&
		!G_0286CC_PERSP_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_linear_center_interp ||
	       (!G_0286CC_LINEAR_SAMPLE_ENA(input_ena) &&
		!G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_persp_sample_interp ||
	       (!G_0286CC_PERSP_CENTER_ENA(input_ena) &&
		!G_0286CC_PERSP_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_linear_sample_interp ||
	       (!G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
		!G_0286CC_LINEAR_CENTROID_ENA(input_ena)));

	/* Validate cases when the optimizations are off (read as implications). */
	assert(shader->key.part.ps.prolog.bc_optimize_for_persp ||
	       !G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       !G_0286CC_PERSP_CENTROID_ENA(input_ena));
	assert(shader->key.part.ps.prolog.bc_optimize_for_linear ||
	       !G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
	       !G_0286CC_LINEAR_CENTROID_ENA(input_ena));

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
	 * Possible values:
	 * 0 -> Position = pixel center
	 * 1 -> Position = pixel centroid
	 * 2 -> Position = at sample position
	 *
	 * From GLSL 4.5 specification, section 7.1:
	 *   "The variable gl_FragCoord is available as an input variable from
	 *    within fragment shaders and it holds the window relative coordinates
	 *    (x, y, z, 1/w) values for the fragment. If multi-sampling, this
	 *    value can be for any location within the pixel, or one of the
	 *    fragment samples. The use of centroid does not further restrict
	 *    this value to be inside the current primitive."
	 *
	 * Meaning that centroid has no effect and we can return anything within
	 * the pixel. Thus, return the value at sample position, because that's
	 * the most accurate one shaders can get.
	 */
	spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);

	if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
	    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
		spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);

	spi_shader_col_format = si_get_spi_shader_col_format(shader);
	cb_shader_mask = ac_get_cb_shader_mask(spi_shader_col_format);

	/* Ensure that some export memory is always allocated, for two reasons:
	 *
	 * 1) Correctness: The hardware ignores the EXEC mask if no export
	 *    memory is allocated, so KILL and alpha test do not work correctly
	 *    without this.
	 * 2) Performance: Every shader needs at least a NULL export, even when
	 *    it writes no color/depth output. The NULL export instruction
	 *    stalls without this setting.
	 *
	 * Don't add this to CB_SHADER_MASK.
	 */
	if (!spi_shader_col_format &&
	    !info->writes_z && !info->writes_stencil && !info->writes_samplemask)
		spi_shader_col_format = V_028714_SPI_SHADER_32_R;

	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, input_ena);
	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR,
		       shader->config.spi_ps_input_addr);

	/* Set interpolation controls. */
	spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader));

	/* Set registers. */
	si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
		       ac_get_spi_shader_z_format(info->writes_z,
						  info->writes_stencil,
						  info->writes_samplemask));

	si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT, spi_shader_col_format);
	si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, cb_shader_mask);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B028_DX10_CLAMP(1) |
		       S_00B028_FLOAT_MODE(shader->config.float_mode));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
		       S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
		       S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}
static void si_shader_init_pm4_state(struct si_screen *sscreen,
				     struct si_shader *shader)
{
	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.as_ls)
			si_shader_ls(sscreen, shader);
		else if (shader->key.as_es)
			si_shader_es(sscreen, shader);
		else
			si_shader_vs(sscreen, shader, NULL);
		break;
	case PIPE_SHADER_TESS_CTRL:
		si_shader_hs(sscreen, shader);
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (shader->key.as_es)
			si_shader_es(sscreen, shader);
		else
			si_shader_vs(sscreen, shader, NULL);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(sscreen, shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}
static unsigned si_get_alpha_test_func(struct si_context *sctx)
{
	/* Alpha-test should be disabled if colorbuffer 0 is integer. */
	if (sctx->queued.named.dsa)
		return sctx->queued.named.dsa->alpha_func;

	return PIPE_FUNC_ALWAYS;
}

static void si_shader_selector_key_vs(struct si_context *sctx,
				      struct si_shader_selector *vs,
				      struct si_shader_key *key,
				      struct si_vs_prolog_bits *prolog_key)
{
	if (!sctx->vertex_elements)
		return;

	prolog_key->instance_divisor_is_one =
		sctx->vertex_elements->instance_divisor_is_one;
	prolog_key->instance_divisor_is_fetched =
		sctx->vertex_elements->instance_divisor_is_fetched;

	/* Prefer a monolithic shader to allow scheduling divisions around
	 * VBO loads. */
	if (prolog_key->instance_divisor_is_fetched)
		key->opt.prefer_mono = 1;

	unsigned count = MIN2(vs->info.num_inputs,
			      sctx->vertex_elements->count);
	memcpy(key->mono.vs_fix_fetch, sctx->vertex_elements->fix_fetch, count);
}
static void si_shader_selector_key_hw_vs(struct si_context *sctx,
					 struct si_shader_selector *vs,
					 struct si_shader_key *key)
{
	struct si_shader_selector *ps = sctx->ps_shader.cso;

	key->opt.clip_disable =
		sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
		(vs->info.clipdist_writemask ||
		 vs->info.writes_clipvertex) &&
		!vs->info.culldist_writemask;

	/* Find out if PS is disabled. */
	bool ps_disabled = true;
	if (ps) {
		const struct si_state_blend *blend = sctx->queued.named.blend;
		bool alpha_to_coverage = blend && blend->alpha_to_coverage;
		bool ps_modifies_zs = ps->info.uses_kill ||
				      ps->info.writes_z ||
				      ps->info.writes_stencil ||
				      ps->info.writes_samplemask ||
				      alpha_to_coverage ||
				      si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS;

		unsigned ps_colormask = sctx->framebuffer.colorbuf_enabled_4bit &
					sctx->queued.named.blend->cb_target_mask;
		if (!ps->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
			ps_colormask &= ps->colors_written_4bit;

		ps_disabled = sctx->queued.named.rasterizer->rasterizer_discard ||
			      (!ps_colormask &&
			       !ps_modifies_zs &&
			       !ps->info.writes_memory);
	}

	/* Find out which VS outputs aren't used by the PS. */
	uint64_t outputs_written = vs->outputs_written;
	uint64_t inputs_read = 0;

	/* ignore POSITION, PSIZE */
	outputs_written &= ~((1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_POSITION, 0) |
			     (1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_PSIZE, 0))));

	if (!ps_disabled)
		inputs_read = ps->inputs_read;

	uint64_t linked = outputs_written & inputs_read;

	key->opt.kill_outputs = ~linked & outputs_written;
}
/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
					  struct si_shader_selector *sel,
					  struct si_shader_key *key)
{
	struct si_context *sctx = (struct si_context *)ctx;

	memset(key, 0, sizeof(*key));

	switch (sel->type) {
	case PIPE_SHADER_VERTEX:
		si_shader_selector_key_vs(sctx, sel, key, &key->part.vs.prolog);

		if (sctx->tes_shader.cso)
			key->as_ls = 1;
		else if (sctx->gs_shader.cso)
			key->as_es = 1;
		else {
			si_shader_selector_key_hw_vs(sctx, sel, key);

			if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
				key->mono.u.vs_export_prim_id = 1;
		}
		break;
	case PIPE_SHADER_TESS_CTRL:
		if (sctx->b.chip_class >= GFX9) {
			si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
						  key, &key->part.tcs.ls_prolog);
			key->part.tcs.ls = sctx->vs_shader.cso;

			/* When the LS VGPR fix is needed, monolithic shaders
			 * can:
			 *  - avoid initializing EXEC in both the LS prolog
			 *    and the LS main part when !vs_needs_prolog
			 *  - remove the fixup for unused input VGPRs
			 */
			key->part.tcs.ls_prolog.ls_vgpr_fix = sctx->ls_vgpr_fix;

			/* The LS output / HS input layout can be communicated
			 * directly instead of via user SGPRs for merged LS-HS.
			 * The LS VGPR fix prefers this too.
			 */
			key->opt.prefer_mono = 1;
		}

		key->part.tcs.epilog.prim_mode =
			sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
		key->part.tcs.epilog.invoc0_tess_factors_are_def =
			sel->tcs_info.tessfactors_are_def_in_all_invocs;
		key->part.tcs.epilog.tes_reads_tess_factors =
			sctx->tes_shader.cso->info.reads_tess_factors;

		if (sel == sctx->fixed_func_tcs_shader.cso)
			key->mono.u.ff_tcs_inputs_to_copy = sctx->vs_shader.cso->outputs_written;
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (sctx->gs_shader.cso)
			key->as_es = 1;
		else {
			si_shader_selector_key_hw_vs(sctx, sel, key);

			if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
				key->mono.u.vs_export_prim_id = 1;
		}
		break;
	case PIPE_SHADER_GEOMETRY:
		if (sctx->b.chip_class >= GFX9) {
			if (sctx->tes_shader.cso) {
				key->part.gs.es = sctx->tes_shader.cso;
			} else {
				si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
							  key, &key->part.gs.vs_prolog);
				key->part.gs.es = sctx->vs_shader.cso;
			}

			/* Merged ES-GS can have unbalanced wave usage.
			 *
			 * ES threads are per-vertex, while GS threads are
			 * per-primitive. So without any amplification, there
			 * are fewer GS threads than ES threads, which can result
			 * in empty (no-op) GS waves. With too much amplification,
			 * there are more GS threads than ES threads, which
			 * can result in empty (no-op) ES waves.
			 *
			 * Non-monolithic shaders are implemented by setting EXEC
			 * at the beginning of shader parts, and don't jump to
			 * the end if EXEC is 0.
			 *
			 * Monolithic shaders use conditional blocks, so they can
			 * jump and skip empty waves of ES or GS. So set this to
			 * always use optimized variants, which are monolithic.
			 */
			key->opt.prefer_mono = 1;
		}
		key->part.gs.prolog.tri_strip_adj_fix = sctx->gs_tri_strip_adj_fix;
		break;
	case PIPE_SHADER_FRAGMENT: {
		struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
		struct si_state_blend *blend = sctx->queued.named.blend;

		if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
		    sel->info.colors_written == 0x1)
			key->part.ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;

		if (blend) {
			/* Select the shader color format based on whether
			 * blending or alpha are needed.
			 */
			key->part.ps.epilog.spi_shader_col_format =
				(blend->blend_enable_4bit & blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format_blend_alpha) |
				(blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format_blend) |
				(~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format_alpha) |
				(~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
				 sctx->framebuffer.spi_shader_col_format);
			key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;

			/* The output for dual source blending should have
			 * the same format as the first output.
			 */
			if (blend->dual_src_blend)
				key->part.ps.epilog.spi_shader_col_format |=
					(key->part.ps.epilog.spi_shader_col_format & 0xf) << 4;
		} else
			key->part.ps.epilog.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format;

		/* If alpha-to-coverage is enabled, we have to export alpha
		 * even if there is no color buffer.
		 */
		if (!(key->part.ps.epilog.spi_shader_col_format & 0xf) &&
		    blend && blend->alpha_to_coverage)
			key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;

		/* On SI and CIK except Hawaii, the CB doesn't clamp outputs
		 * to the range supported by the type if a channel has less
		 * than 16 bits and the export format is 16_ABGR.
		 */
		if (sctx->b.chip_class <= CIK && sctx->b.family != CHIP_HAWAII) {
			key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
			key->part.ps.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
		}

		/* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
		if (!key->part.ps.epilog.last_cbuf) {
			key->part.ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
			key->part.ps.epilog.color_is_int8 &= sel->info.colors_written;
			key->part.ps.epilog.color_is_int10 &= sel->info.colors_written;
		}

		if (rs) {
			bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
					sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
				       sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
			bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;

			key->part.ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
			key->part.ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;

			if (sctx->queued.named.blend) {
				key->part.ps.epilog.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
								   rs->multisample_enable;
			}

			key->part.ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
			key->part.ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
								   (is_line && rs->line_smooth)) &&
								  sctx->framebuffer.nr_samples <= 1;
			key->part.ps.epilog.clamp_color = rs->clamp_fragment_color;

			if (sctx->ps_iter_samples > 1 &&
			    sel->info.reads_samplemask) {
				key->part.ps.prolog.samplemask_log_ps_iter =
					util_logbase2(util_next_power_of_two(sctx->ps_iter_samples));
			}

			if (rs->force_persample_interp &&
			    rs->multisample_enable &&
			    sctx->framebuffer.nr_samples > 1 &&
			    sctx->ps_iter_samples > 1) {
				key->part.ps.prolog.force_persp_sample_interp =
					sel->info.uses_persp_center ||
					sel->info.uses_persp_centroid;

				key->part.ps.prolog.force_linear_sample_interp =
					sel->info.uses_linear_center ||
					sel->info.uses_linear_centroid;
			} else if (rs->multisample_enable &&
				   sctx->framebuffer.nr_samples > 1) {
				key->part.ps.prolog.bc_optimize_for_persp =
					sel->info.uses_persp_center &&
					sel->info.uses_persp_centroid;
				key->part.ps.prolog.bc_optimize_for_linear =
					sel->info.uses_linear_center &&
					sel->info.uses_linear_centroid;
			} else {
				/* Make sure SPI doesn't compute more than 1 pair
				 * of (i,j), which is the optimization here. */
				key->part.ps.prolog.force_persp_center_interp =
					sel->info.uses_persp_center +
					sel->info.uses_persp_centroid +
					sel->info.uses_persp_sample > 1;

				key->part.ps.prolog.force_linear_center_interp =
					sel->info.uses_linear_center +
					sel->info.uses_linear_centroid +
					sel->info.uses_linear_sample > 1;

				if (sel->info.opcode_count[TGSI_OPCODE_INTERP_SAMPLE])
					key->mono.u.ps.interpolate_at_sample_force_center = 1;
			}
		}

		key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
		break;
	}
	default:
		assert(0);
	}

	if (unlikely(sctx->screen->debug_flags & DBG(NO_OPT_VARIANT)))
		memset(&key->opt, 0, sizeof(key->opt));
}
static void si_build_shader_variant(struct si_shader *shader,
				    int thread_index,
				    bool low_priority)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_screen *sscreen = sel->screen;
	LLVMTargetMachineRef tm;
	struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug;
	int r;

	if (thread_index >= 0) {
		if (low_priority) {
			assert(thread_index < ARRAY_SIZE(sscreen->tm_low_priority));
			tm = sscreen->tm_low_priority[thread_index];
		} else {
			assert(thread_index < ARRAY_SIZE(sscreen->tm));
			tm = sscreen->tm[thread_index];
		}
		if (!debug->async)
			debug = NULL;
	} else {
		assert(!low_priority);
		tm = shader->compiler_ctx_state.tm;
	}

	r = si_shader_create(sscreen, tm, shader, debug);
	if (unlikely(r)) {
		R600_ERR("Failed to build shader variant (type=%u) %d\n",
			 sel->type, r);
		shader->compilation_failed = true;
		return;
	}

	if (shader->compiler_ctx_state.is_debug_context) {
		FILE *f = open_memstream(&shader->shader_log,
					 &shader->shader_log_size);
		if (f) {
			si_shader_dump(sscreen, shader, NULL, sel->type, f, false);
			fclose(f);
		}
	}

	si_shader_init_pm4_state(sscreen, shader);
}

static void si_build_shader_variant_low_priority(void *job, int thread_index)
{
	struct si_shader *shader = (struct si_shader *)job;

	assert(thread_index >= 0);

	si_build_shader_variant(shader, thread_index, true);
}
static const struct si_shader_key zeroed;

static bool si_check_missing_main_part(struct si_screen *sscreen,
				       struct si_shader_selector *sel,
				       struct si_compiler_ctx_state *compiler_state,
				       struct si_shader_key *key)
{
	struct si_shader **mainp = si_get_main_shader_part(sel, key);

	if (!*mainp) {
		struct si_shader *main_part = CALLOC_STRUCT(si_shader);

		if (!main_part)
			return false;

		/* We can leave the fence as permanently signaled because the
		 * main part becomes visible globally only after it has been
		 * compiled. */
		util_queue_fence_init(&main_part->ready);

		main_part->selector = sel;
		main_part->key.as_es = key->as_es;
		main_part->key.as_ls = key->as_ls;

		if (si_compile_tgsi_shader(sscreen, compiler_state->tm,
					   main_part, false,
					   &compiler_state->debug) != 0) {
			FREE(main_part);
			return false;
		}
		*mainp = main_part;
	}
	return true;
}
/* Select the hw shader variant depending on the current state. */
static int si_shader_select_with_key(struct si_screen *sscreen,
				     struct si_shader_ctx_state *state,
				     struct si_compiler_ctx_state *compiler_state,
				     struct si_shader_key *key,
				     int thread_index)
{
	struct si_shader_selector *sel = state->cso;
	struct si_shader_selector *previous_stage_sel = NULL;
	struct si_shader *current = state->current;
	struct si_shader *iter, *shader = NULL;

again:
	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(current &&
		   memcmp(&current->key, key, sizeof(*key)) == 0)) {
		if (unlikely(!util_queue_fence_is_signalled(&current->ready))) {
			if (current->is_optimized) {
				memset(&key->opt, 0, sizeof(key->opt));
				goto current_not_ready;
			}

			util_queue_fence_wait(&current->ready);
		}

		return current->compilation_failed ? -1 : 0;
	}
current_not_ready:

	/* This must be done before the mutex is locked, because async GS
	 * compilation calls this function too, and therefore must enter
	 * the mutex first.
	 *
	 * Only wait if we are in a draw call. Don't wait if we are
	 * in a compiler thread.
	 */
	if (thread_index < 0)
		util_queue_fence_wait(&sel->ready);

	mtx_lock(&sel->mutex);

	/* Find the shader variant. */
	for (iter = sel->first_variant; iter; iter = iter->next_variant) {
		/* Don't check the "current" shader. We checked it above. */
		if (current != iter &&
		    memcmp(&iter->key, key, sizeof(*key)) == 0) {
			mtx_unlock(&sel->mutex);

			if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
				/* If it's an optimized shader and its compilation has
				 * been started but isn't done, use the unoptimized
				 * shader so as not to cause a stall due to compilation.
				 */
				if (iter->is_optimized) {
					memset(&key->opt, 0, sizeof(key->opt));
					goto again;
				}

				util_queue_fence_wait(&iter->ready);
			}

			if (iter->compilation_failed) {
				return -1; /* skip the draw call */
			}

			state->current = iter;
			return 0;
		}
	}

	/* Build a new shader. */
	shader = CALLOC_STRUCT(si_shader);
	if (!shader) {
		mtx_unlock(&sel->mutex);
		return -ENOMEM;
	}

	util_queue_fence_init(&shader->ready);

	shader->selector = sel;
	shader->key = *key;
	shader->compiler_ctx_state = *compiler_state;

	/* If this is a merged shader, get the first shader's selector. */
	if (sscreen->info.chip_class >= GFX9) {
		if (sel->type == PIPE_SHADER_TESS_CTRL)
			previous_stage_sel = key->part.tcs.ls;
		else if (sel->type == PIPE_SHADER_GEOMETRY)
			previous_stage_sel = key->part.gs.es;

		/* We need to wait for the previous shader. */
		if (previous_stage_sel && thread_index < 0)
			util_queue_fence_wait(&previous_stage_sel->ready);
	}

	/* Compile the main shader part if it doesn't exist. This can happen
	 * if the initial guess was wrong. */
	bool is_pure_monolithic =
		sscreen->use_monolithic_shaders ||
		memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;

	if (!is_pure_monolithic) {
		bool ok;

		/* Make sure the main shader part is present. This is needed
		 * for shaders that can be compiled as VS, LS, or ES, and only
		 * one of them is compiled at creation.
		 *
		 * For merged shaders, check that the starting shader's main
		 * part is present.
		 */
		if (previous_stage_sel) {
			struct si_shader_key shader1_key = zeroed;

			if (sel->type == PIPE_SHADER_TESS_CTRL)
				shader1_key.as_ls = 1;
			else if (sel->type == PIPE_SHADER_GEOMETRY)
				shader1_key.as_es = 1;
			else
				assert(0);

			mtx_lock(&previous_stage_sel->mutex);
			ok = si_check_missing_main_part(sscreen,
							previous_stage_sel,
							compiler_state, &shader1_key);
			mtx_unlock(&previous_stage_sel->mutex);
		} else {
			ok = si_check_missing_main_part(sscreen, sel,
							compiler_state, key);
		}

		if (!ok) {
			FREE(shader);
			mtx_unlock(&sel->mutex);
			return -ENOMEM; /* skip the draw call */
		}
	}

	/* Keep the reference to the 1st shader of merged shaders, so that
	 * Gallium can't destroy it before we destroy the 2nd shader.
	 *
	 * Set sctx = NULL, because it's unused if we're not releasing
	 * the shader, and we don't have any sctx here.
	 */
	si_shader_selector_reference(NULL, &shader->previous_stage_sel,
				     previous_stage_sel);

	/* Monolithic-only shaders don't make a distinction between optimized
	 * and unoptimized. */
	shader->is_monolithic =
		is_pure_monolithic ||
		memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;

	shader->is_optimized =
		!is_pure_monolithic &&
		memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;

	/* If it's an optimized shader, compile it asynchronously. */
	if (shader->is_optimized &&
	    !is_pure_monolithic &&
	    thread_index < 0) {
		/* Compile it asynchronously. */
		util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
				   shader, &shader->ready,
				   si_build_shader_variant_low_priority, NULL);

		/* Add only after the ready fence was reset, to guard against a
		 * race with si_bind_XX_shader. */
		if (!sel->last_variant) {
			sel->first_variant = shader;
			sel->last_variant = shader;
		} else {
			sel->last_variant->next_variant = shader;
			sel->last_variant = shader;
		}

		/* Use the default (unoptimized) shader for now. */
		memset(&key->opt, 0, sizeof(key->opt));
		mtx_unlock(&sel->mutex);
		goto again;
	}

	/* Reset the fence before adding to the variant list. */
	util_queue_fence_reset(&shader->ready);

	if (!sel->last_variant) {
		sel->first_variant = shader;
		sel->last_variant = shader;
	} else {
		sel->last_variant->next_variant = shader;
		sel->last_variant = shader;
	}

	mtx_unlock(&sel->mutex);

	assert(!shader->is_optimized);
	si_build_shader_variant(shader, thread_index, false);

	util_queue_fence_signal(&shader->ready);

	if (!shader->compilation_failed)
		state->current = shader;

	return shader->compilation_failed ? -1 : 0;
}
static int si_shader_select(struct pipe_context *ctx,
			    struct si_shader_ctx_state *state,
			    struct si_compiler_ctx_state *compiler_state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_key key;

	si_shader_selector_key(ctx, state->cso, &key);
	return si_shader_select_with_key(sctx->screen, state, compiler_state,
					 &key, -1);
}
static void si_parse_next_shader_property(const struct tgsi_shader_info *info,
					  bool streamout,
					  struct si_shader_key *key)
{
	unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];

	switch (info->processor) {
	case PIPE_SHADER_VERTEX:
		switch (next_shader) {
		case PIPE_SHADER_GEOMETRY:
			key->as_es = 1;
			break;
		case PIPE_SHADER_TESS_CTRL:
		case PIPE_SHADER_TESS_EVAL:
			key->as_ls = 1;
			break;
		default:
			/* If POSITION isn't written, it can only be a HW VS
			 * if streamout is used. If streamout isn't used,
			 * assume that it's a HW LS. (the next shader is TCS)
			 * This heuristic is needed for separate shader objects.
			 */
			if (!info->writes_position && !streamout)
				key->as_ls = 1;
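			/* e.g. a separate VS that neither writes POSITION nor
			 * uses streamout is guessed to feed TCS and gets an LS
			 * main part; if the guess turns out wrong, the missing
			 * main part is compiled on first use (see
			 * si_check_missing_main_part above). */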
		}
		break;

	case PIPE_SHADER_TESS_EVAL:
		if (next_shader == PIPE_SHADER_GEOMETRY ||
		    !info->writes_position)
			key->as_es = 1;
		break;
	}
}
/**
 * Compile the main shader part or the monolithic shader as part of
 * si_shader_selector initialization. Since it can be done asynchronously,
 * there is no way to report compile failures to applications.
 */
static void si_init_shader_selector_async(void *job, int thread_index)
{
	struct si_shader_selector *sel = (struct si_shader_selector *)job;
	struct si_screen *sscreen = sel->screen;
	LLVMTargetMachineRef tm;
	struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;

	assert(!debug->debug_message || debug->async);
	assert(thread_index >= 0);
	assert(thread_index < ARRAY_SIZE(sscreen->tm));
	tm = sscreen->tm[thread_index];

	/* Compile the main shader part for use with a prolog and/or epilog.
	 * If this fails, the driver will try to compile a monolithic shader
	 * on demand.
	 */
	if (!sscreen->use_monolithic_shaders) {
		struct si_shader *shader = CALLOC_STRUCT(si_shader);
		void *tgsi_binary = NULL;

		if (!shader) {
			fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
			return;
		}

		/* We can leave the fence signaled because use of the default
		 * main part is guarded by the selector's ready fence. */
		util_queue_fence_init(&shader->ready);

		shader->selector = sel;
		si_parse_next_shader_property(&sel->info,
					      sel->so.num_outputs != 0,
					      &shader->key);

		if (sel->tokens)
			tgsi_binary = si_get_tgsi_binary(sel);

		/* Try to load the shader from the shader cache. */
		mtx_lock(&sscreen->shader_cache_mutex);

		if (tgsi_binary &&
		    si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
			mtx_unlock(&sscreen->shader_cache_mutex);
			si_shader_dump_stats_for_shader_db(shader, debug);
		} else {
			mtx_unlock(&sscreen->shader_cache_mutex);

			/* Compile the shader if it hasn't been loaded from the cache. */
			if (si_compile_tgsi_shader(sscreen, tm, shader, false,
						   debug) != 0) {
				FREE(tgsi_binary);
				FREE(shader);
				fprintf(stderr, "radeonsi: can't compile a main shader part\n");
				return;
			}

			if (tgsi_binary) {
				mtx_lock(&sscreen->shader_cache_mutex);
				if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader, true))
					FREE(tgsi_binary);
				mtx_unlock(&sscreen->shader_cache_mutex);
			}
		}

		*si_get_main_shader_part(sel, &shader->key) = shader;

		/* Unset "outputs_written" flags for outputs converted to
		 * DEFAULT_VAL, so that later inter-shader optimizations don't
		 * try to eliminate outputs that don't exist in the final
		 * shader.
		 *
		 * This is only done if non-monolithic shaders are enabled.
		 */
		if ((sel->type == PIPE_SHADER_VERTEX ||
		     sel->type == PIPE_SHADER_TESS_EVAL) &&
		    !shader->key.as_ls &&
		    !shader->key.as_es) {
			unsigned i;

			for (i = 0; i < sel->info.num_outputs; i++) {
				unsigned offset = shader->info.vs_output_param_offset[i];

				if (offset <= AC_EXP_PARAM_OFFSET_31)
					continue;

				unsigned name = sel->info.output_semantic_name[i];
				unsigned index = sel->info.output_semantic_index[i];
				unsigned id;

				switch (name) {
				case TGSI_SEMANTIC_GENERIC:
					/* don't process indices the function can't handle */
					if (index >= SI_MAX_IO_GENERIC)
						break;
					/* fall through */
				default:
					id = si_shader_io_get_unique_index(name, index);
					sel->outputs_written &= ~(1ull << id);
					break;
				case TGSI_SEMANTIC_POSITION: /* ignore these */
				case TGSI_SEMANTIC_PSIZE:
				case TGSI_SEMANTIC_CLIPVERTEX:
				case TGSI_SEMANTIC_EDGEFLAG:
					break;
				}
			}
		}
	}

	/* The GS copy shader is always pre-compiled. */
	if (sel->type == PIPE_SHADER_GEOMETRY) {
		sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, tm, sel, debug);
		if (!sel->gs_copy_shader) {
			fprintf(stderr, "radeonsi: can't create GS copy shader\n");
			return;
		}

		si_shader_vs(sscreen, sel->gs_copy_shader, sel);
	}
}
/* Return descriptor slot usage masks from the given shader info. */
void si_get_active_slot_masks(const struct tgsi_shader_info *info,
			      uint32_t *const_and_shader_buffers,
			      uint64_t *samplers_and_images)
{
	unsigned start, num_shaderbufs, num_constbufs, num_images, num_samplers;

	num_shaderbufs = util_last_bit(info->shader_buffers_declared);
	num_constbufs = util_last_bit(info->const_buffers_declared);
	/* two 8-byte images share one 16-byte slot */
	num_images = align(util_last_bit(info->images_declared), 2);
	num_samplers = util_last_bit(info->samplers_declared);

	/* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */
	start = si_get_shaderbuf_slot(num_shaderbufs - 1);
	*const_and_shader_buffers =
		u_bit_consecutive(start, num_shaderbufs + num_constbufs);
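	/* e.g. 2 shader buffers and 3 constant buffers produce a mask of
	 * 5 consecutive bits starting at the slot of sb[1], covering
	 * sb[1], sb[0], cb[0], cb[1], cb[2] in the layout above. */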

	/* The layout is: image[last] ... image[0], sampler[0] ... sampler[last] */
	start = si_get_image_slot(num_images - 1) / 2;
	*samplers_and_images =
		u_bit_consecutive64(start, num_images / 2 + num_samplers);
}
static void *si_create_shader_selector(struct pipe_context *ctx,
				       const struct pipe_shader_state *state)
{
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
	int i;

	if (!sel)
		return NULL;

	pipe_reference_init(&sel->reference, 1);
	sel->screen = sscreen;
	sel->compiler_ctx_state.debug = sctx->debug;
	sel->compiler_ctx_state.is_debug_context = sctx->is_debug;

	sel->so = state->stream_output;

	if (state->type == PIPE_SHADER_IR_TGSI) {
		sel->tokens = tgsi_dup_tokens(state->tokens);
		if (!sel->tokens) {
			FREE(sel);
			return NULL;
		}

		tgsi_scan_shader(state->tokens, &sel->info);
		tgsi_scan_tess_ctrl(state->tokens, &sel->info, &sel->tcs_info);
	} else {
		assert(state->type == PIPE_SHADER_IR_NIR);

		sel->nir = state->ir.nir;

		si_nir_scan_shader(sel->nir, &sel->info);
		si_nir_scan_tess_ctrl(sel->nir, &sel->info, &sel->tcs_info);
	}

	sel->type = sel->info.processor;
	p_atomic_inc(&sscreen->num_shaders_created);
	si_get_active_slot_masks(&sel->info,
				 &sel->active_const_and_shader_buffers,
				 &sel->active_samplers_and_images);

	/* Record which streamout buffers are enabled. */
	for (i = 0; i < sel->so.num_outputs; i++) {
		sel->enabled_streamout_buffer_mask |=
			(1 << sel->so.output[i].output_buffer) <<
			(sel->so.output[i].stream * 4);
	}

	/* The prolog is a no-op if there are no inputs. */
	sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
			       sel->info.num_inputs &&
			       !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];

	sel->force_correct_derivs_after_kill =
		sel->type == PIPE_SHADER_FRAGMENT &&
		sel->info.uses_derivatives &&
		sel->info.uses_kill &&
		sctx->screen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL);

	/* Set which opcode uses which (i,j) pair. */
	if (sel->info.uses_persp_opcode_interp_centroid)
		sel->info.uses_persp_centroid = true;

	if (sel->info.uses_linear_opcode_interp_centroid)
		sel->info.uses_linear_centroid = true;

	if (sel->info.uses_persp_opcode_interp_offset ||
	    sel->info.uses_persp_opcode_interp_sample)
		sel->info.uses_persp_center = true;

	if (sel->info.uses_linear_opcode_interp_offset ||
	    sel->info.uses_linear_opcode_interp_sample)
		sel->info.uses_linear_center = true;

	switch (sel->type) {
	case PIPE_SHADER_GEOMETRY:
		sel->gs_output_prim =
			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
		sel->gs_max_out_vertices =
			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
		sel->gs_num_invocations =
			sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
		sel->gsvs_vertex_size = sel->info.num_outputs * 16;
		sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
					  sel->gs_max_out_vertices;

		sel->max_gs_stream = 0;
		for (i = 0; i < sel->so.num_outputs; i++)
			sel->max_gs_stream = MAX2(sel->max_gs_stream,
						  sel->so.output[i].stream);

		sel->gs_input_verts_per_prim =
			u_vertices_per_prim(sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* Always reserve space for these. */
		sel->patch_outputs_written |=
			(1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0)) |
			(1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0));
		/* fall through */
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_TESS_EVAL:
		for (i = 0; i < sel->info.num_outputs; i++) {
			unsigned name = sel->info.output_semantic_name[i];
			unsigned index = sel->info.output_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
			case TGSI_SEMANTIC_PATCH:
				sel->patch_outputs_written |=
					1ull << si_shader_io_get_unique_index_patch(name, index);
				break;

			case TGSI_SEMANTIC_GENERIC:
				/* don't process indices the function can't handle */
				if (index >= SI_MAX_IO_GENERIC)
					break;
				/* fall through */
			default:
				sel->outputs_written |=
					1ull << si_shader_io_get_unique_index(name, index);
				break;

			case TGSI_SEMANTIC_CLIPVERTEX: /* ignore these */
			case TGSI_SEMANTIC_EDGEFLAG:
				break;
			}
		}
		sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;

		/* For the ESGS ring in LDS, add 1 dword to reduce LDS bank
		 * conflicts, i.e. each vertex will start at a different bank.
		 */
		if (sctx->b.chip_class >= GFX9)
			sel->esgs_itemsize += 4;
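		/* e.g. if the highest written output slot is 5, util_last_bit64()
		 * above returns 6, giving a 6 * 16 = 96 byte per-vertex item
		 * (100 bytes on GFX9 with the extra padding dword). */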
		break;

	case PIPE_SHADER_FRAGMENT:
		for (i = 0; i < sel->info.num_inputs; i++) {
			unsigned name = sel->info.input_semantic_name[i];
			unsigned index = sel->info.input_semantic_index[i];

			switch (name) {
			case TGSI_SEMANTIC_GENERIC:
				/* don't process indices the function can't handle */
				if (index >= SI_MAX_IO_GENERIC)
					break;
				/* fall through */
			default:
				sel->inputs_read |=
					1ull << si_shader_io_get_unique_index(name, index);
				break;

			case TGSI_SEMANTIC_PCOORD: /* ignore this */
				break;
			}
		}

		for (i = 0; i < 8; i++)
			if (sel->info.colors_written & (1 << i))
				sel->colors_written_4bit |= 0xf << (4 * i);

		for (i = 0; i < sel->info.num_inputs; i++) {
			if (sel->info.input_semantic_name[i] == TGSI_SEMANTIC_COLOR) {
				int index = sel->info.input_semantic_index[i];
				sel->color_attr_index[index] = i;
			}
		}
		break;
	}

	/* PA_CL_VS_OUT_CNTL */
	bool misc_vec_ena =
		sel->info.writes_psize || sel->info.writes_edgeflag ||
		sel->info.writes_layer || sel->info.writes_viewport_index;
	sel->pa_cl_vs_out_cntl =
		S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) |
		S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag) |
		S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
		S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
		S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
		S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
	sel->clipdist_mask = sel->info.writes_clipvertex ?
				     SIX_BITS : sel->info.clipdist_writemask;
	sel->culldist_mask = sel->info.culldist_writemask <<
			     sel->info.num_written_clipdistance;

	/* DB_SHADER_CONTROL */
	sel->db_shader_control =
		S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
		S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(sel->info.writes_stencil) |
		S_02880C_MASK_EXPORT_ENABLE(sel->info.writes_samplemask) |
		S_02880C_KILL_ENABLE(sel->info.uses_kill);

	switch (sel->info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT]) {
	case TGSI_FS_DEPTH_LAYOUT_GREATER:
		sel->db_shader_control |=
			S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
		break;
	case TGSI_FS_DEPTH_LAYOUT_LESS:
		sel->db_shader_control |=
			S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
		break;
	}

	/* Z_ORDER, EXEC_ON_HIER_FAIL and EXEC_ON_NOOP should be set as following:
	 *
	 *   | early Z/S | writes_mem | allow_ReZ? |      Z_ORDER       | EXEC_ON_HIER_FAIL | EXEC_ON_NOOP
	 * --|-----------|------------|------------|--------------------|-------------------|-------------
	 * 1a|   false   |   false    |    true    | EarlyZ_Then_ReZ    |         0         |      0
	 * 1b|   false   |   false    |    false   | EarlyZ_Then_LateZ  |         0         |      0
	 * 2 |   false   |   true     |    n/a     |       LateZ        |         1         |      0
	 * 3 |   true    |   false    |    n/a     | EarlyZ_Then_LateZ  |         0         |      0
	 * 4 |   true    |   true     |    n/a     | EarlyZ_Then_LateZ  |         0         |      1
	 *
	 * In cases 3 and 4, HW will force Z_ORDER to EarlyZ regardless of what's set in the register.
	 * In case 2, NOOP_CULL is a don't care field. In case 2, 3 and 4, ReZ doesn't make sense.
	 *
	 * Don't use ReZ without profiling !!!
	 *
	 * ReZ decreases performance by 15% in DiRT: Showdown on Ultra settings, which has pretty complex
	 * shaders.
	 */
	if (sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]) {
		/* Cases 3, 4. */
		sel->db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) |
					  S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
					  S_02880C_EXEC_ON_NOOP(sel->info.writes_memory);
	} else if (sel->info.writes_memory) {
		/* Case 2. */
		sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z) |
					  S_02880C_EXEC_ON_HIER_FAIL(1);
	} else {
		/* Case 1. */
		sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
	}

	(void) mtx_init(&sel->mutex, mtx_plain);
	util_queue_fence_init(&sel->ready);

	struct util_async_debug_callback async_debug;
	bool wait =
		(sctx->debug.debug_message && !sctx->debug.async) ||
		sctx->is_debug ||
		si_can_dump_shader(sscreen, sel->info.processor);

	if (wait) {
		u_async_debug_init(&async_debug);
		sel->compiler_ctx_state.debug = async_debug.base;
	}

	util_queue_add_job(&sscreen->shader_compiler_queue, sel,
			   &sel->ready, si_init_shader_selector_async,
			   NULL);

	if (wait) {
		util_queue_fence_wait(&sel->ready);
		u_async_debug_drain(&async_debug, &sctx->debug);
		u_async_debug_cleanup(&async_debug);
	}

	return sel;
}
static void si_update_streamout_state(struct si_context *sctx)
{
	struct si_shader_selector *shader_with_so = si_get_vs(sctx)->cso;

	if (!shader_with_so)
		return;

	sctx->streamout.enabled_stream_buffers_mask =
		shader_with_so->enabled_streamout_buffer_mask;
	sctx->streamout.stride_in_dw = shader_with_so->so.stride;
}
static void si_update_clip_regs(struct si_context *sctx,
				struct si_shader_selector *old_hw_vs,
				struct si_shader *old_hw_vs_variant,
				struct si_shader_selector *next_hw_vs,
				struct si_shader *next_hw_vs_variant)
{
	if (next_hw_vs &&
	    (!old_hw_vs ||
	     old_hw_vs->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] !=
	     next_hw_vs->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] ||
	     old_hw_vs->pa_cl_vs_out_cntl != next_hw_vs->pa_cl_vs_out_cntl ||
	     old_hw_vs->clipdist_mask != next_hw_vs->clipdist_mask ||
	     old_hw_vs->culldist_mask != next_hw_vs->culldist_mask ||
	     !old_hw_vs_variant ||
	     !next_hw_vs_variant ||
	     old_hw_vs_variant->key.opt.clip_disable !=
	     next_hw_vs_variant->key.opt.clip_disable))
		si_mark_atom_dirty(sctx, &sctx->clip_regs);
}
static void si_update_common_shader_state(struct si_context *sctx)
{
	sctx->uses_bindless_samplers =
		si_shader_uses_bindless_samplers(sctx->vs_shader.cso)  ||
		si_shader_uses_bindless_samplers(sctx->gs_shader.cso)  ||
		si_shader_uses_bindless_samplers(sctx->ps_shader.cso)  ||
		si_shader_uses_bindless_samplers(sctx->tcs_shader.cso) ||
		si_shader_uses_bindless_samplers(sctx->tes_shader.cso);
	sctx->uses_bindless_images =
		si_shader_uses_bindless_images(sctx->vs_shader.cso)  ||
		si_shader_uses_bindless_images(sctx->gs_shader.cso)  ||
		si_shader_uses_bindless_images(sctx->ps_shader.cso)  ||
		si_shader_uses_bindless_images(sctx->tcs_shader.cso) ||
		si_shader_uses_bindless_images(sctx->tes_shader.cso);
	sctx->do_update_shaders = true;
}
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
	struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
	struct si_shader_selector *sel = state;

	if (sctx->vs_shader.cso == sel)
		return;

	sctx->vs_shader.cso = sel;
	sctx->vs_shader.current = sel ? sel->first_variant : NULL;
	sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS] : 0;

	si_update_common_shader_state(sctx);
	si_update_vs_viewport_state(sctx);
	si_set_active_descriptors_for_shader(sctx, sel);
	si_update_streamout_state(sctx);
	si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
			    si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}
static void si_update_tess_uses_prim_id(struct si_context *sctx)
{
	sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id =
		(sctx->tes_shader.cso &&
		 sctx->tes_shader.cso->info.uses_primid) ||
		(sctx->tcs_shader.cso &&
		 sctx->tcs_shader.cso->info.uses_primid) ||
		(sctx->gs_shader.cso &&
		 sctx->gs_shader.cso->info.uses_primid) ||
		(sctx->ps_shader.cso && !sctx->gs_shader.cso &&
		 sctx->ps_shader.cso->info.uses_primid);
}
static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
	struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->gs_shader.cso != !!sel;

	if (sctx->gs_shader.cso == sel)
		return;

	sctx->gs_shader.cso = sel;
	sctx->gs_shader.current = sel ? sel->first_variant : NULL;
	sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;

	si_update_common_shader_state(sctx);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed) {
		si_shader_change_notify(sctx);
		if (sctx->ia_multi_vgt_param_key.u.uses_tess)
			si_update_tess_uses_prim_id(sctx);
	}
	si_update_vs_viewport_state(sctx);
	si_set_active_descriptors_for_shader(sctx, sel);
	si_update_streamout_state(sctx);
	si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
			    si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}
static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tcs_shader.cso != !!sel;

	if (sctx->tcs_shader.cso == sel)
		return;

	sctx->tcs_shader.cso = sel;
	sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
	si_update_tess_uses_prim_id(sctx);

	si_update_common_shader_state(sctx);

	if (enable_changed)
		sctx->last_tcs = NULL; /* invalidate derived tess state */

	si_set_active_descriptors_for_shader(sctx, sel);
}
static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
	struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tes_shader.cso != !!sel;

	if (sctx->tes_shader.cso == sel)
		return;

	sctx->tes_shader.cso = sel;
	sctx->tes_shader.current = sel ? sel->first_variant : NULL;
	sctx->ia_multi_vgt_param_key.u.uses_tess = sel != NULL;
	si_update_tess_uses_prim_id(sctx);

	si_update_common_shader_state(sctx);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed) {
		si_shader_change_notify(sctx);
		sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
	}
	si_update_vs_viewport_state(sctx);
	si_set_active_descriptors_for_shader(sctx, sel);
	si_update_streamout_state(sctx);
	si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
			    si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}
static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *old_sel = sctx->ps_shader.cso;
	struct si_shader_selector *sel = state;

	/* skip if supplied shader is one already in use */
	if (old_sel == sel)
		return;

	sctx->ps_shader.cso = sel;
	sctx->ps_shader.current = sel ? sel->first_variant : NULL;

	si_update_common_shader_state(sctx);
	if (sel) {
		if (sctx->ia_multi_vgt_param_key.u.uses_tess)
			si_update_tess_uses_prim_id(sctx);

		if (!old_sel ||
		    old_sel->info.colors_written != sel->info.colors_written)
			si_mark_atom_dirty(sctx, &sctx->cb_render_state);

		if (sctx->screen->has_out_of_order_rast &&
		    (!old_sel ||
		     old_sel->info.writes_memory != sel->info.writes_memory ||
		     old_sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] !=
		     sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]))
			si_mark_atom_dirty(sctx, &sctx->msaa_config);
	}
	si_set_active_descriptors_for_shader(sctx, sel);
}
static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
{
	if (shader->is_optimized) {
		util_queue_drop_job(&sctx->screen->shader_compiler_queue_low_priority,
				    &shader->ready);
	}

	util_queue_fence_destroy(&shader->ready);

	if (shader->pm4) {
		switch (shader->selector->type) {
		case PIPE_SHADER_VERTEX:
			if (shader->key.as_ls) {
				assert(sctx->b.chip_class <= VI);
				si_pm4_delete_state(sctx, ls, shader->pm4);
			} else if (shader->key.as_es) {
				assert(sctx->b.chip_class <= VI);
				si_pm4_delete_state(sctx, es, shader->pm4);
			} else {
				si_pm4_delete_state(sctx, vs, shader->pm4);
			}
			break;
		case PIPE_SHADER_TESS_CTRL:
			si_pm4_delete_state(sctx, hs, shader->pm4);
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (shader->key.as_es) {
				assert(sctx->b.chip_class <= VI);
				si_pm4_delete_state(sctx, es, shader->pm4);
			} else {
				si_pm4_delete_state(sctx, vs, shader->pm4);
			}
			break;
		case PIPE_SHADER_GEOMETRY:
			if (shader->is_gs_copy_shader)
				si_pm4_delete_state(sctx, vs, shader->pm4);
			else
				si_pm4_delete_state(sctx, gs, shader->pm4);
			break;
		case PIPE_SHADER_FRAGMENT:
			si_pm4_delete_state(sctx, ps, shader->pm4);
			break;
		}
	}

	si_shader_selector_reference(sctx, &shader->previous_stage_sel, NULL);
	si_shader_destroy(shader);
	free(shader);
}
void si_destroy_shader_selector(struct si_context *sctx,
				struct si_shader_selector *sel)
{
	struct si_shader *p = sel->first_variant, *c;
	struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
		[PIPE_SHADER_VERTEX] = &sctx->vs_shader,
		[PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
		[PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
		[PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
		[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
	};

	util_queue_drop_job(&sctx->screen->shader_compiler_queue, &sel->ready);

	if (current_shader[sel->type]->cso == sel) {
		current_shader[sel->type]->cso = NULL;
		current_shader[sel->type]->current = NULL;
	}

	while (p) {
		c = p->next_variant;
		si_delete_shader(sctx, p);
		p = c;
	}

	if (sel->main_shader_part)
		si_delete_shader(sctx, sel->main_shader_part);
	if (sel->main_shader_part_ls)
		si_delete_shader(sctx, sel->main_shader_part_ls);
	if (sel->main_shader_part_es)
		si_delete_shader(sctx, sel->main_shader_part_es);
	if (sel->gs_copy_shader)
		si_delete_shader(sctx, sel->gs_copy_shader);

	util_queue_fence_destroy(&sel->ready);
	mtx_destroy(&sel->mutex);
	free(sel->tokens);
	ralloc_free(sel->nir);
	free(sel);
}
static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;

	si_shader_selector_reference(sctx, &sel, NULL);
}
static unsigned si_get_ps_input_cntl(struct si_context *sctx,
				     struct si_shader *vs, unsigned name,
				     unsigned index, unsigned interpolate)
{
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	unsigned j, offset, ps_input_cntl = 0;

	if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
	    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
		ps_input_cntl |= S_028644_FLAT_SHADE(1);

	if (name == TGSI_SEMANTIC_PCOORD ||
	    (name == TGSI_SEMANTIC_TEXCOORD &&
	     sctx->sprite_coord_enable & (1 << index))) {
		ps_input_cntl |= S_028644_PT_SPRITE_TEX(1);
	}

	for (j = 0; j < vsinfo->num_outputs; j++) {
		if (name == vsinfo->output_semantic_name[j] &&
		    index == vsinfo->output_semantic_index[j]) {
			offset = vs->info.vs_output_param_offset[j];

			if (offset <= AC_EXP_PARAM_OFFSET_31) {
				/* The input is loaded from parameter memory. */
				ps_input_cntl |= S_028644_OFFSET(offset);
			} else if (!G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
				if (offset == AC_EXP_PARAM_UNDEFINED) {
					/* This can happen with depth-only rendering. */
					offset = 0;
				} else {
					/* The input is a DEFAULT_VAL constant. */
					assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
					       offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
					offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
				}

				ps_input_cntl = S_028644_OFFSET(0x20) |
						S_028644_DEFAULT_VAL(offset);
			}
			break;
		}
	}

	if (name == TGSI_SEMANTIC_PRIMID)
		/* PrimID is written after the last output. */
		ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[vsinfo->num_outputs]);
	else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
		/* No corresponding output found, load defaults into input.
		 * Don't set any other bits.
		 * (FLAT_SHADE=1 completely changes behavior) */
		ps_input_cntl = S_028644_OFFSET(0x20);
		/* D3D 9 behaviour. GL is undefined */
		if (name == TGSI_SEMANTIC_COLOR && index == 0)
			ps_input_cntl |= S_028644_DEFAULT_VAL(3);
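		/* DEFAULT_VAL picks one of four packed constants; going by the
		 * AC_EXP_PARAM_DEFAULT_VAL_0000..1111 range used above, value 3
		 * should correspond to (1,1,1,1), i.e. opaque white for an
		 * unbound COLOR0. */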
	}
	return ps_input_cntl;
}
static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader *ps = sctx->ps_shader.current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo = ps ? &ps->selector->info : NULL;
	unsigned i, num_interp, num_written = 0, bcol_interp[2];

	if (!ps || !ps->selector->info.num_inputs)
		return;

	num_interp = si_get_ps_num_interp(ps);
	assert(num_interp > 0);
	radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, num_interp);

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];

		radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, name, index,
						     interpolate));
		num_written++;

		if (name == TGSI_SEMANTIC_COLOR) {
			assert(index < ARRAY_SIZE(bcol_interp));
			bcol_interp[index] = interpolate;
		}
	}

	if (ps->key.part.ps.prolog.color_two_side) {
		unsigned bcol = TGSI_SEMANTIC_BCOLOR;

		for (i = 0; i < 2; i++) {
			if (!(psinfo->colors_read & (0xf << (i * 4))))
				continue;

			radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, bcol,
							     i, bcol_interp[i]));
			num_written++;
		}
	}
	assert(num_interp == num_written);
}
/**
 * Writing CONFIG or UCONFIG VGT registers requires VGT_FLUSH before that.
 */
static void si_init_config_add_vgt_flush(struct si_context *sctx)
{
	if (sctx->init_config_has_vgt_flush)
		return;

	/* Done by Vulkan before VGT_FLUSH. */
	si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
	si_pm4_cmd_add(sctx->init_config,
		       EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	si_pm4_cmd_end(sctx->init_config, false);

	/* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
	si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
	si_pm4_cmd_add(sctx->init_config, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	si_pm4_cmd_end(sctx->init_config, false);
	sctx->init_config_has_vgt_flush = true;
}
/* Initialize state related to ESGS / GSVS ring buffers */
static bool si_update_gs_ring_buffers(struct si_context *sctx)
{
	struct si_shader_selector *es =
		sctx->tes_shader.cso ? sctx->tes_shader.cso : sctx->vs_shader.cso;
	struct si_shader_selector *gs = sctx->gs_shader.cso;
	struct si_pm4_state *pm4;

	/* Chip constants. */
	unsigned num_se = sctx->screen->info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	/* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
	 * On VI+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
	 */
	unsigned gs_vertex_reuse = (sctx->b.chip_class >= VI ? 32 : 16) * num_se;
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;

	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(es->esgs_itemsize * gs_vertex_reuse *
					    wave_size, alignment);

	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
				  es->esgs_itemsize * gs->gs_input_verts_per_prim;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
				  gs->max_gsvs_emit_size;

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
	gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
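	/* Worked example, assuming a 4-SE VI part with esgs_itemsize = 96 and
	 * 3 input vertices per primitive: max_gs_waves = 128, so the
	 * recommended ESGS size is 128 * 2 * 64 * 96 * 3 = 4.5 MiB, which is
	 * already a multiple of the 1 KiB (256 * 4) alignment. */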
	/* Some rings don't have to be allocated if shaders don't use them.
	 * (e.g. no varyings between ES and GS or GS and VS)
	 *
	 * GFX9 doesn't have the ESGS ring.
	 */
	bool update_esgs = sctx->b.chip_class <= VI &&
			   esgs_ring_size &&
			   (!sctx->esgs_ring ||
			    sctx->esgs_ring->width0 < esgs_ring_size);
	bool update_gsvs = gsvs_ring_size &&
			   (!sctx->gsvs_ring ||
			    sctx->gsvs_ring->width0 < gsvs_ring_size);

	if (!update_esgs && !update_gsvs)
		return true;

	if (update_esgs) {
		pipe_resource_reference(&sctx->esgs_ring, NULL);
		sctx->esgs_ring =
			si_aligned_buffer_create(sctx->b.b.screen,
						 R600_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 esgs_ring_size, alignment);
		if (!sctx->esgs_ring)
			return false;
	}

	if (update_gsvs) {
		pipe_resource_reference(&sctx->gsvs_ring, NULL);
		sctx->gsvs_ring =
			si_aligned_buffer_create(sctx->b.b.screen,
						 R600_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 gsvs_ring_size, alignment);
		if (!sctx->gsvs_ring)
			return false;
	}

	/* Create the "init_config_gs_rings" state. */
	pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return false;

	if (sctx->b.chip_class >= CIK) {
		if (sctx->esgs_ring) {
			assert(sctx->b.chip_class <= VI);
			si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
				       sctx->esgs_ring->width0 / 256);
		}
		if (sctx->gsvs_ring)
			si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE,
				       sctx->gsvs_ring->width0 / 256);
	} else {
		if (sctx->esgs_ring)
			si_pm4_set_reg(pm4, R_0088C8_VGT_ESGS_RING_SIZE,
				       sctx->esgs_ring->width0 / 256);
		if (sctx->gsvs_ring)
			si_pm4_set_reg(pm4, R_0088CC_VGT_GSVS_RING_SIZE,
				       sctx->gsvs_ring->width0 / 256);
	}

	/* Set the state. */
	if (sctx->init_config_gs_rings)
		si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
	sctx->init_config_gs_rings = pm4;

	if (!sctx->init_config_has_vgt_flush) {
		si_init_config_add_vgt_flush(sctx);
		si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	}

	/* Flush the context to re-emit both init_config states. */
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);

	/* Set ring bindings. */
	if (sctx->esgs_ring) {
		assert(sctx->b.chip_class <= VI);
		si_set_ring_buffer(&sctx->b.b, SI_ES_RING_ESGS,
				   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
				   true, true, 4, 64, 0);
		si_set_ring_buffer(&sctx->b.b, SI_GS_RING_ESGS,
				   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
				   false, false, 0, 0, 0);
	}
	if (sctx->gsvs_ring) {
		si_set_ring_buffer(&sctx->b.b, SI_RING_GSVS,
				   sctx->gsvs_ring, 0, sctx->gsvs_ring->width0,
				   false, false, 0, 0, 0);
	}

	return true;
}
static void si_shader_lock(struct si_shader *shader)
{
	mtx_lock(&shader->selector->mutex);
	if (shader->previous_stage_sel) {
		assert(shader->previous_stage_sel != shader->selector);
		mtx_lock(&shader->previous_stage_sel->mutex);
	}
}

static void si_shader_unlock(struct si_shader *shader)
{
	if (shader->previous_stage_sel)
		mtx_unlock(&shader->previous_stage_sel->mutex);
	mtx_unlock(&shader->selector->mutex);
}
/**
 * @returns 1 if \p sel has been updated to use a new scratch buffer
 *          0 if not
 *          < 0 if there was a failure
 */
static int si_update_scratch_buffer(struct si_context *sctx,
				    struct si_shader *shader)
{
	uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
	int r;

	if (!shader)
		return 0;

	/* This shader doesn't need a scratch buffer */
	if (shader->config.scratch_bytes_per_wave == 0)
		return 0;

	/* Prevent race conditions when updating:
	 * - si_shader::scratch_bo
	 * - si_shader::binary::code
	 * - si_shader::previous_stage::binary::code.
	 */
	si_shader_lock(shader);

	/* This shader is already configured to use the current
	 * scratch buffer. */
	if (shader->scratch_bo == sctx->scratch_buffer) {
		si_shader_unlock(shader);
		return 0;
	}

	assert(sctx->scratch_buffer);

	if (shader->previous_stage)
		si_shader_apply_scratch_relocs(shader->previous_stage, scratch_va);

	si_shader_apply_scratch_relocs(shader, scratch_va);

	/* Replace the shader bo with a new bo that has the relocs applied. */
	r = si_shader_binary_upload(sctx->screen, shader);
	if (r) {
		si_shader_unlock(shader);
		return r;
	}

	/* Update the shader state to use the new shader bo. */
	si_shader_init_pm4_state(sctx->screen, shader);

	r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

	si_shader_unlock(shader);
	return 1;
}
static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
	return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
}

static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
{
	return shader ? shader->config.scratch_bytes_per_wave : 0;
}

static struct si_shader *si_get_tcs_current(struct si_context *sctx)
{
	if (!sctx->tes_shader.cso)
		return NULL; /* tessellation disabled */

	return sctx->tcs_shader.cso ? sctx->tcs_shader.current :
				      sctx->fixed_func_tcs_shader.current;
}
static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
	unsigned bytes = 0;

	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));

	if (sctx->tes_shader.cso) {
		struct si_shader *tcs = si_get_tcs_current(sctx);

		bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(tcs));
	}
	return bytes;
}
static bool si_update_scratch_relocs(struct si_context *sctx)
{
	struct si_shader *tcs = si_get_tcs_current(sctx);
	int r;

	/* Update the shaders, so that they are using the latest scratch.
	 * The scratch buffer may have been changed since these shaders were
	 * last used, so we still need to try to update them, even if they
	 * require scratch buffers smaller than the current size.
	 */
	r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
	if (r < 0)
		return false;
	if (r == 1)
		si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

	r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
	if (r < 0)
		return false;
	if (r == 1)
		si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);

	r = si_update_scratch_buffer(sctx, tcs);
	if (r < 0)
		return false;
	if (r == 1)
		si_pm4_bind_state(sctx, hs, tcs->pm4);

	/* VS can be bound as LS, ES, or VS. */
	r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
	if (r < 0)
		return false;
	if (r == 1) {
		if (sctx->tes_shader.current)
			si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
		else if (sctx->gs_shader.current)
			si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
		else
			si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
	}

	/* TES can be bound as ES or VS. */
	r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
	if (r < 0)
		return false;
	if (r == 1) {
		if (sctx->gs_shader.current)
			si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
		else
			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
	}

	return true;
}
static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
	unsigned current_scratch_buffer_size =
		si_get_current_scratch_buffer_size(sctx);
	unsigned scratch_bytes_per_wave =
		si_get_max_scratch_bytes_per_wave(sctx);
	unsigned scratch_needed_size = scratch_bytes_per_wave *
		sctx->scratch_waves;
	unsigned spi_tmpring_size;

	if (scratch_needed_size > 0) {
		if (scratch_needed_size > current_scratch_buffer_size) {
			/* Create a bigger scratch buffer */
			r600_resource_reference(&sctx->scratch_buffer, NULL);

			sctx->scratch_buffer = (struct r600_resource*)
				si_aligned_buffer_create(&sctx->screen->b,
							 R600_RESOURCE_FLAG_UNMAPPABLE,
							 PIPE_USAGE_DEFAULT,
							 scratch_needed_size, 256);
			if (!sctx->scratch_buffer)
				return false;

			si_mark_atom_dirty(sctx, &sctx->scratch_state);
			si_context_add_resource_size(&sctx->b.b,
						     &sctx->scratch_buffer->b.b);
		}

		if (!si_update_scratch_relocs(sctx))
			return false;
	}

	/* The LLVM shader backend should be reporting aligned scratch_sizes. */
	assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
	       "scratch size should already be aligned correctly.");

	spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
			   S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
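	/* e.g. scratch_bytes_per_wave = 8192 is programmed as WAVESIZE = 8;
	 * the field is in 1 KiB units, which is why the assert above requires
	 * a 1 KiB aligned size. */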
	if (spi_tmpring_size != sctx->spi_tmpring_size) {
		sctx->spi_tmpring_size = spi_tmpring_size;
		si_mark_atom_dirty(sctx, &sctx->scratch_state);
	}
	return true;
}
static void si_init_tess_factor_ring(struct si_context *sctx)
{
	bool double_offchip_buffers = sctx->b.chip_class >= CIK &&
				      sctx->b.family != CHIP_CARRIZO &&
				      sctx->b.family != CHIP_STONEY;
	/* This must be one less than the maximum number due to a hw limitation.
	 * Various hardware bugs in SI, CIK, and GFX9 need this.
	 */
	unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 127 : 63;
	unsigned max_offchip_buffers = max_offchip_buffers_per_se *
				       sctx->screen->info.max_se;
	unsigned offchip_granularity;

	switch (sctx->screen->tess_offchip_block_dw_size) {
	default:
		assert(0);
		/* fall through */
	case 8192:
		offchip_granularity = V_03093C_X_8K_DWORDS;
		break;
	case 4096:
		offchip_granularity = V_03093C_X_4K_DWORDS;
		break;
	}

	assert(!sctx->tf_ring);
	/* Use 64K alignment for both rings, so that we can pass the address
	 * to shaders as one SGPR containing bits [16:47].
	 */
	sctx->tf_ring = si_aligned_buffer_create(sctx->b.b.screen,
						 R600_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 32768 * sctx->screen->info.max_se,
						 64 * 1024);
	if (!sctx->tf_ring)
		return;

	assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);

	sctx->tess_offchip_ring =
		si_aligned_buffer_create(sctx->b.b.screen,
					 R600_RESOURCE_FLAG_UNMAPPABLE,
					 PIPE_USAGE_DEFAULT,
					 max_offchip_buffers *
					 sctx->screen->tess_offchip_block_dw_size * 4,
					 64 * 1024);
	if (!sctx->tess_offchip_ring)
		return;

	si_init_config_add_vgt_flush(sctx);

	uint64_t offchip_va = r600_resource(sctx->tess_offchip_ring)->gpu_address;
	uint64_t factor_va = r600_resource(sctx->tf_ring)->gpu_address;
	assert((offchip_va & 0xffff) == 0);
	assert((factor_va & 0xffff) == 0);
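	/* Both rings are 64 KiB aligned, so offchip_va >> 16 and
	 * factor_va >> 16 each fit in a single user SGPR below; the shader
	 * shifts the value left by 16 to recover address bits [16:47]. */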
	si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tess_offchip_ring),
		      RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
	si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tf_ring),
		      RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);

	/* Append these registers to the init config state. */
	if (sctx->b.chip_class >= CIK) {
		if (sctx->b.chip_class >= VI)
			--max_offchip_buffers;

		si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
			       S_030938_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
			       factor_va >> 8);
		if (sctx->b.chip_class >= GFX9)
			si_pm4_set_reg(sctx->init_config, R_030944_VGT_TF_MEMORY_BASE_HI,
				       factor_va >> 40);
		si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
			       S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
			       S_03093C_OFFCHIP_GRANULARITY(offchip_granularity));
	} else {
		assert(offchip_granularity == V_03093C_X_8K_DWORDS);
		si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
			       S_008988_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
			       factor_va >> 8);
		si_pm4_set_reg(sctx->init_config, R_0089B0_VGT_HS_OFFCHIP_PARAM,
			       S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers));
	}

	if (sctx->b.chip_class >= GFX9) {
		si_pm4_set_reg(sctx->init_config,
			       R_00B430_SPI_SHADER_USER_DATA_LS_0 +
			       GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K * 4,
			       offchip_va >> 16);
		si_pm4_set_reg(sctx->init_config,
			       R_00B430_SPI_SHADER_USER_DATA_LS_0 +
			       GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
			       factor_va >> 16);
	} else {
		si_pm4_set_reg(sctx->init_config,
			       R_00B430_SPI_SHADER_USER_DATA_HS_0 +
			       GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K * 4,
			       offchip_va >> 16);
		si_pm4_set_reg(sctx->init_config,
			       R_00B430_SPI_SHADER_USER_DATA_HS_0 +
			       GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
			       factor_va >> 16);
	}

	/* Flush the context to re-emit the init_config state.
	 * This is done only once in a lifetime of a context.
	 */
	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
}
/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
static void si_generate_fixed_func_tcs(struct si_context *sctx)
{
	struct ureg_src outer, inner;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!sctx->fixed_func_tcs_shader.cso);

	outer = ureg_DECL_system_value(ureg,
				       TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI, 0);
	inner = ureg_DECL_system_value(ureg,
				       TGSI_SEMANTIC_DEFAULT_TESSINNER_SI, 0);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, outer);
	ureg_MOV(ureg, tessinner, inner);
	ureg_END(ureg);

	sctx->fixed_func_tcs_shader.cso =
		ureg_create_shader_and_destroy(ureg, &sctx->b.b);
}
static void si_update_vgt_shader_config(struct si_context *sctx)
{
	/* Calculate the index of the config.
	 * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
	unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso;
	struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];

	if (!*pm4) {
		uint32_t stages = 0;

		*pm4 = CALLOC_STRUCT(si_pm4_state);

		if (sctx->tes_shader.cso) {
			stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
				  S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);

			if (sctx->gs_shader.cso)
				stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
					  S_028B54_GS_EN(1) |
					  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
			else
				stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
		} else if (sctx->gs_shader.cso) {
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				  S_028B54_GS_EN(1) |
				  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
		}

		if (sctx->b.chip_class >= GFX9)
			stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);

		si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
	}
	si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}
bool si_update_shaders(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context*)sctx;
	struct si_compiler_ctx_state compiler_state;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct si_shader *old_vs = si_get_vs_state(sctx);
	bool old_clip_disable = old_vs ? old_vs->key.opt.clip_disable : false;
	struct si_shader *old_ps = sctx->ps_shader.current;
	unsigned old_spi_shader_col_format =
		old_ps ? old_ps->key.part.ps.epilog.spi_shader_col_format : 0;
	int r;

	compiler_state.tm = sctx->tm;
	compiler_state.debug = sctx->debug;
	compiler_state.is_debug_context = sctx->is_debug;

	/* Update stages before GS. */
	if (sctx->tes_shader.cso) {
		if (!sctx->tf_ring) {
			si_init_tess_factor_ring(sctx);
			if (!sctx->tf_ring)
				return false;
		}

		/* VS as LS */
		if (sctx->b.chip_class <= VI) {
			r = si_shader_select(ctx, &sctx->vs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
		}

		if (sctx->tcs_shader.cso) {
			r = si_shader_select(ctx, &sctx->tcs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
		} else {
			if (!sctx->fixed_func_tcs_shader.cso) {
				si_generate_fixed_func_tcs(sctx);
				if (!sctx->fixed_func_tcs_shader.cso)
					return false;
			}

			r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs,
					  sctx->fixed_func_tcs_shader.current->pm4);
		}

		if (sctx->gs_shader.cso) {
			/* TES as ES */
			if (sctx->b.chip_class <= VI) {
				r = si_shader_select(ctx, &sctx->tes_shader,
						     &compiler_state);
				if (r)
					return false;
				si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
			}
		} else {
			/* TES as VS */
			r = si_shader_select(ctx, &sctx->tes_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
		}
	} else if (sctx->gs_shader.cso) {
		if (sctx->b.chip_class <= VI) {
			/* VS as ES */
			r = si_shader_select(ctx, &sctx->vs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);

			si_pm4_bind_state(sctx, ls, NULL);
			si_pm4_bind_state(sctx, hs, NULL);
		}
	} else {
		/* VS as VS */
		r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		si_pm4_bind_state(sctx, ls, NULL);
		si_pm4_bind_state(sctx, hs, NULL);
	}

	/* Update GS. */
	if (sctx->gs_shader.cso) {
		r = si_shader_select(ctx, &sctx->gs_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4);

		if (!si_update_gs_ring_buffers(sctx))
			return false;
	} else {
		si_pm4_bind_state(sctx, gs, NULL);
		if (sctx->b.chip_class <= VI)
			si_pm4_bind_state(sctx, es, NULL);
	}

	si_update_vgt_shader_config(sctx);

	if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable)
		si_mark_atom_dirty(sctx, &sctx->clip_regs);

	if (sctx->ps_shader.cso) {
		unsigned db_shader_control;

		r = si_shader_select(ctx, &sctx->ps_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		db_shader_control =
			sctx->ps_shader.cso->db_shader_control |
			S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS);

		if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
		    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
		    sctx->flatshade != rs->flatshade) {
			sctx->sprite_coord_enable = rs->sprite_coord_enable;
			sctx->flatshade = rs->flatshade;
			si_mark_atom_dirty(sctx, &sctx->spi_map);
		}

		if (sctx->screen->rbplus_allowed &&
		    si_pm4_state_changed(sctx, ps) &&
		    (!old_ps ||
		     old_spi_shader_col_format !=
		     sctx->ps_shader.current->key.part.ps.epilog.spi_shader_col_format))
			si_mark_atom_dirty(sctx, &sctx->cb_render_state);

		if (sctx->ps_db_shader_control != db_shader_control) {
			sctx->ps_db_shader_control = db_shader_control;
			si_mark_atom_dirty(sctx, &sctx->db_render_state);
			if (sctx->screen->dpbb_allowed)
				si_mark_atom_dirty(sctx, &sctx->dpbb_state);
		}

		if (sctx->smoothing_enabled != sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing) {
			sctx->smoothing_enabled = sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing;
			si_mark_atom_dirty(sctx, &sctx->msaa_config);

			if (sctx->b.chip_class == SI)
				si_mark_atom_dirty(sctx, &sctx->db_render_state);

			if (sctx->framebuffer.nr_samples <= 1)
				si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
		}
	}

	if (si_pm4_state_enabled_and_changed(sctx, ls) ||
	    si_pm4_state_enabled_and_changed(sctx, hs) ||
	    si_pm4_state_enabled_and_changed(sctx, es) ||
	    si_pm4_state_enabled_and_changed(sctx, gs) ||
	    si_pm4_state_enabled_and_changed(sctx, vs) ||
	    si_pm4_state_enabled_and_changed(sctx, ps)) {
		if (!si_update_spi_tmpring_size(sctx))
			return false;
	}

	if (sctx->b.chip_class >= CIK) {
		if (si_pm4_state_enabled_and_changed(sctx, ls))
			sctx->prefetch_L2_mask |= SI_PREFETCH_LS;
		else if (!sctx->queued.named.ls)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_LS;

		if (si_pm4_state_enabled_and_changed(sctx, hs))
			sctx->prefetch_L2_mask |= SI_PREFETCH_HS;
		else if (!sctx->queued.named.hs)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_HS;

		if (si_pm4_state_enabled_and_changed(sctx, es))
			sctx->prefetch_L2_mask |= SI_PREFETCH_ES;
		else if (!sctx->queued.named.es)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_ES;

		if (si_pm4_state_enabled_and_changed(sctx, gs))
			sctx->prefetch_L2_mask |= SI_PREFETCH_GS;
		else if (!sctx->queued.named.gs)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_GS;

		if (si_pm4_state_enabled_and_changed(sctx, vs))
			sctx->prefetch_L2_mask |= SI_PREFETCH_VS;
		else if (!sctx->queued.named.vs)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_VS;

		if (si_pm4_state_enabled_and_changed(sctx, ps))
			sctx->prefetch_L2_mask |= SI_PREFETCH_PS;
		else if (!sctx->queued.named.ps)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_PS;
	}

	sctx->do_update_shaders = false;
	return true;
}
static void si_emit_scratch_state(struct si_context *sctx,
				  struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
			       sctx->spi_tmpring_size);

	if (sctx->scratch_buffer) {
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  sctx->scratch_buffer, RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SCRATCH_BUFFER);
	}
}
void *si_get_blit_vs(struct si_context *sctx, enum blitter_attrib_type type,
		     unsigned num_layers)
{
	struct pipe_context *pipe = &sctx->b.b;
	unsigned vs_blit_property;
	void **vs;

	switch (type) {
	case UTIL_BLITTER_ATTRIB_NONE:
		vs = num_layers > 1 ? &sctx->vs_blit_pos_layered :
				      &sctx->vs_blit_pos;
		vs_blit_property = SI_VS_BLIT_SGPRS_POS;
		break;
	case UTIL_BLITTER_ATTRIB_COLOR:
		vs = num_layers > 1 ? &sctx->vs_blit_color_layered :
				      &sctx->vs_blit_color;
		vs_blit_property = SI_VS_BLIT_SGPRS_POS_COLOR;
		break;
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
		assert(num_layers == 1);
		vs = &sctx->vs_blit_texcoord;
		vs_blit_property = SI_VS_BLIT_SGPRS_POS_TEXCOORD;
		break;
	default:
		assert(0);
		return NULL;
	}
	if (*vs)
		return *vs;

	struct ureg_program *ureg = ureg_create(PIPE_SHADER_VERTEX);
	if (!ureg)
		return NULL;

	/* Tell the shader to load VS inputs from SGPRs: */
	ureg_property(ureg, TGSI_PROPERTY_VS_BLIT_SGPRS, vs_blit_property);
	ureg_property(ureg, TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION, true);

	/* This is just a pass-through shader with 1-3 MOV instructions. */
	ureg_MOV(ureg,
		 ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0),
		 ureg_DECL_vs_input(ureg, 0));

	if (type != UTIL_BLITTER_ATTRIB_NONE) {
		ureg_MOV(ureg,
			 ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0),
			 ureg_DECL_vs_input(ureg, 1));
	}

	if (num_layers > 1) {
		struct ureg_src instance_id =
			ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);
		struct ureg_dst layer =
			ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);

		ureg_MOV(ureg, ureg_writemask(layer, TGSI_WRITEMASK_X),
			 ureg_scalar(instance_id, TGSI_SWIZZLE_X));
	}
	ureg_END(ureg);

	*vs = ureg_create_shader_and_destroy(ureg, pipe);
	return *vs;
}
void si_init_shader_functions(struct si_context *sctx)
{
	si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);
	si_init_atom(sctx, &sctx->scratch_state, &sctx->atoms.s.scratch_state,
		     si_emit_scratch_state);

	sctx->b.b.create_vs_state = si_create_shader_selector;
	sctx->b.b.create_tcs_state = si_create_shader_selector;
	sctx->b.b.create_tes_state = si_create_shader_selector;
	sctx->b.b.create_gs_state = si_create_shader_selector;
	sctx->b.b.create_fs_state = si_create_shader_selector;

	sctx->b.b.bind_vs_state = si_bind_vs_shader;
	sctx->b.b.bind_tcs_state = si_bind_tcs_shader;
	sctx->b.b.bind_tes_state = si_bind_tes_shader;
	sctx->b.b.bind_gs_state = si_bind_gs_shader;
	sctx->b.b.bind_fs_state = si_bind_ps_shader;

	sctx->b.b.delete_vs_state = si_delete_shader_selector;
	sctx->b.b.delete_tcs_state = si_delete_shader_selector;
	sctx->b.b.delete_tes_state = si_delete_shader_selector;
	sctx->b.b.delete_gs_state = si_delete_shader_selector;
	sctx->b.b.delete_fs_state = si_delete_shader_selector;
}