/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "si_build_pm4.h"
28 #include "compiler/nir/nir_serialize.h"
29 #include "tgsi/tgsi_parse.h"
30 #include "util/hash_table.h"
31 #include "util/crc32.h"
32 #include "util/u_async_debug.h"
33 #include "util/u_memory.h"
34 #include "util/u_prim.h"
36 #include "util/disk_cache.h"
37 #include "util/mesa-sha1.h"
38 #include "ac_exp_param.h"
39 #include "ac_shader_util.h"
/**
 * Return the IR binary in a buffer. For TGSI the first 4 bytes contain its
 * size as integer.
 */
void *si_get_ir_binary(struct si_shader_selector *sel)
{
	struct blob blob;
	unsigned ir_size;
	void *ir_binary;

	if (sel->tokens) {
		ir_binary = sel->tokens;
		ir_size = tgsi_num_tokens(sel->tokens) *
			  sizeof(struct tgsi_token);
	} else {
		assert(sel->nir);

		blob_init(&blob);
		nir_serialize(&blob, sel->nir);
		ir_binary = blob.data;
		ir_size = blob.size;
	}

	unsigned size = 4 + ir_size + sizeof(sel->so);
	char *result = (char*)MALLOC(size);
	if (!result)
		return NULL;

	*((uint32_t*)result) = size;
	memcpy(result + 4, ir_binary, ir_size);
	memcpy(result + 4 + ir_size, &sel->so, sizeof(sel->so));

	if (sel->nir)
		blob_finish(&blob);

	return result;
}
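
/*
 * Illustrative layout sketch (assumed reader-side code, not used by the
 * driver): the buffer built above is
 *
 *	[uint32_t total_size][IR bytes][struct pipe_stream_output_info so]
 *
 * so a consumer can recover the IR size as:
 *
 *	uint32_t total = *(uint32_t *)buf;
 *	unsigned ir_size = total - 4 - sizeof(sel->so);
 */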
81 /** Copy "data" to "ptr" and return the next dword following copied data. */
82 static uint32_t *write_data(uint32_t *ptr
, const void *data
, unsigned size
)
84 /* data may be NULL if size == 0 */
86 memcpy(ptr
, data
, size
);
87 ptr
+= DIV_ROUND_UP(size
, 4);
/** Read data from "ptr". Return the next dword following the data. */
static uint32_t *read_data(uint32_t *ptr, void *data, unsigned size)
{
	memcpy(data, ptr, size);
	ptr += DIV_ROUND_UP(size, 4);
	return ptr;
}
/**
 * Write the size as uint followed by the data. Return the next dword
 * following the copied data.
 */
static uint32_t *write_chunk(uint32_t *ptr, const void *data, unsigned size)
{
	*ptr++ = size;
	return write_data(ptr, data, size);
}
/**
 * Read the size as uint followed by the data. Return both via parameters.
 * Return the next dword following the data.
 */
static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size)
{
	*size = *ptr++;
	assert(*data == NULL);
	if (!*size)
		return ptr;
	*data = malloc(*size);
	return read_data(ptr, *data, *size);
}
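
/*
 * Illustrative round trip (assumed sizes): a chunk written by write_chunk()
 * with a 10-byte payload occupies 1 + DIV_ROUND_UP(10, 4) = 4 dwords, and
 * read_chunk() advances the cursor by exactly the same amount, so writes
 * and reads stay in lockstep.
 */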
/**
 * Return the shader binary in a buffer. The first 4 bytes contain its size
 * as integer.
 */
static void *si_get_shader_binary(struct si_shader *shader)
{
	/* There is always a size of data followed by the data itself. */
	unsigned relocs_size = shader->binary.reloc_count *
			       sizeof(shader->binary.relocs[0]);
	unsigned disasm_size = shader->binary.disasm_string ?
			       strlen(shader->binary.disasm_string) + 1 : 0;
	unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
				strlen(shader->binary.llvm_ir_string) + 1 : 0;
	unsigned size =
		4 + /* total size */
		4 + /* CRC32 of the data below */
		align(sizeof(shader->config), 4) +
		align(sizeof(shader->info), 4) +
		4 + align(shader->binary.code_size, 4) +
		4 + align(shader->binary.rodata_size, 4) +
		4 + align(relocs_size, 4) +
		4 + align(disasm_size, 4) +
		4 + align(llvm_ir_size, 4);
	void *buffer = CALLOC(1, size);
	uint32_t *ptr = (uint32_t*)buffer;

	if (!buffer)
		return NULL;

	*ptr++ = size;
	ptr++; /* CRC32 is calculated at the end. */

	ptr = write_data(ptr, &shader->config, sizeof(shader->config));
	ptr = write_data(ptr, &shader->info, sizeof(shader->info));
	ptr = write_chunk(ptr, shader->binary.code, shader->binary.code_size);
	ptr = write_chunk(ptr, shader->binary.rodata, shader->binary.rodata_size);
	ptr = write_chunk(ptr, shader->binary.relocs, relocs_size);
	ptr = write_chunk(ptr, shader->binary.disasm_string, disasm_size);
	ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size);
	assert((char *)ptr - (char *)buffer == size);

	/* Compute CRC32. */
	ptr = (uint32_t*)buffer;
	ptr++;
	*ptr = util_hash_crc32(ptr + 1, size - 8);

	return buffer;
}
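
/*
 * Illustrative layout of the blob built above:
 *
 *	[u32 size][u32 crc32][config][info]
 *	[chunk code][chunk rodata][chunk relocs][chunk disasm][chunk llvm_ir]
 *
 * Each chunk is a u32 byte count followed by 4-byte-aligned data; the CRC32
 * covers everything after the first 8 bytes (hence "size - 8" above).
 */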
static bool si_load_shader_binary(struct si_shader *shader, void *binary)
{
	uint32_t *ptr = (uint32_t*)binary;
	uint32_t size = *ptr++;
	uint32_t crc32 = *ptr++;
	unsigned chunk_size;

	if (util_hash_crc32(ptr, size - 8) != crc32) {
		fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n");
		return false;
	}

	ptr = read_data(ptr, &shader->config, sizeof(shader->config));
	ptr = read_data(ptr, &shader->info, sizeof(shader->info));
	ptr = read_chunk(ptr, (void**)&shader->binary.code,
			 &shader->binary.code_size);
	ptr = read_chunk(ptr, (void**)&shader->binary.rodata,
			 &shader->binary.rodata_size);
	ptr = read_chunk(ptr, (void**)&shader->binary.relocs, &chunk_size);
	shader->binary.reloc_count = chunk_size / sizeof(shader->binary.relocs[0]);
	ptr = read_chunk(ptr, (void**)&shader->binary.disasm_string, &chunk_size);
	ptr = read_chunk(ptr, (void**)&shader->binary.llvm_ir_string, &chunk_size);

	return true;
}
/**
 * Insert a shader into the cache. It's assumed the shader is not in the cache.
 * Use si_shader_cache_load_shader before calling this.
 *
 * Returns false on failure, in which case the ir_binary should be freed.
 */
bool si_shader_cache_insert_shader(struct si_screen *sscreen, void *ir_binary,
				   struct si_shader *shader,
				   bool insert_into_disk_cache)
{
	void *hw_binary;
	struct hash_entry *entry;
	uint8_t key[CACHE_KEY_SIZE];

	entry = _mesa_hash_table_search(sscreen->shader_cache, ir_binary);
	if (entry)
		return false; /* already added */

	hw_binary = si_get_shader_binary(shader);
	if (!hw_binary)
		return false;

	if (_mesa_hash_table_insert(sscreen->shader_cache, ir_binary,
				    hw_binary) == NULL) {
		FREE(hw_binary);
		return false;
	}

	if (sscreen->disk_shader_cache && insert_into_disk_cache) {
		disk_cache_compute_key(sscreen->disk_shader_cache, ir_binary,
				       *((uint32_t *)ir_binary), key);
		disk_cache_put(sscreen->disk_shader_cache, key, hw_binary,
			       *((uint32_t *) hw_binary), NULL);
	}

	return true;
}
bool si_shader_cache_load_shader(struct si_screen *sscreen, void *ir_binary,
				 struct si_shader *shader)
{
	struct hash_entry *entry =
		_mesa_hash_table_search(sscreen->shader_cache, ir_binary);
	if (!entry) {
		if (sscreen->disk_shader_cache) {
			unsigned char sha1[CACHE_KEY_SIZE];
			size_t tg_size = *((uint32_t *) ir_binary);

			disk_cache_compute_key(sscreen->disk_shader_cache,
					       ir_binary, tg_size, sha1);

			size_t binary_size;
			uint8_t *buffer =
				disk_cache_get(sscreen->disk_shader_cache,
					       sha1, &binary_size);
			if (!buffer)
				return false;

			if (binary_size < sizeof(uint32_t) ||
			    *((uint32_t*)buffer) != binary_size) {
				/* Something has gone wrong discard the item
				 * from the cache and rebuild/link from
				 * source.
				 */
				assert(!"Invalid radeonsi shader disk cache "
				       "item!");

				disk_cache_remove(sscreen->disk_shader_cache,
						  sha1);
				free(buffer);

				return false;
			}

			if (!si_load_shader_binary(shader, buffer)) {
				free(buffer);
				return false;
			}
			free(buffer);

			if (!si_shader_cache_insert_shader(sscreen, ir_binary,
							   shader, false))
				FREE(ir_binary);
		} else {
			return false;
		}
	} else {
		if (si_load_shader_binary(shader, entry->data))
			FREE(ir_binary);
		else
			return false;
	}

	p_atomic_inc(&sscreen->num_shader_cache_hits);
	return true;
}
static uint32_t si_shader_cache_key_hash(const void *key)
{
	/* The first dword is the key size. */
	return util_hash_crc32(key, *(uint32_t*)key);
}
static bool si_shader_cache_key_equals(const void *a, const void *b)
{
	uint32_t *keya = (uint32_t*)a;
	uint32_t *keyb = (uint32_t*)b;

	/* The first dword is the key size. */
	if (*keya != *keyb)
		return false;

	return memcmp(keya, keyb, *keya) == 0;
}
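
/*
 * Note on the key format (restating the invariant above): keys are
 * self-describing, so the hash covers exactly the bytes that the equality
 * callback compares: util_hash_crc32(key, *(uint32_t*)key).
 */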
static void si_destroy_shader_cache_entry(struct hash_entry *entry)
{
	FREE((void*)entry->key);
	FREE(entry->data);
}
bool si_init_shader_cache(struct si_screen *sscreen)
{
	(void) mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
	sscreen->shader_cache =
		_mesa_hash_table_create(NULL,
					si_shader_cache_key_hash,
					si_shader_cache_key_equals);

	return sscreen->shader_cache != NULL;
}
void si_destroy_shader_cache(struct si_screen *sscreen)
{
	if (sscreen->shader_cache)
		_mesa_hash_table_destroy(sscreen->shader_cache,
					 si_destroy_shader_cache_entry);
	mtx_destroy(&sscreen->shader_cache_mutex);
}
/* SHADER STATES */

static void si_set_tesseval_regs(struct si_screen *sscreen,
				 const struct si_shader_selector *tes,
				 struct si_pm4_state *pm4)
{
	const struct tgsi_shader_info *info = &tes->info;
	unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
	unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
	bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
	bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
	unsigned type, partitioning, topology, distribution_mode;

	switch (tes_prim_mode) {
	case PIPE_PRIM_LINES:
		type = V_028B6C_TESS_ISOLINE;
		break;
	case PIPE_PRIM_TRIANGLES:
		type = V_028B6C_TESS_TRIANGLE;
		break;
	case PIPE_PRIM_QUADS:
		type = V_028B6C_TESS_QUAD;
		break;
	default:
		assert(0);
		return;
	}

	switch (tes_spacing) {
	case PIPE_TESS_SPACING_FRACTIONAL_ODD:
		partitioning = V_028B6C_PART_FRAC_ODD;
		break;
	case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
		partitioning = V_028B6C_PART_FRAC_EVEN;
		break;
	case PIPE_TESS_SPACING_EQUAL:
		partitioning = V_028B6C_PART_INTEGER;
		break;
	default:
		assert(0);
		return;
	}

	if (tes_point_mode)
		topology = V_028B6C_OUTPUT_POINT;
	else if (tes_prim_mode == PIPE_PRIM_LINES)
		topology = V_028B6C_OUTPUT_LINE;
	else if (tes_vertex_order_cw)
		/* for some reason, this must be the other way around */
		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
	else
		topology = V_028B6C_OUTPUT_TRIANGLE_CW;

	if (sscreen->has_distributed_tess) {
		if (sscreen->info.family == CHIP_FIJI ||
		    sscreen->info.family >= CHIP_POLARIS10)
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
		else
			distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
	} else
		distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;

	pm4->shader->vgt_tf_param = S_028B6C_TYPE(type) |
				    S_028B6C_PARTITIONING(partitioning) |
				    S_028B6C_TOPOLOGY(topology) |
				    S_028B6C_DISTRIBUTION_MODE(distribution_mode);
}
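
/*
 * Illustrative example (assumed inputs): a triangle-domain TES with
 * fractional-odd spacing, CCW winding and point mode off would select
 * TYPE = TESS_TRIANGLE, PARTITIONING = PART_FRAC_ODD and, because the
 * winding is inverted here, TOPOLOGY = OUTPUT_TRIANGLE_CW.
 */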
/* Polaris needs different VTX_REUSE_DEPTH settings depending on
 * whether the "fractional odd" tessellation spacing is used.
 *
 * Possible VGT configurations and which state should set the register:
 *
 *   Reg set in   | VGT shader configuration   | Value
 * ------------------------------------------------------
 *     VS as VS   | VS                         | 30
 *     VS as ES   | ES -> GS -> VS             | 30
 *    TES as VS   | LS -> HS -> VS             | 14 or 30
 *    TES as ES   | LS -> HS -> ES -> GS -> VS | 14 or 30
 *
 * If "shader" is NULL, it's assumed it's not LS or GS copy shader.
 */
static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen,
					 struct si_shader_selector *sel,
					 struct si_shader *shader,
					 struct si_pm4_state *pm4)
{
	unsigned type = sel->type;

	if (sscreen->info.family < CHIP_POLARIS10)
		return;

	/* VS as VS, or VS as ES: */
	if ((type == PIPE_SHADER_VERTEX &&
	     (!shader ||
	      (!shader->key.as_ls && !shader->is_gs_copy_shader))) ||
	    /* TES as VS, or TES as ES: */
	    type == PIPE_SHADER_TESS_EVAL) {
		unsigned vtx_reuse_depth = 30;

		if (type == PIPE_SHADER_TESS_EVAL &&
		    sel->info.properties[TGSI_PROPERTY_TES_SPACING] ==
		    PIPE_TESS_SPACING_FRACTIONAL_ODD)
			vtx_reuse_depth = 14;

		assert(pm4->shader);
		pm4->shader->vgt_vertex_reuse_block_cntl = vtx_reuse_depth;
	}
}
static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
{
	if (shader->pm4)
		si_pm4_clear_state(shader->pm4);
	else
		shader->pm4 = CALLOC_STRUCT(si_pm4_state);

	if (shader->pm4) {
		shader->pm4->shader = shader;
		return shader->pm4;
	} else {
		fprintf(stderr, "radeonsi: Failed to create pm4 state.\n");
		return NULL;
	}
}
static unsigned si_get_num_vs_user_sgprs(unsigned num_always_on_user_sgprs)
{
	/* Add the pointer to VBO descriptors. */
	return num_always_on_user_sgprs + 1;
}
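
/*
 * Illustrative usage: si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR) yields
 * the always-on user SGPR count plus one extra user SGPR that holds the
 * pointer to the vertex buffer descriptor list.
 */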
static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned vgpr_comp_cnt;
	uint64_t va;

	assert(sscreen->info.chip_class <= GFX8);

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	/* We need at least 2 components for LS.
	 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
	 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
	 */
	vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;

	si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
	si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40));

	shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
			       S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
			       S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
			       S_00B528_DX10_CLAMP(1) |
			       S_00B528_FLOAT_MODE(shader->config.float_mode);
	shader->config.rsrc2 = S_00B52C_USER_SGPR(si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR)) |
			       S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
}
static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	uint64_t va;
	unsigned ls_vgpr_comp_cnt = 0;

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (sscreen->info.chip_class >= GFX9) {
		si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
		si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40));

		/* We need at least 2 components for LS.
		 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
		 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
		 */
		ls_vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;

		unsigned num_user_sgprs =
			si_get_num_vs_user_sgprs(GFX9_TCS_NUM_USER_SGPR);

		shader->config.rsrc2 =
			S_00B42C_USER_SGPR(num_user_sgprs) |
			S_00B42C_USER_SGPR_MSB(num_user_sgprs >> 5) |
			S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
	} else {
		si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
		si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, S_00B424_MEM_BASE(va >> 40));

		shader->config.rsrc2 =
			S_00B42C_USER_SGPR(GFX6_TCS_NUM_USER_SGPR) |
			S_00B42C_OC_LDS_EN(1) |
			S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
	}

	si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
		       S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B428_DX10_CLAMP(1) |
		       S_00B428_FLOAT_MODE(shader->config.float_mode) |
		       S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));

	if (sscreen->info.chip_class <= GFX8) {
		si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
			       shader->config.rsrc2);
	}
}
static void si_emit_shader_es(struct si_context *sctx)
{
	struct si_shader *shader = sctx->queued.named.es->shader;
	unsigned initial_cdw = sctx->gfx_cs->current.cdw;

	if (!shader)
		return;

	radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
				   SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
				   shader->selector->esgs_itemsize / 4);

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
					   SI_TRACKED_VGT_TF_PARAM,
					   shader->vgt_tf_param);

	if (shader->vgt_vertex_reuse_block_cntl)
		radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
					   SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
					   shader->vgt_vertex_reuse_block_cntl);

	if (initial_cdw != sctx->gfx_cs->current.cdw)
		sctx->context_roll = true;
}
static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_pm4_state *pm4;
	unsigned num_user_sgprs;
	unsigned vgpr_comp_cnt;
	uint64_t va;
	unsigned oc_lds_en;

	assert(sscreen->info.chip_class <= GFX8);

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	pm4->atom.emit = si_emit_shader_es;
	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (shader->selector->type == PIPE_SHADER_VERTEX) {
		/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
		vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
		num_user_sgprs = si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR);
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

	si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
	si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(va >> 40));
	si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
		       S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B328_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B328_DX10_CLAMP(1) |
		       S_00B328_FLOAT_MODE(shader->config.float_mode));
	si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
		       S_00B32C_USER_SGPR(num_user_sgprs) |
		       S_00B32C_OC_LDS_EN(oc_lds_en) |
		       S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(sscreen, shader->selector, pm4);

	polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}
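
/*
 * Illustrative usage (assumed input): si_conv_prim_to_gs_out(PIPE_PRIM_QUADS)
 * returns V_028A6C_OUTPRIM_TYPE_TRISTRIP, since the VGT only distinguishes
 * point/line/triangle output classes, not the exact input topology.
 */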
struct gfx9_gs_info {
	unsigned es_verts_per_subgroup;
	unsigned gs_prims_per_subgroup;
	unsigned gs_inst_prims_in_subgroup;
	unsigned max_prims_per_subgroup;
	unsigned lds_size; /* in 128-dword granules, computed by gfx9_get_gs_info */
};
static void gfx9_get_gs_info(struct si_shader_selector *es,
			     struct si_shader_selector *gs,
			     struct gfx9_gs_info *out)
{
	unsigned gs_num_invocations = MAX2(gs->gs_num_invocations, 1);
	unsigned input_prim = gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
	bool uses_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
			      input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space. */
	const unsigned max_lds_size = 8 * 1024;
	const unsigned esgs_itemsize = es->esgs_itemsize / 4;
	unsigned esgs_lds_size;

	/* All these are per subgroup: */
	const unsigned max_out_prims = 32 * 1024;
	const unsigned max_es_verts = 255;
	const unsigned ideal_gs_prims = 64;
	unsigned max_gs_prims, gs_prims;
	unsigned min_es_verts, es_verts, worst_case_es_verts;

	if (uses_adjacency || gs_num_invocations > 1)
		max_gs_prims = 127 / gs_num_invocations;
	else
		max_gs_prims = 255;

	/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
	 * Make sure we don't go over the maximum value.
	 */
	if (gs->gs_max_out_vertices > 0) {
		max_gs_prims = MIN2(max_gs_prims,
				    max_out_prims /
				    (gs->gs_max_out_vertices * gs_num_invocations));
	}
	assert(max_gs_prims > 0);

	/* If the primitive has adjacency, halve the number of vertices
	 * that will be reused in multiple primitives.
	 */
	min_es_verts = gs->gs_input_verts_per_prim / (uses_adjacency ? 2 : 1);

	gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
	worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

	/* Compute ESGS LDS size based on the worst case number of ES vertices
	 * needed to create the target number of GS prims per subgroup.
	 */
	esgs_lds_size = esgs_itemsize * worst_case_es_verts;

	/* If total LDS usage is too big, refactor partitions based on ratio
	 * of ESGS item sizes.
	 */
	if (esgs_lds_size > max_lds_size) {
		/* Our target GS Prims Per Subgroup was too large. Calculate
		 * the maximum number of GS Prims Per Subgroup that will fit
		 * into LDS, capped by the maximum that the hardware can support.
		 */
		gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
				max_gs_prims);
		assert(gs_prims > 0);
		worst_case_es_verts = MIN2(min_es_verts * gs_prims,
					   max_es_verts);

		esgs_lds_size = esgs_itemsize * worst_case_es_verts;
		assert(esgs_lds_size <= max_lds_size);
	}

	/* Now calculate remaining ESGS information. */
	if (esgs_lds_size)
		es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
	else
		es_verts = max_es_verts;

	/* Vertices for adjacency primitives are not always reused, so restore
	 * it for ES_VERTS_PER_SUBGRP.
	 */
	min_es_verts = gs->gs_input_verts_per_prim;

	/* For normal primitives, the VGT only checks if they are past the ES
	 * verts per subgroup after allocating a full GS primitive and if they
	 * are, kick off a new subgroup. But if those additional ES verts are
	 * unique (e.g. not reused) we need to make sure there is enough LDS
	 * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
	 */
	es_verts -= min_es_verts - 1;

	out->es_verts_per_subgroup = es_verts;
	out->gs_prims_per_subgroup = gs_prims;
	out->gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
	out->max_prims_per_subgroup = out->gs_inst_prims_in_subgroup *
				      gs->gs_max_out_vertices;
	out->lds_size = align(esgs_lds_size, 128) / 128;

	assert(out->max_prims_per_subgroup <= max_out_prims);
}
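
/*
 * Worked example (assumed numbers): with esgs_itemsize = 16 dwords, plain
 * triangles (min_es_verts = 3) and one GS invocation, gs_prims starts at
 * ideal_gs_prims = 64, worst_case_es_verts = MIN2(3 * 64, 255) = 192 and
 * esgs_lds_size = 16 * 192 = 3072 dwords, well under the 8K-dword budget;
 * lds_size then becomes align(3072, 128) / 128 = 24 allocation granules.
 */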
static void si_emit_shader_gs(struct si_context *sctx)
{
	struct si_shader *shader = sctx->queued.named.gs->shader;
	unsigned initial_cdw = sctx->gfx_cs->current.cdw;

	if (!shader)
		return;

	/* R_028A60_VGT_GSVS_RING_OFFSET_1, R_028A64_VGT_GSVS_RING_OFFSET_2
	 * R_028A68_VGT_GSVS_RING_OFFSET_3, R_028A6C_VGT_GS_OUT_PRIM_TYPE */
	radeon_opt_set_context_reg4(sctx, R_028A60_VGT_GSVS_RING_OFFSET_1,
				    SI_TRACKED_VGT_GSVS_RING_OFFSET_1,
				    shader->ctx_reg.gs.vgt_gsvs_ring_offset_1,
				    shader->ctx_reg.gs.vgt_gsvs_ring_offset_2,
				    shader->ctx_reg.gs.vgt_gsvs_ring_offset_3,
				    shader->ctx_reg.gs.vgt_gs_out_prim_type);

	/* R_028AB0_VGT_GSVS_RING_ITEMSIZE */
	radeon_opt_set_context_reg(sctx, R_028AB0_VGT_GSVS_RING_ITEMSIZE,
				   SI_TRACKED_VGT_GSVS_RING_ITEMSIZE,
				   shader->ctx_reg.gs.vgt_gsvs_ring_itemsize);

	/* R_028B38_VGT_GS_MAX_VERT_OUT */
	radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT,
				   SI_TRACKED_VGT_GS_MAX_VERT_OUT,
				   shader->ctx_reg.gs.vgt_gs_max_vert_out);

	/* R_028B5C_VGT_GS_VERT_ITEMSIZE, R_028B60_VGT_GS_VERT_ITEMSIZE_1
	 * R_028B64_VGT_GS_VERT_ITEMSIZE_2, R_028B68_VGT_GS_VERT_ITEMSIZE_3 */
	radeon_opt_set_context_reg4(sctx, R_028B5C_VGT_GS_VERT_ITEMSIZE,
				    SI_TRACKED_VGT_GS_VERT_ITEMSIZE,
				    shader->ctx_reg.gs.vgt_gs_vert_itemsize,
				    shader->ctx_reg.gs.vgt_gs_vert_itemsize_1,
				    shader->ctx_reg.gs.vgt_gs_vert_itemsize_2,
				    shader->ctx_reg.gs.vgt_gs_vert_itemsize_3);

	/* R_028B90_VGT_GS_INSTANCE_CNT */
	radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT,
				   SI_TRACKED_VGT_GS_INSTANCE_CNT,
				   shader->ctx_reg.gs.vgt_gs_instance_cnt);

	if (sctx->chip_class >= GFX9) {
		/* R_028A44_VGT_GS_ONCHIP_CNTL */
		radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
					   SI_TRACKED_VGT_GS_ONCHIP_CNTL,
					   shader->ctx_reg.gs.vgt_gs_onchip_cntl);
		/* R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP */
		radeon_opt_set_context_reg(sctx, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
					   SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
					   shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup);
		/* R_028AAC_VGT_ESGS_RING_ITEMSIZE */
		radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
					   SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
					   shader->ctx_reg.gs.vgt_esgs_ring_itemsize);

		if (shader->key.part.gs.es->type == PIPE_SHADER_TESS_EVAL)
			radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
						   SI_TRACKED_VGT_TF_PARAM,
						   shader->vgt_tf_param);
		if (shader->vgt_vertex_reuse_block_cntl)
			radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
						   SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
						   shader->vgt_vertex_reuse_block_cntl);
	}

	if (initial_cdw != sctx->gfx_cs->current.cdw)
		sctx->context_roll = true;
}
static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
{
	struct si_shader_selector *sel = shader->selector;
	const ubyte *num_components = sel->info.num_stream_output_components;
	unsigned gs_num_invocations = sel->gs_num_invocations;
	struct si_pm4_state *pm4;
	uint64_t va;
	unsigned max_stream = sel->max_gs_stream;
	unsigned offset;

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	pm4->atom.emit = si_emit_shader_gs;

	offset = num_components[0] * sel->gs_max_out_vertices;
	shader->ctx_reg.gs.vgt_gsvs_ring_offset_1 = offset;

	if (max_stream >= 1)
		offset += num_components[1] * sel->gs_max_out_vertices;
	shader->ctx_reg.gs.vgt_gsvs_ring_offset_2 = offset;

	if (max_stream >= 2)
		offset += num_components[2] * sel->gs_max_out_vertices;
	shader->ctx_reg.gs.vgt_gsvs_ring_offset_3 = offset;

	shader->ctx_reg.gs.vgt_gs_out_prim_type =
		si_conv_prim_to_gs_out(sel->gs_output_prim);

	if (max_stream >= 3)
		offset += num_components[3] * sel->gs_max_out_vertices;
	shader->ctx_reg.gs.vgt_gsvs_ring_itemsize = offset;

	/* The GSVS_RING_ITEMSIZE register takes 15 bits */
	assert(offset < (1 << 15));

	shader->ctx_reg.gs.vgt_gs_max_vert_out = sel->gs_max_out_vertices;

	shader->ctx_reg.gs.vgt_gs_vert_itemsize = num_components[0];
	shader->ctx_reg.gs.vgt_gs_vert_itemsize_1 = (max_stream >= 1) ? num_components[1] : 0;
	shader->ctx_reg.gs.vgt_gs_vert_itemsize_2 = (max_stream >= 2) ? num_components[2] : 0;
	shader->ctx_reg.gs.vgt_gs_vert_itemsize_3 = (max_stream >= 3) ? num_components[3] : 0;

	shader->ctx_reg.gs.vgt_gs_instance_cnt = S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
						 S_028B90_ENABLE(gs_num_invocations > 0);

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (sscreen->info.chip_class >= GFX9) {
		unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
		unsigned es_type = shader->key.part.gs.es->type;
		unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
		struct gfx9_gs_info gs_info;

		if (es_type == PIPE_SHADER_VERTEX)
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			es_vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
		else if (es_type == PIPE_SHADER_TESS_EVAL)
			es_vgpr_comp_cnt = shader->key.part.gs.es->info.uses_primid ? 3 : 2;
		else
			unreachable("invalid shader selector type");

		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
		 * VGPR[0:4] are always loaded.
		 */
		if (sel->info.uses_invocationid)
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		else if (sel->info.uses_primid)
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		else if (input_prim >= PIPE_PRIM_TRIANGLES)
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		else
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */

		unsigned num_user_sgprs;
		if (es_type == PIPE_SHADER_VERTEX)
			num_user_sgprs = si_get_num_vs_user_sgprs(GFX9_VSGS_NUM_USER_SGPR);
		else
			num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;

		gfx9_get_gs_info(shader->key.part.gs.es, sel, &gs_info);

		si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
		si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, S_00B214_MEM_BASE(va >> 40));

		si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
			       S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
			       S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
			       S_00B228_DX10_CLAMP(1) |
			       S_00B228_FLOAT_MODE(shader->config.float_mode) |
			       S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
		si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
			       S_00B22C_USER_SGPR(num_user_sgprs) |
			       S_00B22C_USER_SGPR_MSB(num_user_sgprs >> 5) |
			       S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
			       S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
			       S_00B22C_LDS_SIZE(gs_info.lds_size) |
			       S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

		shader->ctx_reg.gs.vgt_gs_onchip_cntl =
			S_028A44_ES_VERTS_PER_SUBGRP(gs_info.es_verts_per_subgroup) |
			S_028A44_GS_PRIMS_PER_SUBGRP(gs_info.gs_prims_per_subgroup) |
			S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_info.gs_inst_prims_in_subgroup);
		shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup =
			S_028A94_MAX_PRIMS_PER_SUBGROUP(gs_info.max_prims_per_subgroup);
		shader->ctx_reg.gs.vgt_esgs_ring_itemsize =
			shader->key.part.gs.es->esgs_itemsize / 4;

		if (es_type == PIPE_SHADER_TESS_EVAL)
			si_set_tesseval_regs(sscreen, shader->key.part.gs.es, pm4);

		polaris_set_vgt_vertex_reuse(sscreen, shader->key.part.gs.es,
					     NULL, pm4);
	} else {
		si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
		si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, S_00B224_MEM_BASE(va >> 40));

		si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
			       S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
			       S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
			       S_00B228_DX10_CLAMP(1) |
			       S_00B228_FLOAT_MODE(shader->config.float_mode));
		si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
			       S_00B22C_USER_SGPR(GFX6_GS_NUM_USER_SGPR) |
			       S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
	}
}
static void si_emit_shader_vs(struct si_context *sctx)
{
	struct si_shader *shader = sctx->queued.named.vs->shader;
	unsigned initial_cdw = sctx->gfx_cs->current.cdw;

	if (!shader)
		return;

	radeon_opt_set_context_reg(sctx, R_028A40_VGT_GS_MODE,
				   SI_TRACKED_VGT_GS_MODE,
				   shader->ctx_reg.vs.vgt_gs_mode);
	radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN,
				   SI_TRACKED_VGT_PRIMITIVEID_EN,
				   shader->ctx_reg.vs.vgt_primitiveid_en);

	if (sctx->chip_class <= GFX8) {
		radeon_opt_set_context_reg(sctx, R_028AB4_VGT_REUSE_OFF,
					   SI_TRACKED_VGT_REUSE_OFF,
					   shader->ctx_reg.vs.vgt_reuse_off);
	}

	radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG,
				   SI_TRACKED_SPI_VS_OUT_CONFIG,
				   shader->ctx_reg.vs.spi_vs_out_config);

	radeon_opt_set_context_reg(sctx, R_02870C_SPI_SHADER_POS_FORMAT,
				   SI_TRACKED_SPI_SHADER_POS_FORMAT,
				   shader->ctx_reg.vs.spi_shader_pos_format);

	radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL,
				   SI_TRACKED_PA_CL_VTE_CNTL,
				   shader->ctx_reg.vs.pa_cl_vte_cntl);

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
					   SI_TRACKED_VGT_TF_PARAM,
					   shader->vgt_tf_param);

	if (shader->vgt_vertex_reuse_block_cntl)
		radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
					   SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
					   shader->vgt_vertex_reuse_block_cntl);

	if (initial_cdw != sctx->gfx_cs->current.cdw)
		sctx->context_roll = true;
}
/**
 * Compute the state for \p shader, which will run as a vertex shader on the
 * hardware.
 *
 * If \p gs is non-NULL, it points to the geometry shader for which this shader
 * is the copy shader.
 */
static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
			 struct si_shader_selector *gs)
{
	const struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned num_user_sgprs, vgpr_comp_cnt;
	uint64_t va;
	unsigned nparams, oc_lds_en;
	unsigned window_space =
		info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
	bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || info->uses_primid;

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	pm4->atom.emit = si_emit_shader_vs;

	/* We always write VGT_GS_MODE in the VS state, because every switch
	 * between different shader pipelines involving a different GS or no
	 * GS at all involves a switch of the VS (different GS use different
	 * copy shaders). On the other hand, when the API switches from a GS to
	 * no GS and then back to the same GS used originally, the GS state is
	 * not sent again.
	 */
	if (!gs) {
		unsigned mode = V_028A40_GS_OFF;

		/* PrimID needs GS scenario A. */
		if (enable_prim_id)
			mode = V_028A40_GS_SCENARIO_A;

		shader->ctx_reg.vs.vgt_gs_mode = S_028A40_MODE(mode);
		shader->ctx_reg.vs.vgt_primitiveid_en = enable_prim_id;
	} else {
		shader->ctx_reg.vs.vgt_gs_mode = ac_vgt_gs_mode(gs->gs_max_out_vertices,
								sscreen->info.chip_class);
		shader->ctx_reg.vs.vgt_primitiveid_en = 0;
	}

	if (sscreen->info.chip_class <= GFX8) {
		/* Reuse needs to be set off if we write oViewport. */
		shader->ctx_reg.vs.vgt_reuse_off =
			S_028AB4_REUSE_OFF(info->writes_viewport_index);
	}

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	if (gs) {
		vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
		num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
	} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
		/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
		 * If PrimID is disabled. InstanceID / StepRate1 is loaded instead.
		 * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
		 */
		vgpr_comp_cnt = enable_prim_id ? 2 : (shader->info.uses_instanceid ? 1 : 0);

		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
			num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
					 info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
		} else {
			num_user_sgprs = si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR);
		}
	} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
		vgpr_comp_cnt = enable_prim_id ? 3 : 2;
		num_user_sgprs = SI_TES_NUM_USER_SGPR;
	} else
		unreachable("invalid shader selector type");

	/* VS is required to export at least one param. */
	nparams = MAX2(shader->info.nr_param_exports, 1);
	shader->ctx_reg.vs.spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);

	shader->ctx_reg.vs.spi_shader_pos_format =
			S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
						    V_02870C_SPI_SHADER_4COMP :
						    V_02870C_SPI_SHADER_NONE) |
			S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
						    V_02870C_SPI_SHADER_4COMP :
						    V_02870C_SPI_SHADER_NONE) |
			S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
						    V_02870C_SPI_SHADER_4COMP :
						    V_02870C_SPI_SHADER_NONE);

	oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

	si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
	si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, S_00B124_MEM_BASE(va >> 40));
	si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
		       S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
		       S_00B128_DX10_CLAMP(1) |
		       S_00B128_FLOAT_MODE(shader->config.float_mode));
	si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
		       S_00B12C_USER_SGPR(num_user_sgprs) |
		       S_00B12C_OC_LDS_EN(oc_lds_en) |
		       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
		       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
		       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
		       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
		       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
		       S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

	if (window_space)
		shader->ctx_reg.vs.pa_cl_vte_cntl =
				S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1);
	else
		shader->ctx_reg.vs.pa_cl_vte_cntl =
				S_028818_VTX_W0_FMT(1) |
				S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
				S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
				S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1);

	if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
		si_set_tesseval_regs(sscreen, shader->selector, pm4);

	polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
static unsigned si_get_ps_num_interp(struct si_shader *ps)
{
	struct tgsi_shader_info *info = &ps->selector->info;
	unsigned num_colors = !!(info->colors_read & 0x0f) +
			      !!(info->colors_read & 0xf0);
	unsigned num_interp = ps->selector->info.num_inputs +
			      (ps->key.part.ps.prolog.color_two_side ? num_colors : 0);

	assert(num_interp <= 32);
	return MIN2(num_interp, 32);
}
static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
{
	unsigned value = shader->key.part.ps.epilog.spi_shader_col_format;
	unsigned i, num_targets = (util_last_bit(value) + 3) / 4;

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	for (i = 0; i < num_targets; i++)
		if (!(value & (0xf << (i * 4))))
			value |= V_028714_SPI_SHADER_32_R << (i * 4);

	return value;
}
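
/*
 * Worked example (assumed input): if only MRT2 is written, value = 0xf00,
 * so num_targets = 3 and the loop fills the empty MRT0/MRT1 slots with
 * V_028714_SPI_SHADER_32_R (= 0x1), yielding 0xf11, so no earlier target
 * format is left zero.
 */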
static void si_emit_shader_ps(struct si_context *sctx)
{
	struct si_shader *shader = sctx->queued.named.ps->shader;
	unsigned initial_cdw = sctx->gfx_cs->current.cdw;

	if (!shader)
		return;

	/* R_0286CC_SPI_PS_INPUT_ENA, R_0286D0_SPI_PS_INPUT_ADDR*/
	radeon_opt_set_context_reg2(sctx, R_0286CC_SPI_PS_INPUT_ENA,
				    SI_TRACKED_SPI_PS_INPUT_ENA,
				    shader->ctx_reg.ps.spi_ps_input_ena,
				    shader->ctx_reg.ps.spi_ps_input_addr);

	radeon_opt_set_context_reg(sctx, R_0286E0_SPI_BARYC_CNTL,
				   SI_TRACKED_SPI_BARYC_CNTL,
				   shader->ctx_reg.ps.spi_baryc_cntl);
	radeon_opt_set_context_reg(sctx, R_0286D8_SPI_PS_IN_CONTROL,
				   SI_TRACKED_SPI_PS_IN_CONTROL,
				   shader->ctx_reg.ps.spi_ps_in_control);

	/* R_028710_SPI_SHADER_Z_FORMAT, R_028714_SPI_SHADER_COL_FORMAT */
	radeon_opt_set_context_reg2(sctx, R_028710_SPI_SHADER_Z_FORMAT,
				    SI_TRACKED_SPI_SHADER_Z_FORMAT,
				    shader->ctx_reg.ps.spi_shader_z_format,
				    shader->ctx_reg.ps.spi_shader_col_format);

	radeon_opt_set_context_reg(sctx, R_02823C_CB_SHADER_MASK,
				   SI_TRACKED_CB_SHADER_MASK,
				   shader->ctx_reg.ps.cb_shader_mask);

	if (initial_cdw != sctx->gfx_cs->current.cdw)
		sctx->context_roll = true;
}
static void si_shader_ps(struct si_shader *shader)
{
	struct tgsi_shader_info *info = &shader->selector->info;
	struct si_pm4_state *pm4;
	unsigned spi_ps_in_control, spi_shader_col_format, cb_shader_mask;
	unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	uint64_t va;
	unsigned input_ena = shader->config.spi_ps_input_ena;

	/* we need to enable at least one of them, otherwise we hang the GPU */
	assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
	       G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
	       G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
	       G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));
	/* POS_W_FLOAT_ENA requires one of the perspective weights. */
	assert(!G_0286CC_POS_W_FLOAT_ENA(input_ena) ||
	       G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
	       G_0286CC_PERSP_PULL_MODEL_ENA(input_ena));

	/* Validate interpolation optimization flags (read as implications). */
	assert(!shader->key.part.ps.prolog.bc_optimize_for_persp ||
	       (G_0286CC_PERSP_CENTER_ENA(input_ena) &&
		G_0286CC_PERSP_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.bc_optimize_for_linear ||
	       (G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
		G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_persp_center_interp ||
	       (!G_0286CC_PERSP_SAMPLE_ENA(input_ena) &&
		!G_0286CC_PERSP_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_linear_center_interp ||
	       (!G_0286CC_LINEAR_SAMPLE_ENA(input_ena) &&
		!G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_persp_sample_interp ||
	       (!G_0286CC_PERSP_CENTER_ENA(input_ena) &&
		!G_0286CC_PERSP_CENTROID_ENA(input_ena)));
	assert(!shader->key.part.ps.prolog.force_linear_sample_interp ||
	       (!G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
		!G_0286CC_LINEAR_CENTROID_ENA(input_ena)));

	/* Validate cases when the optimizations are off (read as implications). */
	assert(shader->key.part.ps.prolog.bc_optimize_for_persp ||
	       !G_0286CC_PERSP_CENTER_ENA(input_ena) ||
	       !G_0286CC_PERSP_CENTROID_ENA(input_ena));
	assert(shader->key.part.ps.prolog.bc_optimize_for_linear ||
	       !G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
	       !G_0286CC_LINEAR_CENTROID_ENA(input_ena));

	pm4 = si_get_shader_pm4_state(shader);
	if (!pm4)
		return;

	pm4->atom.emit = si_emit_shader_ps;

	/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
	 * Possible values:
	 * 0 -> Position = pixel center
	 * 1 -> Position = pixel centroid
	 * 2 -> Position = at sample position
	 *
	 * From GLSL 4.5 specification, section 7.1:
	 *   "The variable gl_FragCoord is available as an input variable from
	 *    within fragment shaders and it holds the window relative coordinates
	 *    (x, y, z, 1/w) values for the fragment. If multi-sampling, this
	 *    value can be for any location within the pixel, or one of the
	 *    fragment samples. The use of centroid does not further restrict
	 *    this value to be inside the current primitive."
	 *
	 * Meaning that centroid has no effect and we can return anything within
	 * the pixel. Thus, return the value at sample position, because that's
	 * the most accurate one shaders can get.
	 */
	spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);

	if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
	    TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
		spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);

	spi_shader_col_format = si_get_spi_shader_col_format(shader);
	cb_shader_mask = ac_get_cb_shader_mask(spi_shader_col_format);

	/* Ensure that some export memory is always allocated, for two reasons:
	 *
	 * 1) Correctness: The hardware ignores the EXEC mask if no export
	 *    memory is allocated, so KILL and alpha test do not work correctly
	 *    without it.
	 * 2) Performance: Every shader needs at least a NULL export, even when
	 *    it writes no color/depth output. The NULL export instruction
	 *    stalls without this setting.
	 *
	 * Don't add this to CB_SHADER_MASK.
	 */
	if (!spi_shader_col_format &&
	    !info->writes_z && !info->writes_stencil && !info->writes_samplemask)
		spi_shader_col_format = V_028714_SPI_SHADER_32_R;

	shader->ctx_reg.ps.spi_ps_input_ena = input_ena;
	shader->ctx_reg.ps.spi_ps_input_addr = shader->config.spi_ps_input_addr;

	/* Set interpolation controls. */
	spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader));

	shader->ctx_reg.ps.spi_baryc_cntl = spi_baryc_cntl;
	shader->ctx_reg.ps.spi_ps_in_control = spi_ps_in_control;
	shader->ctx_reg.ps.spi_shader_z_format =
			ac_get_spi_shader_z_format(info->writes_z,
						   info->writes_stencil,
						   info->writes_samplemask);
	shader->ctx_reg.ps.spi_shader_col_format = spi_shader_col_format;
	shader->ctx_reg.ps.cb_shader_mask = cb_shader_mask;

	va = shader->bo->gpu_address;
	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
	si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
	si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, S_00B024_MEM_BASE(va >> 40));

	si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
		       S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) |
		       S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8) |
		       S_00B028_DX10_CLAMP(1) |
		       S_00B028_FLOAT_MODE(shader->config.float_mode));
	si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
		       S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
		       S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
		       S_00B02C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}
static void si_shader_init_pm4_state(struct si_screen *sscreen,
				     struct si_shader *shader)
{
	switch (shader->selector->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.as_ls)
			si_shader_ls(sscreen, shader);
		else if (shader->key.as_es)
			si_shader_es(sscreen, shader);
		else
			si_shader_vs(sscreen, shader, NULL);
		break;
	case PIPE_SHADER_TESS_CTRL:
		si_shader_hs(sscreen, shader);
		break;
	case PIPE_SHADER_TESS_EVAL:
		if (shader->key.as_es)
			si_shader_es(sscreen, shader);
		else
			si_shader_vs(sscreen, shader, NULL);
		break;
	case PIPE_SHADER_GEOMETRY:
		si_shader_gs(sscreen, shader);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_shader_ps(shader);
		break;
	default:
		assert(0);
	}
}
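
/*
 * Illustrative summary of the dispatch above: a VS compiled with key.as_ls
 * becomes the tessellation LS, with key.as_es the geometry-path ES, and
 * otherwise the hardware VS; a TES takes the same ES-or-VS split depending
 * on whether a GS is bound.
 */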
static unsigned si_get_alpha_test_func(struct si_context *sctx)
{
	/* Alpha-test should be disabled if colorbuffer 0 is integer. */
	if (sctx->queued.named.dsa)
		return sctx->queued.named.dsa->alpha_func;

	return PIPE_FUNC_ALWAYS;
}
void si_shader_selector_key_vs(struct si_context *sctx,
			       struct si_shader_selector *vs,
			       struct si_shader_key *key,
			       struct si_vs_prolog_bits *prolog_key)
{
	if (!sctx->vertex_elements ||
	    vs->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS])
		return;

	struct si_vertex_elements *elts = sctx->vertex_elements;

	prolog_key->instance_divisor_is_one = elts->instance_divisor_is_one;
	prolog_key->instance_divisor_is_fetched = elts->instance_divisor_is_fetched;
	prolog_key->unpack_instance_id_from_vertex_id =
		sctx->prim_discard_cs_instancing;

	/* Prefer a monolithic shader to allow scheduling divisions around
	 * VBO loads. */
	if (prolog_key->instance_divisor_is_fetched)
		key->opt.prefer_mono = 1;

	unsigned count = MIN2(vs->info.num_inputs, elts->count);
	unsigned count_mask = (1 << count) - 1;
	unsigned fix = elts->fix_fetch_always & count_mask;
	unsigned opencode = elts->fix_fetch_opencode & count_mask;

	if (sctx->vertex_buffer_unaligned & elts->vb_alignment_check_mask) {
		uint32_t mask = elts->fix_fetch_unaligned & count_mask;
		while (mask) {
			unsigned i = u_bit_scan(&mask);
			unsigned log_hw_load_size = 1 + ((elts->hw_load_is_dword >> i) & 1);
			unsigned vbidx = elts->vertex_buffer_index[i];
			struct pipe_vertex_buffer *vb = &sctx->vertex_buffer[vbidx];
			unsigned align_mask = (1 << log_hw_load_size) - 1;
			if (vb->buffer_offset & align_mask ||
			    vb->stride & align_mask) {
				fix |= 1 << i;
				opencode |= 1 << i;
			}
		}
	}

	while (fix) {
		unsigned i = u_bit_scan(&fix);
		key->mono.vs_fix_fetch[i].bits = elts->fix_fetch[i];
	}
	key->mono.vs_fetch_opencode = opencode;
}
static void si_shader_selector_key_hw_vs(struct si_context *sctx,
					 struct si_shader_selector *vs,
					 struct si_shader_key *key)
{
	struct si_shader_selector *ps = sctx->ps_shader.cso;

	key->opt.clip_disable =
		sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
		(vs->info.clipdist_writemask ||
		 vs->info.writes_clipvertex) &&
		!vs->info.culldist_writemask;

	/* Find out if PS is disabled. */
	bool ps_disabled = true;
	if (ps) {
		const struct si_state_blend *blend = sctx->queued.named.blend;
		bool alpha_to_coverage = blend && blend->alpha_to_coverage;
		bool ps_modifies_zs = ps->info.uses_kill ||
				      ps->info.writes_z ||
				      ps->info.writes_stencil ||
				      ps->info.writes_samplemask ||
				      alpha_to_coverage ||
				      si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS;
		unsigned ps_colormask = si_get_total_colormask(sctx);

		ps_disabled = sctx->queued.named.rasterizer->rasterizer_discard ||
			      (!ps_colormask &&
			       !ps_modifies_zs &&
			       !ps->info.writes_memory);
	}

	/* Find out which VS outputs aren't used by the PS. */
	uint64_t outputs_written = vs->outputs_written_before_ps;
	uint64_t inputs_read = 0;

	/* Ignore outputs that are not passed from VS to PS. */
	outputs_written &= ~((1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_POSITION, 0, true)) |
			     (1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_PSIZE, 0, true)) |
			     (1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_CLIPVERTEX, 0, true)));

	if (!ps_disabled) {
		inputs_read = ps->inputs_read;
	}

	uint64_t linked = outputs_written & inputs_read;

	key->opt.kill_outputs = ~linked & outputs_written;
}
1471 /* Compute the key for the hw shader variant */
1472 static inline void si_shader_selector_key(struct pipe_context
*ctx
,
1473 struct si_shader_selector
*sel
,
1474 struct si_shader_key
*key
)
1476 struct si_context
*sctx
= (struct si_context
*)ctx
;
1478 memset(key
, 0, sizeof(*key
));
1480 switch (sel
->type
) {
1481 case PIPE_SHADER_VERTEX
:
1482 si_shader_selector_key_vs(sctx
, sel
, key
, &key
->part
.vs
.prolog
);
1484 if (sctx
->tes_shader
.cso
)
1486 else if (sctx
->gs_shader
.cso
)
1489 si_shader_selector_key_hw_vs(sctx
, sel
, key
);
1491 if (sctx
->ps_shader
.cso
&& sctx
->ps_shader
.cso
->info
.uses_primid
)
1492 key
->mono
.u
.vs_export_prim_id
= 1;
1495 case PIPE_SHADER_TESS_CTRL
:
1496 if (sctx
->chip_class
>= GFX9
) {
1497 si_shader_selector_key_vs(sctx
, sctx
->vs_shader
.cso
,
1498 key
, &key
->part
.tcs
.ls_prolog
);
1499 key
->part
.tcs
.ls
= sctx
->vs_shader
.cso
;
1501 /* When the LS VGPR fix is needed, monolithic shaders
1503 * - avoid initializing EXEC in both the LS prolog
1504 * and the LS main part when !vs_needs_prolog
1505 * - remove the fixup for unused input VGPRs
1507 key
->part
.tcs
.ls_prolog
.ls_vgpr_fix
= sctx
->ls_vgpr_fix
;
1509 /* The LS output / HS input layout can be communicated
1510 * directly instead of via user SGPRs for merged LS-HS.
1511 * The LS VGPR fix prefers this too.
1513 key
->opt
.prefer_mono
= 1;
1516 key
->part
.tcs
.epilog
.prim_mode
=
1517 sctx
->tes_shader
.cso
->info
.properties
[TGSI_PROPERTY_TES_PRIM_MODE
];
1518 key
->part
.tcs
.epilog
.invoc0_tess_factors_are_def
=
1519 sel
->tcs_info
.tessfactors_are_def_in_all_invocs
;
1520 key
->part
.tcs
.epilog
.tes_reads_tess_factors
=
1521 sctx
->tes_shader
.cso
->info
.reads_tess_factors
;
1523 if (sel
== sctx
->fixed_func_tcs_shader
.cso
)
1524 key
->mono
.u
.ff_tcs_inputs_to_copy
= sctx
->vs_shader
.cso
->outputs_written
;
1526 case PIPE_SHADER_TESS_EVAL
:
1527 if (sctx
->gs_shader
.cso
)
1530 si_shader_selector_key_hw_vs(sctx
, sel
, key
);
1532 if (sctx
->ps_shader
.cso
&& sctx
->ps_shader
.cso
->info
.uses_primid
)
1533 key
->mono
.u
.vs_export_prim_id
= 1;
1536 case PIPE_SHADER_GEOMETRY
:
1537 if (sctx
->chip_class
>= GFX9
) {
1538 if (sctx
->tes_shader
.cso
) {
1539 key
->part
.gs
.es
= sctx
->tes_shader
.cso
;
1541 si_shader_selector_key_vs(sctx
, sctx
->vs_shader
.cso
,
1542 key
, &key
->part
.gs
.vs_prolog
);
1543 key
->part
.gs
.es
= sctx
->vs_shader
.cso
;
1544 key
->part
.gs
.prolog
.gfx9_prev_is_vs
= 1;
1547 /* Merged ES-GS can have unbalanced wave usage.
1549 * ES threads are per-vertex, while GS threads are
1550 * per-primitive. So without any amplification, there
1551 * are fewer GS threads than ES threads, which can result
1552 * in empty (no-op) GS waves. With too much amplification,
1553 * there are more GS threads than ES threads, which
1554 * can result in empty (no-op) ES waves.
1556 * Non-monolithic shaders are implemented by setting EXEC
1557 * at the beginning of shader parts, and don't jump to
1558 * the end if EXEC is 0.
1560 * Monolithic shaders use conditional blocks, so they can
1561 * jump and skip empty waves of ES or GS. So set this to
1562 * always use optimized variants, which are monolithic.
1564 key
->opt
.prefer_mono
= 1;
1566 key
->part
.gs
.prolog
.tri_strip_adj_fix
= sctx
->gs_tri_strip_adj_fix
;
   case PIPE_SHADER_FRAGMENT: {
      struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
      struct si_state_blend *blend = sctx->queued.named.blend;

      if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
          sel->info.colors_written == 0x1)
         key->part.ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;

      if (blend) {
         /* Select the shader color format based on whether
          * blending or alpha are needed.
          */
         key->part.ps.epilog.spi_shader_col_format =
            (blend->blend_enable_4bit & blend->need_src_alpha_4bit &
             sctx->framebuffer.spi_shader_col_format_blend_alpha) |
            (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
             sctx->framebuffer.spi_shader_col_format_blend) |
            (~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
             sctx->framebuffer.spi_shader_col_format_alpha) |
            (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
             sctx->framebuffer.spi_shader_col_format);
         key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;

         /* The output for dual source blending should have
          * the same format as the first output.
          */
         if (blend->dual_src_blend)
            key->part.ps.epilog.spi_shader_col_format |=
               (key->part.ps.epilog.spi_shader_col_format & 0xf) << 4;
      } else
         key->part.ps.epilog.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format;

      /* If alpha-to-coverage is enabled, we have to export alpha
       * even if there is no color buffer.
       */
      if (!(key->part.ps.epilog.spi_shader_col_format & 0xf) &&
          blend && blend->alpha_to_coverage)
         key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;

      /* On GFX6 and GFX7 except Hawaii, the CB doesn't clamp outputs
       * to the range supported by the type if a channel has less
       * than 16 bits and the export format is 16_ABGR.
       */
      if (sctx->chip_class <= GFX7 && sctx->family != CHIP_HAWAII) {
         key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
         key->part.ps.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
      }

      /* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
      if (!key->part.ps.epilog.last_cbuf) {
         key->part.ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
         key->part.ps.epilog.color_is_int8 &= sel->info.colors_written;
         key->part.ps.epilog.color_is_int10 &= sel->info.colors_written;
      }

      bool is_poly = !util_prim_is_points_or_lines(sctx->current_rast_prim);
      bool is_line = util_prim_is_lines(sctx->current_rast_prim);

      key->part.ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
      key->part.ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;

      if (sctx->queued.named.blend) {
         key->part.ps.epilog.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
                                            rs->multisample_enable;
      }

      key->part.ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
      key->part.ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
                                                 (is_line && rs->line_smooth)) &&
                                                sctx->framebuffer.nr_samples <= 1;
      key->part.ps.epilog.clamp_color = rs->clamp_fragment_color;

      if (sctx->ps_iter_samples > 1 &&
          sel->info.reads_samplemask) {
         key->part.ps.prolog.samplemask_log_ps_iter =
            util_logbase2(sctx->ps_iter_samples);
      }

      if (rs->force_persample_interp &&
          rs->multisample_enable &&
          sctx->framebuffer.nr_samples > 1 &&
          sctx->ps_iter_samples > 1) {
         key->part.ps.prolog.force_persp_sample_interp =
            sel->info.uses_persp_center ||
            sel->info.uses_persp_centroid;

         key->part.ps.prolog.force_linear_sample_interp =
            sel->info.uses_linear_center ||
            sel->info.uses_linear_centroid;
      } else if (rs->multisample_enable &&
                 sctx->framebuffer.nr_samples > 1) {
         key->part.ps.prolog.bc_optimize_for_persp =
            sel->info.uses_persp_center &&
            sel->info.uses_persp_centroid;
         key->part.ps.prolog.bc_optimize_for_linear =
            sel->info.uses_linear_center &&
            sel->info.uses_linear_centroid;
      } else {
         /* Make sure SPI doesn't compute more than 1 pair
          * of (i,j), which is the optimization here. */
         key->part.ps.prolog.force_persp_center_interp =
            sel->info.uses_persp_center +
            sel->info.uses_persp_centroid +
            sel->info.uses_persp_sample > 1;

         key->part.ps.prolog.force_linear_center_interp =
            sel->info.uses_linear_center +
            sel->info.uses_linear_centroid +
            sel->info.uses_linear_sample > 1;

         if (sel->info.opcode_count[TGSI_OPCODE_INTERP_SAMPLE])
            key->mono.u.ps.interpolate_at_sample_force_center = 1;
      }

      key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);

      /* ps_uses_fbfetch is true only if the color buffer is bound. */
      if (sctx->ps_uses_fbfetch && !sctx->blitter->running) {
         struct pipe_surface *cb0 = sctx->framebuffer.state.cbufs[0];
         struct pipe_resource *tex = cb0->texture;

         /* 1D textures are allocated and used as 2D on GFX9. */
         key->mono.u.ps.fbfetch_msaa = sctx->framebuffer.nr_samples > 1;
         key->mono.u.ps.fbfetch_is_1D = sctx->chip_class != GFX9 &&
                                        (tex->target == PIPE_TEXTURE_1D ||
                                         tex->target == PIPE_TEXTURE_1D_ARRAY);
         key->mono.u.ps.fbfetch_layered = tex->target == PIPE_TEXTURE_1D_ARRAY ||
                                          tex->target == PIPE_TEXTURE_2D_ARRAY ||
                                          tex->target == PIPE_TEXTURE_CUBE ||
                                          tex->target == PIPE_TEXTURE_CUBE_ARRAY ||
                                          tex->target == PIPE_TEXTURE_3D;
      }
      break;
   }
   default:
      assert(0);
   }

   if (unlikely(sctx->screen->debug_flags & DBG(NO_OPT_VARIANT)))
      memset(&key->opt, 0, sizeof(key->opt));
}
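
/* si_shader_key is treated as plain data by everything below: variants are
 * looked up with memcmp() and copied by assignment, so every field written
 * above must be initialized deterministically (no uninitialized padding).
 * Illustrative lookup, mirroring si_shader_select_with_key():
 *
 *    struct si_shader_key key;
 *    si_shader_selector_key(ctx, sel, &key);
 *    for (iter = sel->first_variant; iter; iter = iter->next_variant)
 *       if (!memcmp(&iter->key, &key, sizeof(key)))
 *          break; // reuse this variant
 */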

static void si_build_shader_variant(struct si_shader *shader,
                                    int thread_index,
                                    bool low_priority)
{
   struct si_shader_selector *sel = shader->selector;
   struct si_screen *sscreen = sel->screen;
   struct ac_llvm_compiler *compiler;
   struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug;

   if (thread_index >= 0) {
      if (low_priority) {
         assert(thread_index < ARRAY_SIZE(sscreen->compiler_lowp));
         compiler = &sscreen->compiler_lowp[thread_index];
      } else {
         assert(thread_index < ARRAY_SIZE(sscreen->compiler));
         compiler = &sscreen->compiler[thread_index];
      }
   } else {
      assert(!low_priority);
      compiler = shader->compiler_ctx_state.compiler;
   }

   if (unlikely(!si_shader_create(sscreen, compiler, shader, debug))) {
      PRINT_ERR("Failed to build shader variant (type=%u)\n",
                sel->type);
      shader->compilation_failed = true;
      return;
   }

   if (shader->compiler_ctx_state.is_debug_context) {
      FILE *f = open_memstream(&shader->shader_log,
                               &shader->shader_log_size);
      if (f) {
         si_shader_dump(sscreen, shader, NULL, sel->type, f, false);
         fclose(f);
      }
   }

   si_shader_init_pm4_state(sscreen, shader);
}
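
/* A sketch of the thread_index convention used above and below: a
 * non-negative thread_index means we are running on one of the compiler
 * queue threads and must use that thread's per-screen LLVM compiler; a
 * negative thread_index means we are on an application (draw) thread and
 * use the compiler stored in compiler_ctx_state instead.
 */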

static void si_build_shader_variant_low_priority(void *job, int thread_index)
{
   struct si_shader *shader = (struct si_shader *)job;

   assert(thread_index >= 0);

   si_build_shader_variant(shader, thread_index, true);
}

static const struct si_shader_key zeroed;

static bool si_check_missing_main_part(struct si_screen *sscreen,
                                       struct si_shader_selector *sel,
                                       struct si_compiler_ctx_state *compiler_state,
                                       struct si_shader_key *key)
{
   struct si_shader **mainp = si_get_main_shader_part(sel, key);

   if (!*mainp) {
      struct si_shader *main_part = CALLOC_STRUCT(si_shader);

      if (!main_part)
         return false;

      /* We can leave the fence as permanently signaled because the
       * main part becomes visible globally only after it has been
       * compiled. */
      util_queue_fence_init(&main_part->ready);

      main_part->selector = sel;
      main_part->key.as_es = key->as_es;
      main_part->key.as_ls = key->as_ls;
      main_part->is_monolithic = false;

      if (si_compile_tgsi_shader(sscreen, compiler_state->compiler,
                                 main_part, &compiler_state->debug) != 0) {
         FREE(main_part);
         return false;
      }
      *mainp = main_part;
   }
   return true;
}
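
/* The "main part" compiled above is the middle section of a non-monolithic
 * shader; prologs and epilogs are attached separately. A rough sketch of
 * the two binary shapes for one selector:
 *
 *    monolithic:      [prolog + main + epilog]   one binary per key
 *    non-monolithic:  [prolog] [main part] [epilog]   parts shared/reused
 *
 * This is also why only as_es/as_ls are copied into main_part->key: the
 * main part is keyed only by which hardware stage it feeds.
 */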

/**
 * Select a shader variant according to the shader key.
 *
 * \param optimized_or_none  If the key describes an optimized shader variant and
 *                           the compilation isn't finished, don't select any
 *                           shader and return an error.
 */
int si_shader_select_with_key(struct si_screen *sscreen,
                              struct si_shader_ctx_state *state,
                              struct si_compiler_ctx_state *compiler_state,
                              struct si_shader_key *key,
                              int thread_index,
                              bool optimized_or_none)
{
   struct si_shader_selector *sel = state->cso;
   struct si_shader_selector *previous_stage_sel = NULL;
   struct si_shader *current = state->current;
   struct si_shader *iter, *shader = NULL;

again:
   /* Check if we don't need to change anything.
    * This path is also used for most shaders that don't need multiple
    * variants; it costs just a computation of the key and this test. */
   if (likely(current &&
              memcmp(&current->key, key, sizeof(*key)) == 0)) {
      if (unlikely(!util_queue_fence_is_signalled(&current->ready))) {
         if (current->is_optimized) {
            if (optimized_or_none)
               return -1;

            memset(&key->opt, 0, sizeof(key->opt));
            goto current_not_ready;
         }

         util_queue_fence_wait(&current->ready);
      }

      return current->compilation_failed ? -1 : 0;
   }
current_not_ready:

   /* This must be done before the mutex is locked, because async GS
    * compilation calls this function too, and therefore must enter
    * the mutex first.
    *
    * Only wait if we are in a draw call. Don't wait if we are
    * in a compiler thread.
    */
   if (thread_index < 0)
      util_queue_fence_wait(&sel->ready);

   mtx_lock(&sel->mutex);

   /* Find the shader variant. */
   for (iter = sel->first_variant; iter; iter = iter->next_variant) {
      /* Don't check the "current" shader. We checked it above. */
      if (current != iter &&
          memcmp(&iter->key, key, sizeof(*key)) == 0) {
         mtx_unlock(&sel->mutex);

         if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
            /* If it's an optimized shader and its compilation has
             * been started but isn't done, use the unoptimized
             * shader so as not to cause a stall due to compilation.
             */
            if (iter->is_optimized) {
               if (optimized_or_none)
                  return -1;
               memset(&key->opt, 0, sizeof(key->opt));
               goto again;
            }

            util_queue_fence_wait(&iter->ready);
         }

         if (iter->compilation_failed) {
            return -1; /* skip the draw call */
         }

         state->current = iter;
         return 0;
      }
   }

   /* Build a new shader. */
   shader = CALLOC_STRUCT(si_shader);
   if (!shader) {
      mtx_unlock(&sel->mutex);
      return -ENOMEM;
   }

   util_queue_fence_init(&shader->ready);

   shader->selector = sel;
   shader->key = *key;
   shader->compiler_ctx_state = *compiler_state;

   /* If this is a merged shader, get the first shader's selector. */
   if (sscreen->info.chip_class >= GFX9) {
      if (sel->type == PIPE_SHADER_TESS_CTRL)
         previous_stage_sel = key->part.tcs.ls;
      else if (sel->type == PIPE_SHADER_GEOMETRY)
         previous_stage_sel = key->part.gs.es;

      /* We need to wait for the previous shader. */
      if (previous_stage_sel && thread_index < 0)
         util_queue_fence_wait(&previous_stage_sel->ready);
   }

   bool is_pure_monolithic =
      sscreen->use_monolithic_shaders ||
      memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;

   /* Compile the main shader part if it doesn't exist. This can happen
    * if the initial guess was wrong.
    *
    * The prim discard CS doesn't need the main shader part.
    */
   if (!is_pure_monolithic &&
       !key->opt.vs_as_prim_discard_cs) {
      bool ok;

      /* Make sure the main shader part is present. This is needed
       * for shaders that can be compiled as VS, LS, or ES, and only
       * one of them is compiled at creation.
       *
       * For merged shaders, check that the starting shader's main
       * part is present.
       */
      if (previous_stage_sel) {
         struct si_shader_key shader1_key = zeroed;

         if (sel->type == PIPE_SHADER_TESS_CTRL)
            shader1_key.as_ls = 1;
         else if (sel->type == PIPE_SHADER_GEOMETRY)
            shader1_key.as_es = 1;
         else
            assert(0);

         mtx_lock(&previous_stage_sel->mutex);
         ok = si_check_missing_main_part(sscreen,
                                         previous_stage_sel,
                                         compiler_state, &shader1_key);
         mtx_unlock(&previous_stage_sel->mutex);
      } else {
         ok = si_check_missing_main_part(sscreen, sel,
                                         compiler_state, key);
      }

      if (!ok) {
         FREE(shader);
         mtx_unlock(&sel->mutex);
         return -ENOMEM; /* skip the draw call */
      }
   }

   /* Keep the reference to the 1st shader of merged shaders, so that
    * Gallium can't destroy it before we destroy the 2nd shader.
    *
    * Set sctx = NULL, because it's unused if we're not releasing
    * the shader, and we don't have any sctx here.
    */
   si_shader_selector_reference(NULL, &shader->previous_stage_sel,
                                previous_stage_sel);

   /* Monolithic-only shaders don't make a distinction between optimized
    * and unoptimized. */
   shader->is_monolithic =
      is_pure_monolithic ||
      memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;

   /* The prim discard CS is always optimized. */
   shader->is_optimized =
      (!is_pure_monolithic || key->opt.vs_as_prim_discard_cs) &&
      memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;

   /* If it's an optimized shader, compile it asynchronously. */
   if (shader->is_optimized && thread_index < 0) {
      /* Compile it asynchronously. */
      util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
                         shader, &shader->ready,
                         si_build_shader_variant_low_priority, NULL);

      /* Add only after the ready fence was reset, to guard against a
       * race with si_bind_XX_shader. */
      if (!sel->last_variant) {
         sel->first_variant = shader;
         sel->last_variant = shader;
      } else {
         sel->last_variant->next_variant = shader;
         sel->last_variant = shader;
      }

      /* Use the default (unoptimized) shader for now. */
      memset(&key->opt, 0, sizeof(key->opt));
      mtx_unlock(&sel->mutex);

      if (sscreen->options.sync_compile)
         util_queue_fence_wait(&shader->ready);

      if (optimized_or_none)
         return -1;
      goto again;
   }

   /* Reset the fence before adding to the variant list. */
   util_queue_fence_reset(&shader->ready);

   if (!sel->last_variant) {
      sel->first_variant = shader;
      sel->last_variant = shader;
   } else {
      sel->last_variant->next_variant = shader;
      sel->last_variant = shader;
   }

   mtx_unlock(&sel->mutex);

   assert(!shader->is_optimized);
   si_build_shader_variant(shader, thread_index, false);

   util_queue_fence_signal(&shader->ready);

   if (!shader->compilation_failed)
      state->current = shader;

   return shader->compilation_failed ? -1 : 0;
}
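
/* Return-value contract of si_shader_select_with_key(), as relied upon by
 * callers:
 *    0        a variant was selected and state->current is valid
 *    -1       skip the draw call (compile failure, or optimized_or_none)
 *    -ENOMEM  skip the draw call (allocation failure)
 * A minimal caller sketch:
 *
 *    struct si_shader_key key;
 *    si_shader_selector_key(ctx, state->cso, &key);
 *    if (si_shader_select_with_key(sscreen, state, compiler_state,
 *                                  &key, -1, false))
 *       return; // skip the draw
 */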

static int si_shader_select(struct pipe_context *ctx,
                            struct si_shader_ctx_state *state,
                            struct si_compiler_ctx_state *compiler_state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_shader_key key;

   si_shader_selector_key(ctx, state->cso, &key);
   return si_shader_select_with_key(sctx->screen, state, compiler_state,
                                    &key, -1, false);
}

static void si_parse_next_shader_property(const struct tgsi_shader_info *info,
                                          bool streamout,
                                          struct si_shader_key *key)
{
   unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];

   switch (info->processor) {
   case PIPE_SHADER_VERTEX:
      switch (next_shader) {
      case PIPE_SHADER_GEOMETRY:
         key->as_es = 1;
         break;
      case PIPE_SHADER_TESS_CTRL:
      case PIPE_SHADER_TESS_EVAL:
         key->as_ls = 1;
         break;
      default:
         /* If POSITION isn't written, it can only be a HW VS
          * if streamout is used. If streamout isn't used,
          * assume that it's a HW LS. (the next shader is TCS)
          * This heuristic is needed for separate shader objects.
          */
         if (!info->writes_position && !streamout)
            key->as_ls = 1;
      }
      break;

   case PIPE_SHADER_TESS_EVAL:
      if (next_shader == PIPE_SHADER_GEOMETRY ||
          !info->writes_position)
         key->as_es = 1;
      break;
   }
}
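
/* Example of the heuristic above: a separate vertex shader that doesn't
 * write POSITION and has no streamout is assumed to feed tessellation and
 * is therefore compiled as a HW LS up front. If the guess turns out to be
 * wrong, si_shader_select_with_key() compiles the missing main part on
 * demand ("This can happen if the initial guess was wrong" there).
 */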

/**
 * Compile the main shader part or the monolithic shader as part of
 * si_shader_selector initialization. Since it can be done asynchronously,
 * there is no way to report compile failures to applications.
 */
static void si_init_shader_selector_async(void *job, int thread_index)
{
   struct si_shader_selector *sel = (struct si_shader_selector *)job;
   struct si_screen *sscreen = sel->screen;
   struct ac_llvm_compiler *compiler;
   struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;

   assert(!debug->debug_message || debug->async);
   assert(thread_index >= 0);
   assert(thread_index < ARRAY_SIZE(sscreen->compiler));
   compiler = &sscreen->compiler[thread_index];

   /* Compile the main shader part for use with a prolog and/or epilog.
    * If this fails, the driver will try to compile a monolithic shader
    * on demand.
    */
   if (!sscreen->use_monolithic_shaders) {
      struct si_shader *shader = CALLOC_STRUCT(si_shader);
      void *ir_binary = NULL;

      if (!shader) {
         fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
         return;
      }

      /* We can leave the fence signaled because use of the default
       * main part is guarded by the selector's ready fence. */
      util_queue_fence_init(&shader->ready);

      shader->selector = sel;
      shader->is_monolithic = false;
      si_parse_next_shader_property(&sel->info,
                                    sel->so.num_outputs != 0,
                                    &shader->key);

      if (sel->tokens || sel->nir)
         ir_binary = si_get_ir_binary(sel);

      /* Try to load the shader from the shader cache. */
      mtx_lock(&sscreen->shader_cache_mutex);

      if (ir_binary &&
          si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
         mtx_unlock(&sscreen->shader_cache_mutex);
         si_shader_dump_stats_for_shader_db(shader, debug);
      } else {
         mtx_unlock(&sscreen->shader_cache_mutex);

         /* Compile the shader if it hasn't been loaded from the cache. */
         if (si_compile_tgsi_shader(sscreen, compiler, shader,
                                    debug) != 0) {
            FREE(shader);
            FREE(ir_binary);
            fprintf(stderr, "radeonsi: can't compile a main shader part\n");
            return;
         }

         if (ir_binary) {
            mtx_lock(&sscreen->shader_cache_mutex);
            if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
               FREE(ir_binary);
            mtx_unlock(&sscreen->shader_cache_mutex);
         }
      }

      *si_get_main_shader_part(sel, &shader->key) = shader;

      /* Unset "outputs_written" flags for outputs converted to
       * DEFAULT_VAL, so that later inter-shader optimizations don't
       * try to eliminate outputs that don't exist in the final
       * shader.
       *
       * This is only done if non-monolithic shaders are enabled.
       */
      if ((sel->type == PIPE_SHADER_VERTEX ||
           sel->type == PIPE_SHADER_TESS_EVAL) &&
          !shader->key.as_ls &&
          !shader->key.as_es) {
         unsigned i;

         for (i = 0; i < sel->info.num_outputs; i++) {
            unsigned offset = shader->info.vs_output_param_offset[i];

            if (offset <= AC_EXP_PARAM_OFFSET_31)
               continue;

            unsigned name = sel->info.output_semantic_name[i];
            unsigned index = sel->info.output_semantic_index[i];
            unsigned id;

            switch (name) {
            case TGSI_SEMANTIC_GENERIC:
               /* don't process indices the function can't handle */
               if (index >= SI_MAX_IO_GENERIC)
                  break;
               /* fall through */
            default:
               id = si_shader_io_get_unique_index(name, index, true);
               sel->outputs_written_before_ps &= ~(1ull << id);
               break;
            case TGSI_SEMANTIC_POSITION: /* ignore these */
            case TGSI_SEMANTIC_PSIZE:
            case TGSI_SEMANTIC_CLIPVERTEX:
            case TGSI_SEMANTIC_EDGEFLAG:
               break;
            }
         }
      }
   }

   /* The GS copy shader is always pre-compiled. */
   if (sel->type == PIPE_SHADER_GEOMETRY) {
      sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, compiler, sel, debug);
      if (!sel->gs_copy_shader) {
         fprintf(stderr, "radeonsi: can't create GS copy shader\n");
         return;
      }

      si_shader_vs(sscreen, sel->gs_copy_shader, sel);
   }
}
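
/* Rough flow of the cache interaction above (a sketch, not new behavior):
 *
 *    ir_binary = si_get_ir_binary(sel);          // serialized IR
 *    if (si_shader_cache_load_shader(...))       // hit: reuse binary
 *       only dump stats for shader-db;
 *    else
 *       compile, then si_shader_cache_insert_shader(...);
 *
 * The cache is keyed by the selector's serialized IR, so identical shaders
 * created by the application share one compiled binary.
 */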

void si_schedule_initial_compile(struct si_context *sctx, unsigned processor,
                                 struct util_queue_fence *ready_fence,
                                 struct si_compiler_ctx_state *compiler_ctx_state,
                                 void *job, util_queue_execute_func execute)
{
   util_queue_fence_init(ready_fence);

   struct util_async_debug_callback async_debug;
   bool debug =
      (sctx->debug.debug_message && !sctx->debug.async) ||
      sctx->is_debug ||
      si_can_dump_shader(sctx->screen, processor);

   if (debug) {
      u_async_debug_init(&async_debug);
      compiler_ctx_state->debug = async_debug.base;
   }

   util_queue_add_job(&sctx->screen->shader_compiler_queue, job,
                      ready_fence, execute, NULL);

   if (debug) {
      util_queue_fence_wait(ready_fence);
      u_async_debug_drain(&async_debug, &sctx->debug);
      u_async_debug_cleanup(&async_debug);
   }

   if (sctx->screen->options.sync_compile)
      util_queue_fence_wait(ready_fence);
}

/* Return descriptor slot usage masks from the given shader info. */
void si_get_active_slot_masks(const struct tgsi_shader_info *info,
                              uint32_t *const_and_shader_buffers,
                              uint64_t *samplers_and_images)
{
   unsigned start, num_shaderbufs, num_constbufs, num_images, num_samplers;

   num_shaderbufs = util_last_bit(info->shader_buffers_declared);
   num_constbufs = util_last_bit(info->const_buffers_declared);
   /* two 8-byte images share one 16-byte slot */
   num_images = align(util_last_bit(info->images_declared), 2);
   num_samplers = util_last_bit(info->samplers_declared);

   /* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */
   start = si_get_shaderbuf_slot(num_shaderbufs - 1);
   *const_and_shader_buffers =
      u_bit_consecutive(start, num_shaderbufs + num_constbufs);

   /* The layout is: image[last] ... image[0], sampler[0] ... sampler[last] */
   start = si_get_image_slot(num_images - 1) / 2;
   *samplers_and_images =
      u_bit_consecutive64(start, num_images / 2 + num_samplers);
}
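
/* Worked example for the mask layout above (illustrative values): with 2
 * shader buffers and 3 constant buffers, the shader buffers occupy the
 * slots directly below cb[0] in reverse order, so the combined mask is
 * u_bit_consecutive(si_get_shaderbuf_slot(1), 2 + 3): five consecutive
 * bits covering ... sb[1] sb[0] cb[0] cb[1] cb[2] ...
 */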

static void *si_create_shader_selector(struct pipe_context *ctx,
                                       const struct pipe_shader_state *state)
{
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
   int i;

   if (!sel)
      return NULL;

   pipe_reference_init(&sel->reference, 1);
   sel->screen = sscreen;
   sel->compiler_ctx_state.debug = sctx->debug;
   sel->compiler_ctx_state.is_debug_context = sctx->is_debug;

   sel->so = state->stream_output;

   if (state->type == PIPE_SHADER_IR_TGSI) {
      sel->tokens = tgsi_dup_tokens(state->tokens);
      if (!sel->tokens) {
         FREE(sel);
         return NULL;
      }

      tgsi_scan_shader(state->tokens, &sel->info);
      tgsi_scan_tess_ctrl(state->tokens, &sel->info, &sel->tcs_info);
   } else {
      assert(state->type == PIPE_SHADER_IR_NIR);

      sel->nir = state->ir.nir;

      si_nir_opts(sel->nir);
      si_nir_scan_shader(sel->nir, &sel->info);
      si_nir_scan_tess_ctrl(sel->nir, &sel->tcs_info);
   }

   sel->type = sel->info.processor;
   p_atomic_inc(&sscreen->num_shaders_created);
   si_get_active_slot_masks(&sel->info,
                            &sel->active_const_and_shader_buffers,
                            &sel->active_samplers_and_images);

   /* Record which streamout buffers are enabled. */
   for (i = 0; i < sel->so.num_outputs; i++) {
      sel->enabled_streamout_buffer_mask |=
         (1 << sel->so.output[i].output_buffer) <<
         (sel->so.output[i].stream * 4);
   }

   /* The prolog is a no-op if there are no inputs. */
   sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
                          sel->info.num_inputs &&
                          !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];

   sel->force_correct_derivs_after_kill =
      sel->type == PIPE_SHADER_FRAGMENT &&
      sel->info.uses_derivatives &&
      sel->info.uses_kill &&
      sctx->screen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL);

   sel->prim_discard_cs_allowed =
      sel->type == PIPE_SHADER_VERTEX &&
      !sel->info.uses_bindless_images &&
      !sel->info.uses_bindless_samplers &&
      !sel->info.writes_memory &&
      !sel->info.writes_viewport_index &&
      !sel->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] &&
      !sel->so.num_outputs;

   /* Set which opcode uses which (i,j) pair. */
   if (sel->info.uses_persp_opcode_interp_centroid)
      sel->info.uses_persp_centroid = true;

   if (sel->info.uses_linear_opcode_interp_centroid)
      sel->info.uses_linear_centroid = true;

   if (sel->info.uses_persp_opcode_interp_offset ||
       sel->info.uses_persp_opcode_interp_sample)
      sel->info.uses_persp_center = true;

   if (sel->info.uses_linear_opcode_interp_offset ||
       sel->info.uses_linear_opcode_interp_sample)
      sel->info.uses_linear_center = true;

   switch (sel->type) {
   case PIPE_SHADER_GEOMETRY:
      sel->gs_output_prim =
         sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
      sel->gs_max_out_vertices =
         sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
      sel->gs_num_invocations =
         sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
      sel->gsvs_vertex_size = sel->info.num_outputs * 16;
      sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
                                sel->gs_max_out_vertices;

      sel->max_gs_stream = 0;
      for (i = 0; i < sel->so.num_outputs; i++)
         sel->max_gs_stream = MAX2(sel->max_gs_stream,
                                   sel->so.output[i].stream);

      sel->gs_input_verts_per_prim =
         u_vertices_per_prim(sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]);
      break;

   case PIPE_SHADER_TESS_CTRL:
      /* Always reserve space for these. */
      sel->patch_outputs_written |=
         (1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0)) |
         (1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0));
      /* fall through */
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_TESS_EVAL:
      for (i = 0; i < sel->info.num_outputs; i++) {
         unsigned name = sel->info.output_semantic_name[i];
         unsigned index = sel->info.output_semantic_index[i];

         switch (name) {
         case TGSI_SEMANTIC_TESSINNER:
         case TGSI_SEMANTIC_TESSOUTER:
         case TGSI_SEMANTIC_PATCH:
            sel->patch_outputs_written |=
               1ull << si_shader_io_get_unique_index_patch(name, index);
            break;

         case TGSI_SEMANTIC_GENERIC:
            /* don't process indices the function can't handle */
            if (index >= SI_MAX_IO_GENERIC)
               break;
            /* fall through */
         default:
            sel->outputs_written |=
               1ull << si_shader_io_get_unique_index(name, index, false);
            sel->outputs_written_before_ps |=
               1ull << si_shader_io_get_unique_index(name, index, true);
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            break;
         }
      }
      sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;
      sel->lshs_vertex_stride = sel->esgs_itemsize;

      /* Add 1 dword to reduce LDS bank conflicts, so that each vertex
       * will start on a different bank. (except for the maximum 32*16).
       */
      if (sel->lshs_vertex_stride < 32*16)
         sel->lshs_vertex_stride += 4;

      /* For the ESGS ring in LDS, add 1 dword to reduce LDS bank
       * conflicts, i.e. each vertex will start at a different bank.
       */
      if (sctx->chip_class >= GFX9)
         sel->esgs_itemsize += 4;

      assert(((sel->esgs_itemsize / 4) & C_028AAC_ITEMSIZE) == 0);
      break;

   case PIPE_SHADER_FRAGMENT:
      for (i = 0; i < sel->info.num_inputs; i++) {
         unsigned name = sel->info.input_semantic_name[i];
         unsigned index = sel->info.input_semantic_index[i];

         switch (name) {
         case TGSI_SEMANTIC_GENERIC:
            /* don't process indices the function can't handle */
            if (index >= SI_MAX_IO_GENERIC)
               break;
            /* fall through */
         default:
            sel->inputs_read |=
               1ull << si_shader_io_get_unique_index(name, index, true);
            break;
         case TGSI_SEMANTIC_PCOORD: /* ignore this */
            break;
         }
      }

      for (i = 0; i < 8; i++)
         if (sel->info.colors_written & (1 << i))
            sel->colors_written_4bit |= 0xf << (4 * i);

      for (i = 0; i < sel->info.num_inputs; i++) {
         if (sel->info.input_semantic_name[i] == TGSI_SEMANTIC_COLOR) {
            int index = sel->info.input_semantic_index[i];
            sel->color_attr_index[index] = i;
         }
      }
      break;
   }

   /* PA_CL_VS_OUT_CNTL */
   bool misc_vec_ena =
      sel->info.writes_psize || sel->info.writes_edgeflag ||
      sel->info.writes_layer || sel->info.writes_viewport_index;
   sel->pa_cl_vs_out_cntl =
      S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) |
      S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag) |
      S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
      S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
      S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
      S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
   sel->clipdist_mask = sel->info.writes_clipvertex ?
                           SIX_BITS : sel->info.clipdist_writemask;
   sel->culldist_mask = sel->info.culldist_writemask <<
                        sel->info.num_written_clipdistance;

   /* DB_SHADER_CONTROL */
   sel->db_shader_control =
      S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
      S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(sel->info.writes_stencil) |
      S_02880C_MASK_EXPORT_ENABLE(sel->info.writes_samplemask) |
      S_02880C_KILL_ENABLE(sel->info.uses_kill);

   switch (sel->info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT]) {
   case TGSI_FS_DEPTH_LAYOUT_GREATER:
      sel->db_shader_control |=
         S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
      break;
   case TGSI_FS_DEPTH_LAYOUT_LESS:
      sel->db_shader_control |=
         S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
      break;
   }

   /* Z_ORDER, EXEC_ON_HIER_FAIL and EXEC_ON_NOOP should be set as following:
    *
    *   | early Z/S | writes_mem | allow_ReZ? |      Z_ORDER      | EXEC_ON_HIER_FAIL | EXEC_ON_NOOP
    * --|-----------|------------|------------|-------------------|-------------------|-------------
    * 1a|   false   |   false    |    true    | EarlyZ_Then_ReZ   |         0         |      0
    * 1b|   false   |   false    |    false   | EarlyZ_Then_LateZ |         0         |      0
    * 2 |   false   |   true     |    n/a     |       LateZ       |         1         |      0
    * 3 |   true    |   false    |    n/a     | EarlyZ_Then_LateZ |         0         |      0
    * 4 |   true    |   true     |    n/a     | EarlyZ_Then_LateZ |         0         |      1
    *
    * In cases 3 and 4, HW will force Z_ORDER to EarlyZ regardless of what's set in the register.
    * In case 2, NOOP_CULL is a don't care field. In case 2, 3 and 4, ReZ doesn't make sense.
    *
    * Don't use ReZ without profiling !!!
    *
    * ReZ decreases performance by 15% in DiRT: Showdown on Ultra settings, which has pretty complex
    * shaders.
    */
   if (sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]) {
      /* Cases 3, 4. */
      sel->db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) |
                                S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
                                S_02880C_EXEC_ON_NOOP(sel->info.writes_memory);
   } else if (sel->info.writes_memory) {
      /* Case 2. */
      sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z) |
                                S_02880C_EXEC_ON_HIER_FAIL(1);
   } else {
      /* Case 1. */
      sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
   }

   (void) mtx_init(&sel->mutex, mtx_plain);

   si_schedule_initial_compile(sctx, sel->info.processor, &sel->ready,
                               &sel->compiler_ctx_state, sel,
                               si_init_shader_selector_async);
   return sel;
}
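
/* From the state tracker's point of view, create_shader_selector returns
 * immediately; the actual compile runs on sscreen->shader_compiler_queue,
 * and anything that needs the binary must first wait on sel->ready.
 * A hedged usage sketch using the gallium entry points:
 *
 *    void *cso = pipe->create_vs_state(pipe, &state); // returns at once
 *    pipe->bind_vs_state(pipe, cso);                  // safe, doesn't wait
 *    // the first draw waits on sel->ready inside si_shader_select_with_key
 */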

static void si_update_streamout_state(struct si_context *sctx)
{
   struct si_shader_selector *shader_with_so = si_get_vs(sctx)->cso;

   if (!shader_with_so)
      return;

   sctx->streamout.enabled_stream_buffers_mask =
      shader_with_so->enabled_streamout_buffer_mask;
   sctx->streamout.stride_in_dw = shader_with_so->so.stride;
}

static void si_update_clip_regs(struct si_context *sctx,
                                struct si_shader_selector *old_hw_vs,
                                struct si_shader *old_hw_vs_variant,
                                struct si_shader_selector *next_hw_vs,
                                struct si_shader *next_hw_vs_variant)
{
   if (next_hw_vs &&
       (!old_hw_vs ||
        old_hw_vs->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] !=
        next_hw_vs->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] ||
        old_hw_vs->pa_cl_vs_out_cntl != next_hw_vs->pa_cl_vs_out_cntl ||
        old_hw_vs->clipdist_mask != next_hw_vs->clipdist_mask ||
        old_hw_vs->culldist_mask != next_hw_vs->culldist_mask ||
        !old_hw_vs_variant ||
        !next_hw_vs_variant ||
        old_hw_vs_variant->key.opt.clip_disable !=
        next_hw_vs_variant->key.opt.clip_disable))
      si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
}

static void si_update_common_shader_state(struct si_context *sctx)
{
   sctx->uses_bindless_samplers =
      si_shader_uses_bindless_samplers(sctx->vs_shader.cso) ||
      si_shader_uses_bindless_samplers(sctx->gs_shader.cso) ||
      si_shader_uses_bindless_samplers(sctx->ps_shader.cso) ||
      si_shader_uses_bindless_samplers(sctx->tcs_shader.cso) ||
      si_shader_uses_bindless_samplers(sctx->tes_shader.cso);
   sctx->uses_bindless_images =
      si_shader_uses_bindless_images(sctx->vs_shader.cso) ||
      si_shader_uses_bindless_images(sctx->gs_shader.cso) ||
      si_shader_uses_bindless_images(sctx->ps_shader.cso) ||
      si_shader_uses_bindless_images(sctx->tcs_shader.cso) ||
      si_shader_uses_bindless_images(sctx->tes_shader.cso);
   sctx->do_update_shaders = true;
}

static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
   struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
   struct si_shader_selector *sel = state;

   if (sctx->vs_shader.cso == sel)
      return;

   sctx->vs_shader.cso = sel;
   sctx->vs_shader.current = sel ? sel->first_variant : NULL;
   sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS] : 0;

   si_update_common_shader_state(sctx);
   si_update_vs_viewport_state(sctx);
   si_set_active_descriptors_for_shader(sctx, sel);
   si_update_streamout_state(sctx);
   si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
                       si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}

static void si_update_tess_uses_prim_id(struct si_context *sctx)
{
   sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id =
      (sctx->tes_shader.cso &&
       sctx->tes_shader.cso->info.uses_primid) ||
      (sctx->tcs_shader.cso &&
       sctx->tcs_shader.cso->info.uses_primid) ||
      (sctx->gs_shader.cso &&
       sctx->gs_shader.cso->info.uses_primid) ||
      (sctx->ps_shader.cso && !sctx->gs_shader.cso &&
       sctx->ps_shader.cso->info.uses_primid);
}

static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
   struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
   struct si_shader_selector *sel = state;
   bool enable_changed = !!sctx->gs_shader.cso != !!sel;

   if (sctx->gs_shader.cso == sel)
      return;

   sctx->gs_shader.cso = sel;
   sctx->gs_shader.current = sel ? sel->first_variant : NULL;
   sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;

   si_update_common_shader_state(sctx);
   sctx->last_rast_prim = -1; /* reset this so that it gets updated */

   if (enable_changed) {
      si_shader_change_notify(sctx);
      if (sctx->ia_multi_vgt_param_key.u.uses_tess)
         si_update_tess_uses_prim_id(sctx);
   }
   si_update_vs_viewport_state(sctx);
   si_set_active_descriptors_for_shader(sctx, sel);
   si_update_streamout_state(sctx);
   si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
                       si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}

static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_shader_selector *sel = state;
   bool enable_changed = !!sctx->tcs_shader.cso != !!sel;

   if (sctx->tcs_shader.cso == sel)
      return;

   sctx->tcs_shader.cso = sel;
   sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
   si_update_tess_uses_prim_id(sctx);

   si_update_common_shader_state(sctx);

   if (enable_changed)
      sctx->last_tcs = NULL; /* invalidate derived tess state */

   si_set_active_descriptors_for_shader(sctx, sel);
}

static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
   struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
   struct si_shader_selector *sel = state;
   bool enable_changed = !!sctx->tes_shader.cso != !!sel;

   if (sctx->tes_shader.cso == sel)
      return;

   sctx->tes_shader.cso = sel;
   sctx->tes_shader.current = sel ? sel->first_variant : NULL;
   sctx->ia_multi_vgt_param_key.u.uses_tess = sel != NULL;
   si_update_tess_uses_prim_id(sctx);

   si_update_common_shader_state(sctx);
   sctx->last_rast_prim = -1; /* reset this so that it gets updated */

   if (enable_changed) {
      si_shader_change_notify(sctx);
      sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
   }
   si_update_vs_viewport_state(sctx);
   si_set_active_descriptors_for_shader(sctx, sel);
   si_update_streamout_state(sctx);
   si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
                       si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}

static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_shader_selector *old_sel = sctx->ps_shader.cso;
   struct si_shader_selector *sel = state;

   /* skip if supplied shader is one already in use */
   if (old_sel == sel)
      return;

   sctx->ps_shader.cso = sel;
   sctx->ps_shader.current = sel ? sel->first_variant : NULL;

   si_update_common_shader_state(sctx);
   if (sel) {
      if (sctx->ia_multi_vgt_param_key.u.uses_tess)
         si_update_tess_uses_prim_id(sctx);

      if (!old_sel ||
          old_sel->info.colors_written != sel->info.colors_written)
         si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);

      if (sctx->screen->has_out_of_order_rast &&
          (!old_sel ||
           old_sel->info.writes_memory != sel->info.writes_memory ||
           old_sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] !=
           sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]))
         si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
   }
   si_set_active_descriptors_for_shader(sctx, sel);
   si_update_ps_colorbuf0_slot(sctx);
}

static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
{
   if (shader->is_optimized) {
      util_queue_drop_job(&sctx->screen->shader_compiler_queue_low_priority,
                          &shader->ready);
   }

   util_queue_fence_destroy(&shader->ready);

   if (shader->pm4) {
      switch (shader->selector->type) {
      case PIPE_SHADER_VERTEX:
         if (shader->key.as_ls) {
            assert(sctx->chip_class <= GFX8);
            si_pm4_delete_state(sctx, ls, shader->pm4);
         } else if (shader->key.as_es) {
            assert(sctx->chip_class <= GFX8);
            si_pm4_delete_state(sctx, es, shader->pm4);
         } else {
            si_pm4_delete_state(sctx, vs, shader->pm4);
         }
         break;
      case PIPE_SHADER_TESS_CTRL:
         si_pm4_delete_state(sctx, hs, shader->pm4);
         break;
      case PIPE_SHADER_TESS_EVAL:
         if (shader->key.as_es) {
            assert(sctx->chip_class <= GFX8);
            si_pm4_delete_state(sctx, es, shader->pm4);
         } else {
            si_pm4_delete_state(sctx, vs, shader->pm4);
         }
         break;
      case PIPE_SHADER_GEOMETRY:
         if (shader->is_gs_copy_shader)
            si_pm4_delete_state(sctx, vs, shader->pm4);
         else
            si_pm4_delete_state(sctx, gs, shader->pm4);
         break;
      case PIPE_SHADER_FRAGMENT:
         si_pm4_delete_state(sctx, ps, shader->pm4);
         break;
      }
   }

   si_shader_selector_reference(sctx, &shader->previous_stage_sel, NULL);
   si_shader_destroy(shader);
   free(shader);
}

void si_destroy_shader_selector(struct si_context *sctx,
                                struct si_shader_selector *sel)
{
   struct si_shader *p = sel->first_variant, *c;
   struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
      [PIPE_SHADER_VERTEX] = &sctx->vs_shader,
      [PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
      [PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
      [PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
      [PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
   };

   util_queue_drop_job(&sctx->screen->shader_compiler_queue, &sel->ready);

   if (current_shader[sel->type]->cso == sel) {
      current_shader[sel->type]->cso = NULL;
      current_shader[sel->type]->current = NULL;
   }

   while (p) {
      c = p->next_variant;
      si_delete_shader(sctx, p);
      p = c;
   }

   if (sel->main_shader_part)
      si_delete_shader(sctx, sel->main_shader_part);
   if (sel->main_shader_part_ls)
      si_delete_shader(sctx, sel->main_shader_part_ls);
   if (sel->main_shader_part_es)
      si_delete_shader(sctx, sel->main_shader_part_es);
   if (sel->gs_copy_shader)
      si_delete_shader(sctx, sel->gs_copy_shader);

   util_queue_fence_destroy(&sel->ready);
   mtx_destroy(&sel->mutex);

   free(sel->tokens);
   ralloc_free(sel->nir);
   free(sel);
}

static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_shader_selector *sel = (struct si_shader_selector *)state;

   si_shader_selector_reference(sctx, &sel, NULL);
}

static unsigned si_get_ps_input_cntl(struct si_context *sctx,
                                     struct si_shader *vs, unsigned name,
                                     unsigned index, unsigned interpolate)
{
   struct tgsi_shader_info *vsinfo = &vs->selector->info;
   unsigned j, offset, ps_input_cntl = 0;

   if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
       (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade) ||
       name == TGSI_SEMANTIC_PRIMID)
      ps_input_cntl |= S_028644_FLAT_SHADE(1);

   if (name == TGSI_SEMANTIC_PCOORD ||
       (name == TGSI_SEMANTIC_TEXCOORD &&
        sctx->sprite_coord_enable & (1 << index))) {
      ps_input_cntl |= S_028644_PT_SPRITE_TEX(1);
   }

   for (j = 0; j < vsinfo->num_outputs; j++) {
      if (name == vsinfo->output_semantic_name[j] &&
          index == vsinfo->output_semantic_index[j]) {
         offset = vs->info.vs_output_param_offset[j];

         if (offset <= AC_EXP_PARAM_OFFSET_31) {
            /* The input is loaded from parameter memory. */
            ps_input_cntl |= S_028644_OFFSET(offset);
         } else if (!G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
            if (offset == AC_EXP_PARAM_UNDEFINED) {
               /* This can happen with depth-only rendering. */
               offset = 0;
            } else {
               /* The input is a DEFAULT_VAL constant. */
               assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
                      offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
               offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
            }

            ps_input_cntl = S_028644_OFFSET(0x20) |
                            S_028644_DEFAULT_VAL(offset);
         }
         break;
      }
   }

   if (name == TGSI_SEMANTIC_PRIMID)
      /* PrimID is written after the last output. */
      ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[vsinfo->num_outputs]);
   else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
      /* No corresponding output found, load defaults into input.
       * Don't set any other bits.
       * (FLAT_SHADE=1 completely changes behavior) */
      ps_input_cntl = S_028644_OFFSET(0x20);
      /* D3D 9 behaviour. GL is undefined */
      if (name == TGSI_SEMANTIC_COLOR && index == 0)
         ps_input_cntl |= S_028644_DEFAULT_VAL(3);
   }
   return ps_input_cntl;
}
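
/* Decoding hint for the values above: OFFSET = 0x20 tells SPI to load one
 * of the hardwired DEFAULT_VAL constants instead of a parameter from
 * memory, and DEFAULT_VAL selects which one (the AC_EXP_PARAM_DEFAULT_VAL_
 * 0000..1111 range). For example, the D3D9 color fallback encodes
 * OFFSET(0x20) | DEFAULT_VAL(3), i.e. the constant (1, 1, 1, 1).
 */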

static void si_emit_spi_map(struct si_context *sctx)
{
   struct si_shader *ps = sctx->ps_shader.current;
   struct si_shader *vs = si_get_vs_state(sctx);
   struct tgsi_shader_info *psinfo = ps ? &ps->selector->info : NULL;
   unsigned i, num_interp, num_written = 0, bcol_interp[2];
   unsigned spi_ps_input_cntl[32];

   if (!ps || !ps->selector->info.num_inputs)
      return;

   num_interp = si_get_ps_num_interp(ps);
   assert(num_interp > 0);

   for (i = 0; i < psinfo->num_inputs; i++) {
      unsigned name = psinfo->input_semantic_name[i];
      unsigned index = psinfo->input_semantic_index[i];
      unsigned interpolate = psinfo->input_interpolate[i];

      spi_ps_input_cntl[num_written++] = si_get_ps_input_cntl(sctx, vs, name,
                                                              index, interpolate);

      if (name == TGSI_SEMANTIC_COLOR) {
         assert(index < ARRAY_SIZE(bcol_interp));
         bcol_interp[index] = interpolate;
      }
   }

   if (ps->key.part.ps.prolog.color_two_side) {
      unsigned bcol = TGSI_SEMANTIC_BCOLOR;

      for (i = 0; i < 2; i++) {
         if (!(psinfo->colors_read & (0xf << (i * 4))))
            continue;

         spi_ps_input_cntl[num_written++] =
            si_get_ps_input_cntl(sctx, vs, bcol, i, bcol_interp[i]);
      }
   }
   assert(num_interp == num_written);

   /* R_028644_SPI_PS_INPUT_CNTL_0 */
   /* Dota 2: Only ~16% of SPI map updates set different values. */
   /* Talos: Only ~9% of SPI map updates set different values. */
   unsigned initial_cdw = sctx->gfx_cs->current.cdw;
   radeon_opt_set_context_regn(sctx, R_028644_SPI_PS_INPUT_CNTL_0,
                               spi_ps_input_cntl,
                               sctx->tracked_regs.spi_ps_input_cntl, num_interp);

   if (initial_cdw != sctx->gfx_cs->current.cdw)
      sctx->context_roll = true;
}
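
/* The "opt" register write above diffs the computed array against
 * sctx->tracked_regs.spi_ps_input_cntl and emits nothing when unchanged,
 * which is why the Dota 2 / Talos statistics matter: most updates are
 * no-ops and cost no command-stream space. context_roll is set only when
 * the command stream actually grew (initial_cdw changed).
 */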

/**
 * Writing CONFIG or UCONFIG VGT registers requires VGT_FLUSH before that.
 */
static void si_init_config_add_vgt_flush(struct si_context *sctx)
{
   if (sctx->init_config_has_vgt_flush)
      return;

   /* Done by Vulkan before VGT_FLUSH. */
   si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
   si_pm4_cmd_add(sctx->init_config,
                  EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
   si_pm4_cmd_end(sctx->init_config, false);

   /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
   si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
   si_pm4_cmd_add(sctx->init_config, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
   si_pm4_cmd_end(sctx->init_config, false);
   sctx->init_config_has_vgt_flush = true;
}

/* Initialize state related to ESGS / GSVS ring buffers */
static bool si_update_gs_ring_buffers(struct si_context *sctx)
{
   struct si_shader_selector *es =
      sctx->tes_shader.cso ? sctx->tes_shader.cso : sctx->vs_shader.cso;
   struct si_shader_selector *gs = sctx->gs_shader.cso;
   struct si_pm4_state *pm4;

   /* Chip constants. */
   unsigned num_se = sctx->screen->info.max_se;
   unsigned wave_size = 64;
   unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
   /* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
    * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
    */
   unsigned gs_vertex_reuse = (sctx->chip_class >= GFX8 ? 32 : 16) * num_se;
   unsigned alignment = 256 * num_se;
   /* The maximum size is 63.999 MB per SE. */
   unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;

   /* Calculate the minimum size. */
   unsigned min_esgs_ring_size = align(es->esgs_itemsize * gs_vertex_reuse *
                                       wave_size, alignment);

   /* These are recommended sizes, not minimum sizes. */
   unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
                             es->esgs_itemsize * gs->gs_input_verts_per_prim;
   unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
                             gs->max_gsvs_emit_size;

   min_esgs_ring_size = align(min_esgs_ring_size, alignment);
   esgs_ring_size = align(esgs_ring_size, alignment);
   gsvs_ring_size = align(gsvs_ring_size, alignment);

   esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
   gsvs_ring_size = MIN2(gsvs_ring_size, max_size);

   /* Some rings don't have to be allocated if shaders don't use them.
    * (e.g. no varyings between ES and GS or GS and VS)
    *
    * GFX9 doesn't have the ESGS ring.
    */
   bool update_esgs = sctx->chip_class <= GFX8 &&
                      esgs_ring_size &&
                      (!sctx->esgs_ring ||
                       sctx->esgs_ring->width0 < esgs_ring_size);
   bool update_gsvs = gsvs_ring_size &&
                      (!sctx->gsvs_ring ||
                       sctx->gsvs_ring->width0 < gsvs_ring_size);

   if (!update_esgs && !update_gsvs)
      return true;

   if (update_esgs) {
      pipe_resource_reference(&sctx->esgs_ring, NULL);
      sctx->esgs_ring =
         pipe_aligned_buffer_create(sctx->b.screen,
                                    SI_RESOURCE_FLAG_UNMAPPABLE,
                                    PIPE_USAGE_DEFAULT,
                                    esgs_ring_size, alignment);
      if (!sctx->esgs_ring)
         return false;
   }

   if (update_gsvs) {
      pipe_resource_reference(&sctx->gsvs_ring, NULL);
      sctx->gsvs_ring =
         pipe_aligned_buffer_create(sctx->b.screen,
                                    SI_RESOURCE_FLAG_UNMAPPABLE,
                                    PIPE_USAGE_DEFAULT,
                                    gsvs_ring_size, alignment);
      if (!sctx->gsvs_ring)
         return false;
   }

   /* Create the "init_config_gs_rings" state. */
   pm4 = CALLOC_STRUCT(si_pm4_state);
   if (!pm4)
      return false;

   if (sctx->chip_class >= GFX7) {
      if (sctx->esgs_ring) {
         assert(sctx->chip_class <= GFX8);
         si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
                        sctx->esgs_ring->width0 / 256);
      }
      if (sctx->gsvs_ring)
         si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE,
                        sctx->gsvs_ring->width0 / 256);
   } else {
      if (sctx->esgs_ring)
         si_pm4_set_reg(pm4, R_0088C8_VGT_ESGS_RING_SIZE,
                        sctx->esgs_ring->width0 / 256);
      if (sctx->gsvs_ring)
         si_pm4_set_reg(pm4, R_0088CC_VGT_GSVS_RING_SIZE,
                        sctx->gsvs_ring->width0 / 256);
   }

   /* Set the state. */
   if (sctx->init_config_gs_rings)
      si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
   sctx->init_config_gs_rings = pm4;

   if (!sctx->init_config_has_vgt_flush) {
      si_init_config_add_vgt_flush(sctx);
      si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
   }

   /* Flush the context to re-emit both init_config states. */
   sctx->initial_gfx_cs_size = 0; /* force flush */
   si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);

   /* Set ring bindings. */
   if (sctx->esgs_ring) {
      assert(sctx->chip_class <= GFX8);
      si_set_ring_buffer(sctx, SI_ES_RING_ESGS,
                         sctx->esgs_ring, 0, sctx->esgs_ring->width0,
                         true, true, 4, 64, 0);
      si_set_ring_buffer(sctx, SI_GS_RING_ESGS,
                         sctx->esgs_ring, 0, sctx->esgs_ring->width0,
                         false, false, 0, 0, 0);
   }
   if (sctx->gsvs_ring) {
      si_set_ring_buffer(sctx, SI_RING_GSVS,
                         sctx->gsvs_ring, 0, sctx->gsvs_ring->width0,
                         false, false, 0, 0, 0);
   }

   return true;
}
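
/* Worked sizing example for the rings above, assuming 4 shader engines on
 * GFX8 (illustrative numbers only):
 *
 *    num_se = 4, wave_size = 64, max_gs_waves = 128
 *    gs_vertex_reuse = 32 * 4 = 128, alignment = 1024
 *    es->esgs_itemsize = 16, gs->gs_input_verts_per_prim = 3
 *
 *    esgs_ring_size     = 128 * 2 * 64 * 16 * 3 = 786432 bytes (pre-align)
 *    min_esgs_ring_size = align(16 * 128 * 64, 1024) = 131072 bytes
 *
 * The recommended size wins here; it is then clamped to 63.999 MB per SE.
 */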

static void si_shader_lock(struct si_shader *shader)
{
   mtx_lock(&shader->selector->mutex);
   if (shader->previous_stage_sel) {
      assert(shader->previous_stage_sel != shader->selector);
      mtx_lock(&shader->previous_stage_sel->mutex);
   }
}

static void si_shader_unlock(struct si_shader *shader)
{
   if (shader->previous_stage_sel)
      mtx_unlock(&shader->previous_stage_sel->mutex);
   mtx_unlock(&shader->selector->mutex);
}

/**
 * @returns 1 if \p shader has been updated to use a new scratch buffer
 *          0 if not
 *          < 0 if there was a failure
 */
static int si_update_scratch_buffer(struct si_context *sctx,
                                    struct si_shader *shader)
{
   uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
   int r;

   if (!shader)
      return 0;

   /* This shader doesn't need a scratch buffer */
   if (shader->config.scratch_bytes_per_wave == 0)
      return 0;

   /* Prevent race conditions when updating:
    * - si_shader::scratch_bo
    * - si_shader::binary::code
    * - si_shader::previous_stage::binary::code.
    */
   si_shader_lock(shader);

   /* This shader is already configured to use the current
    * scratch buffer. */
   if (shader->scratch_bo == sctx->scratch_buffer) {
      si_shader_unlock(shader);
      return 0;
   }

   assert(sctx->scratch_buffer);

   if (shader->previous_stage)
      si_shader_apply_scratch_relocs(shader->previous_stage, scratch_va);

   si_shader_apply_scratch_relocs(shader, scratch_va);

   /* Replace the shader bo with a new bo that has the relocs applied. */
   r = si_shader_binary_upload(sctx->screen, shader);
   if (r) {
      si_shader_unlock(shader);
      return r;
   }

   /* Update the shader state to use the new shader bo. */
   si_shader_init_pm4_state(sctx->screen, shader);

   si_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

   si_shader_unlock(shader);
   return 1;
}

static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
   return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
}

static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
{
   return shader ? shader->config.scratch_bytes_per_wave : 0;
}

static struct si_shader *si_get_tcs_current(struct si_context *sctx)
{
   if (!sctx->tes_shader.cso)
      return NULL; /* tessellation disabled */

   return sctx->tcs_shader.cso ? sctx->tcs_shader.current :
                                 sctx->fixed_func_tcs_shader.current;
}

static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
   unsigned bytes = 0;

   bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
   bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
   bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
   bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));

   if (sctx->tes_shader.cso) {
      struct si_shader *tcs = si_get_tcs_current(sctx);

      bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(tcs));
   }
   return bytes;
}

static bool si_update_scratch_relocs(struct si_context *sctx)
{
   struct si_shader *tcs = si_get_tcs_current(sctx);
   int r;

   /* Update the shaders, so that they are using the latest scratch.
    * The scratch buffer may have been changed since these shaders were
    * last used, so we still need to try to update them, even if they
    * require scratch buffers smaller than the current size.
    */
   r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
   if (r < 0)
      return false;
   if (r == 1)
      si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

   r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
   if (r < 0)
      return false;
   if (r == 1)
      si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);

   r = si_update_scratch_buffer(sctx, tcs);
   if (r < 0)
      return false;
   if (r == 1)
      si_pm4_bind_state(sctx, hs, tcs->pm4);

   /* VS can be bound as LS, ES, or VS. */
   r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
   if (r < 0)
      return false;
   if (r == 1) {
      if (sctx->tes_shader.current)
         si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
      else if (sctx->gs_shader.current)
         si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
      else
         si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
   }

   /* TES can be bound as ES or VS. */
   r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
   if (r < 0)
      return false;
   if (r == 1) {
      if (sctx->gs_shader.current)
         si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
      else
         si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
   }

   return true;
}

static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
   unsigned current_scratch_buffer_size =
      si_get_current_scratch_buffer_size(sctx);
   unsigned scratch_bytes_per_wave =
      si_get_max_scratch_bytes_per_wave(sctx);
   unsigned scratch_needed_size = scratch_bytes_per_wave *
      sctx->scratch_waves;
   unsigned spi_tmpring_size;

   if (scratch_needed_size > 0) {
      if (scratch_needed_size > current_scratch_buffer_size) {
         /* Create a bigger scratch buffer */
         si_resource_reference(&sctx->scratch_buffer, NULL);

         sctx->scratch_buffer =
            si_aligned_buffer_create(&sctx->screen->b,
                                     SI_RESOURCE_FLAG_UNMAPPABLE,
                                     PIPE_USAGE_DEFAULT,
                                     scratch_needed_size, 256);
         if (!sctx->scratch_buffer)
            return false;

         si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
         si_context_add_resource_size(sctx,
                                      &sctx->scratch_buffer->b.b);
      }

      if (!si_update_scratch_relocs(sctx))
         return false;
   }

   /* The LLVM shader backend should be reporting aligned scratch_sizes. */
   assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
          "scratch size should already be aligned correctly.");

   spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
                      S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
   if (spi_tmpring_size != sctx->spi_tmpring_size) {
      sctx->spi_tmpring_size = spi_tmpring_size;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
   }
   return true;
}
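
/* Encoding note for SPI_TMPRING_SIZE above: WAVESIZE is expressed in units
 * of 1024 bytes, hence the scratch_bytes_per_wave >> 10, and the assert
 * checks that the total scratch size is already 1024-byte aligned.
 * Example: 2048 scratch bytes per wave with 32 scratch waves encode as
 * S_0286E8_WAVES(32) | S_0286E8_WAVESIZE(2).
 */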

static void si_init_tess_factor_ring(struct si_context *sctx)
{
   assert(!sctx->tess_rings);

   /* The address must be aligned to 2^19, because the shader only
    * receives the high 13 bits.
    */
   sctx->tess_rings = pipe_aligned_buffer_create(sctx->b.screen,
                                                 SI_RESOURCE_FLAG_32BIT,
                                                 PIPE_USAGE_DEFAULT,
                                                 sctx->screen->tess_offchip_ring_size +
                                                 sctx->screen->tess_factor_ring_size,
                                                 1 << 19);
   if (!sctx->tess_rings)
      return;

   si_init_config_add_vgt_flush(sctx);

   si_pm4_add_bo(sctx->init_config, si_resource(sctx->tess_rings),
                 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);

   uint64_t factor_va = si_resource(sctx->tess_rings)->gpu_address +
                        sctx->screen->tess_offchip_ring_size;

   /* Append these registers to the init config state. */
   if (sctx->chip_class >= GFX7) {
      si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
                     S_030938_SIZE(sctx->screen->tess_factor_ring_size / 4));
      si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
                     factor_va >> 8);
      if (sctx->chip_class >= GFX9)
         si_pm4_set_reg(sctx->init_config, R_030944_VGT_TF_MEMORY_BASE_HI,
                        S_030944_BASE_HI(factor_va >> 40));
      si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
                     sctx->screen->vgt_hs_offchip_param);
   } else {
      si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
                     S_008988_SIZE(sctx->screen->tess_factor_ring_size / 4));
      si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
                     factor_va >> 8);
      si_pm4_set_reg(sctx->init_config, R_0089B0_VGT_HS_OFFCHIP_PARAM,
                     sctx->screen->vgt_hs_offchip_param);
   }

   /* Flush the context to re-emit the init_config state.
    * This is done only once in a lifetime of a context.
    */
   si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
   sctx->initial_gfx_cs_size = 0; /* force flush */
   si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}

static void si_update_vgt_shader_config(struct si_context *sctx)
{
   /* Calculate the index of the config.
    * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
   unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso;
   struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];

   if (!*pm4) {
      uint32_t stages = 0;

      *pm4 = CALLOC_STRUCT(si_pm4_state);

      if (sctx->tes_shader.cso) {
         stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
                   S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);

         if (sctx->gs_shader.cso)
            stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
                      S_028B54_GS_EN(1) |
                      S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
         else
            stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
      } else if (sctx->gs_shader.cso) {
         stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
                   S_028B54_GS_EN(1) |
                   S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
      }

      if (sctx->chip_class >= GFX9)
         stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);

      si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
   }
   si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}
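
/* The four cached configs, per the index formula above:
 *
 *    index 0: VS only      -> all stages off (real HW VS)
 *    index 1: VS+GS        -> ES = real VS, GS on, VS = copy shader
 *    index 2: VS+Tess      -> LS on, HS on, VS = DS
 *    index 3: VS+Tess+GS   -> LS/HS on, ES = DS, GS on, VS = copy shader
 */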
3381 bool si_update_shaders(struct si_context
*sctx
)
3383 struct pipe_context
*ctx
= (struct pipe_context
*)sctx
;
3384 struct si_compiler_ctx_state compiler_state
;
3385 struct si_state_rasterizer
*rs
= sctx
->queued
.named
.rasterizer
;
3386 struct si_shader
*old_vs
= si_get_vs_state(sctx
);
3387 bool old_clip_disable
= old_vs
? old_vs
->key
.opt
.clip_disable
: false;
3388 struct si_shader
*old_ps
= sctx
->ps_shader
.current
;
3389 unsigned old_spi_shader_col_format
=
3390 old_ps
? old_ps
->key
.part
.ps
.epilog
.spi_shader_col_format
: 0;
3393 compiler_state
.compiler
= &sctx
->compiler
;
3394 compiler_state
.debug
= sctx
->debug
;
3395 compiler_state
.is_debug_context
= sctx
->is_debug
;

	/* Update stages before GS. */
	if (sctx->tes_shader.cso) {
		if (!sctx->tess_rings) {
			si_init_tess_factor_ring(sctx);
			if (!sctx->tess_rings)
				return false;
		}

		/* VS as LS */
		if (sctx->chip_class <= GFX8) {
			r = si_shader_select(ctx, &sctx->vs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
		}

		if (sctx->tcs_shader.cso) {
			r = si_shader_select(ctx, &sctx->tcs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
		} else {
			if (!sctx->fixed_func_tcs_shader.cso) {
				sctx->fixed_func_tcs_shader.cso =
					si_create_fixed_func_tcs(sctx);
				if (!sctx->fixed_func_tcs_shader.cso)
					return false;
			}

			r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs,
					  sctx->fixed_func_tcs_shader.current->pm4);
		}

		if (sctx->gs_shader.cso) {
			/* TES as ES */
			if (sctx->chip_class <= GFX8) {
				r = si_shader_select(ctx, &sctx->tes_shader,
						     &compiler_state);
				if (r)
					return false;
				si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
			}
		} else {
			/* TES as VS */
			r = si_shader_select(ctx, &sctx->tes_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
		}
	} else if (sctx->gs_shader.cso) {
		if (sctx->chip_class <= GFX8) {
			/* VS as ES */
			r = si_shader_select(ctx, &sctx->vs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);

			si_pm4_bind_state(sctx, ls, NULL);
			si_pm4_bind_state(sctx, hs, NULL);
		}
	} else {
		/* VS as VS */
		r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		si_pm4_bind_state(sctx, ls, NULL);
		si_pm4_bind_state(sctx, hs, NULL);
	}
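
	/* Update GS. While a geometry shader is bound, the hardware VS stage
	 * runs the GS copy shader, which reads the GS output ring and feeds
	 * the fixed-function pipeline.
	 */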
	if (sctx->gs_shader.cso) {
		r = si_shader_select(ctx, &sctx->gs_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4);

		if (!si_update_gs_ring_buffers(sctx))
			return false;
	} else {
		si_pm4_bind_state(sctx, gs, NULL);
		if (sctx->chip_class <= GFX8)
			si_pm4_bind_state(sctx, es, NULL);
	}

	si_update_vgt_shader_config(sctx);

	if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable)
		si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
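
	/* Update PS. DB_SHADER_CONTROL is derived from the selector plus the
	 * current alpha test: KILL_ENABLE must be set whenever the alpha test
	 * can discard fragments, so that the DB knows the shader may kill
	 * pixels.
	 */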
	if (sctx->ps_shader.cso) {
		unsigned db_shader_control;

		r = si_shader_select(ctx, &sctx->ps_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		db_shader_control =
			sctx->ps_shader.cso->db_shader_control |
			S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS);

		if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
		    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
		    sctx->flatshade != rs->flatshade) {
			sctx->sprite_coord_enable = rs->sprite_coord_enable;
			sctx->flatshade = rs->flatshade;
			si_mark_atom_dirty(sctx, &sctx->atoms.s.spi_map);
		}

		if (sctx->screen->rbplus_allowed &&
		    si_pm4_state_changed(sctx, ps) &&
		    (!old_ps ||
		     old_spi_shader_col_format !=
		     sctx->ps_shader.current->key.part.ps.epilog.spi_shader_col_format))
			si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);

		if (sctx->ps_db_shader_control != db_shader_control) {
			sctx->ps_db_shader_control = db_shader_control;
			si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
			if (sctx->screen->dpbb_allowed)
				si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
		}

		if (sctx->smoothing_enabled !=
		    sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing) {
			sctx->smoothing_enabled =
				sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing;
			si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);

			if (sctx->chip_class == GFX6)
				si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);

			if (sctx->framebuffer.nr_samples <= 1)
				si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_sample_locs);
		}
	}

	if (si_pm4_state_enabled_and_changed(sctx, ls) ||
	    si_pm4_state_enabled_and_changed(sctx, hs) ||
	    si_pm4_state_enabled_and_changed(sctx, es) ||
	    si_pm4_state_enabled_and_changed(sctx, gs) ||
	    si_pm4_state_enabled_and_changed(sctx, vs) ||
	    si_pm4_state_enabled_and_changed(sctx, ps)) {
		if (!si_update_spi_tmpring_size(sctx))
			return false;
	}
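
	/* The GFX7+ bookkeeping below repeats one pattern per stage: request
	 * an L2 prefetch when a stage's pm4 state is enabled and has changed,
	 * and drop the request when the stage is unbound. A hypothetical
	 * helper (not part of the driver) could fold the six copies into one
	 * macro; a minimal sketch, assuming only the existing
	 * si_pm4_state_enabled_and_changed() macro:
	 */
#if 0
#define SI_UPDATE_PREFETCH(sctx, stage, flag) do { \
	if (si_pm4_state_enabled_and_changed(sctx, stage)) \
		(sctx)->prefetch_L2_mask |= (flag); \
	else if (!(sctx)->queued.named.stage) \
		(sctx)->prefetch_L2_mask &= ~(flag); \
} while (0)
/* Usage: SI_UPDATE_PREFETCH(sctx, ls, SI_PREFETCH_LS); and so on. */
#endif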
	if (sctx->chip_class >= GFX7) {
		if (si_pm4_state_enabled_and_changed(sctx, ls))
			sctx->prefetch_L2_mask |= SI_PREFETCH_LS;
		else if (!sctx->queued.named.ls)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_LS;

		if (si_pm4_state_enabled_and_changed(sctx, hs))
			sctx->prefetch_L2_mask |= SI_PREFETCH_HS;
		else if (!sctx->queued.named.hs)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_HS;

		if (si_pm4_state_enabled_and_changed(sctx, es))
			sctx->prefetch_L2_mask |= SI_PREFETCH_ES;
		else if (!sctx->queued.named.es)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_ES;

		if (si_pm4_state_enabled_and_changed(sctx, gs))
			sctx->prefetch_L2_mask |= SI_PREFETCH_GS;
		else if (!sctx->queued.named.gs)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_GS;

		if (si_pm4_state_enabled_and_changed(sctx, vs))
			sctx->prefetch_L2_mask |= SI_PREFETCH_VS;
		else if (!sctx->queued.named.vs)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_VS;

		if (si_pm4_state_enabled_and_changed(sctx, ps))
			sctx->prefetch_L2_mask |= SI_PREFETCH_PS;
		else if (!sctx->queued.named.ps)
			sctx->prefetch_L2_mask &= ~SI_PREFETCH_PS;
	}

	sctx->do_update_shaders = false;
	return true;
}
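
/* Emit SPI_TMPRING_SIZE and keep the scratch buffer resident in the command
 * stream; scratch is read and written by any shader that spills registers.
 */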
static void si_emit_scratch_state(struct si_context *sctx)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;

	radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
			       sctx->spi_tmpring_size);

	if (sctx->scratch_buffer) {
		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  sctx->scratch_buffer, RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SCRATCH_BUFFER);
	}
}
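
/* Register the shader-related state atoms and pipe_context hooks. All five
 * stages share one selector implementation for create and delete; only the
 * bind hooks are stage-specific.
 */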
void si_init_shader_functions(struct si_context *sctx)
{
	sctx->atoms.s.spi_map.emit = si_emit_spi_map;
	sctx->atoms.s.scratch_state.emit = si_emit_scratch_state;

	sctx->b.create_vs_state = si_create_shader_selector;
	sctx->b.create_tcs_state = si_create_shader_selector;
	sctx->b.create_tes_state = si_create_shader_selector;
	sctx->b.create_gs_state = si_create_shader_selector;
	sctx->b.create_fs_state = si_create_shader_selector;

	sctx->b.bind_vs_state = si_bind_vs_shader;
	sctx->b.bind_tcs_state = si_bind_tcs_shader;
	sctx->b.bind_tes_state = si_bind_tes_shader;
	sctx->b.bind_gs_state = si_bind_gs_shader;
	sctx->b.bind_fs_state = si_bind_ps_shader;

	sctx->b.delete_vs_state = si_delete_shader_selector;
	sctx->b.delete_tcs_state = si_delete_shader_selector;
	sctx->b.delete_tes_state = si_delete_shader_selector;
	sctx->b.delete_gs_state = si_delete_shader_selector;
	sctx->b.delete_fs_state = si_delete_shader_selector;
}
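
/* Illustrative only: how a Gallium frontend typically drives the hooks
 * installed above. `pipe` and `state` are hypothetical; the hook signatures
 * are the standard pipe_context ones.
 */
#if 0
	struct pipe_shader_state state = {0}; /* filled with TGSI or NIR */
	void *vs = pipe->create_vs_state(pipe, &state);
	pipe->bind_vs_state(pipe, vs);
	/* ... draw calls ... */
	pipe->bind_vs_state(pipe, NULL);
	pipe->delete_vs_state(pipe, vs);
#endif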