/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_build_pm4.h"

#include "compiler/nir/nir_serialize.h"
#include "nir/tgsi_to_nir.h"
#include "util/hash_table.h"
#include "util/crc32.h"
#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_prim.h"

#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"
/**
 * Return the IR key for the shader cache.
 */
void si_get_ir_cache_key(struct si_shader_selector *sel, bool ngg, bool es,
                         unsigned char ir_sha1_cache_key[20])
{
   struct blob blob = {};
   unsigned ir_size;
   void *ir_binary;

   if (sel->nir_binary) {
      ir_binary = sel->nir_binary;
      ir_size = sel->nir_size;
   } else {
      assert(sel->nir);

      blob_init(&blob);
      nir_serialize(&blob, sel->nir, true);
      ir_binary = blob.data;
      ir_size = blob.size;
   }

   /* These settings affect the compilation, but they are not derived
    * from the input shader IR.
    */
   unsigned shader_variant_flags = 0;

   if (ngg)
      shader_variant_flags |= 1 << 0;
   if (sel->nir)
      shader_variant_flags |= 1 << 1;
   if (si_get_wave_size(sel->screen, sel->type, ngg, es, false) == 32)
      shader_variant_flags |= 1 << 2;
   if (sel->type == PIPE_SHADER_FRAGMENT &&
       sel->info.uses_derivatives &&
       sel->info.uses_kill &&
       sel->screen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL))
      shader_variant_flags |= 1 << 3;

   /* This varies depending on whether compute-based culling is enabled. */
   shader_variant_flags |= sel->screen->num_vbos_in_user_sgprs << 4;

   struct mesa_sha1 ctx;
   _mesa_sha1_init(&ctx);
   _mesa_sha1_update(&ctx, &shader_variant_flags, 4);
   _mesa_sha1_update(&ctx, ir_binary, ir_size);
   if (sel->type == PIPE_SHADER_VERTEX ||
       sel->type == PIPE_SHADER_TESS_EVAL ||
       sel->type == PIPE_SHADER_GEOMETRY)
      _mesa_sha1_update(&ctx, &sel->so, sizeof(sel->so));
   _mesa_sha1_final(&ctx, ir_sha1_cache_key);

   if (ir_binary == blob.data)
      blob_finish(&blob);
}
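/* The resulting key is a SHA1 over the variant flags above, the IR (the
 * stored NIR binary or freshly serialized NIR), and, for stages that can do
 * streamout, the streamout state, so a change in any of these selects a
 * different cache entry.
 */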
/** Copy "data" to "ptr" and return the next dword following copied data. */
static uint32_t *write_data(uint32_t *ptr, const void *data, unsigned size)
{
   /* data may be NULL if size == 0 */
   if (size)
      memcpy(ptr, data, size);
   ptr += DIV_ROUND_UP(size, 4);
   return ptr;
}

/** Read data from "ptr". Return the next dword following the data. */
static uint32_t *read_data(uint32_t *ptr, void *data, unsigned size)
{
   memcpy(data, ptr, size);
   ptr += DIV_ROUND_UP(size, 4);
   return ptr;
}

/**
 * Write the size as uint followed by the data. Return the next dword
 * following the copied data.
 */
static uint32_t *write_chunk(uint32_t *ptr, const void *data, unsigned size)
{
   *ptr++ = size;
   return write_data(ptr, data, size);
}

/**
 * Read the size as uint followed by the data. Return both via parameters.
 * Return the next dword following the data.
 */
static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size)
{
   *size = *ptr++;
   assert(*data == NULL);
   if (!*size)
      return ptr;
   *data = malloc(*size);
   return read_data(ptr, *data, *size);
}
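/* A chunk written by write_chunk() is a 32-bit byte count followed by the
 * payload padded up to the next dword; read_chunk() undoes exactly that.
 */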
/**
 * Return the shader binary in a buffer. The first 4 bytes contain its size
 * as uint.
 */
static void *si_get_shader_binary(struct si_shader *shader)
{
   /* There is always a size of data followed by the data itself. */
   unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
                           strlen(shader->binary.llvm_ir_string) + 1 : 0;

   /* Refuse to allocate overly large buffers and guard against integer
    * overflow. */
   if (shader->binary.elf_size > UINT_MAX / 4 ||
       llvm_ir_size > UINT_MAX / 4)
      return NULL;

   unsigned size =
      4 + /* total size */
      4 + /* CRC32 of the data below */
      align(sizeof(shader->config), 4) +
      align(sizeof(shader->info), 4) +
      4 + align(shader->binary.elf_size, 4) +
      4 + align(llvm_ir_size, 4);
   void *buffer = CALLOC(1, size);
   uint32_t *ptr = (uint32_t*)buffer;

   if (!buffer)
      return NULL;

   *ptr++ = size;
   ptr++; /* CRC32 is calculated at the end. */

   ptr = write_data(ptr, &shader->config, sizeof(shader->config));
   ptr = write_data(ptr, &shader->info, sizeof(shader->info));
   ptr = write_chunk(ptr, shader->binary.elf_buffer, shader->binary.elf_size);
   ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size);
   assert((char *)ptr - (char *)buffer == size);

   /* Compute CRC32. */
   ptr = (uint32_t*)buffer;
   ptr++;
   *ptr = util_hash_crc32(ptr + 1, size - 8);

   return buffer;
}
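/* Resulting layout: [total size][CRC32][config][info][ELF chunk][LLVM IR chunk],
 * with the CRC32 covering everything after the first two dwords.
 */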
static bool si_load_shader_binary(struct si_shader *shader, void *binary)
{
   uint32_t *ptr = (uint32_t*)binary;
   uint32_t size = *ptr++;
   uint32_t crc32 = *ptr++;
   unsigned chunk_size;
   unsigned elf_size;

   if (util_hash_crc32(ptr, size - 8) != crc32) {
      fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n");
      return false;
   }

   ptr = read_data(ptr, &shader->config, sizeof(shader->config));
   ptr = read_data(ptr, &shader->info, sizeof(shader->info));
   ptr = read_chunk(ptr, (void**)&shader->binary.elf_buffer,
                    &elf_size);
   shader->binary.elf_size = elf_size;
   ptr = read_chunk(ptr, (void**)&shader->binary.llvm_ir_string, &chunk_size);

   return true;
}
/**
 * Insert a shader into the cache. It's assumed the shader is not in the cache.
 * Use si_shader_cache_load_shader before calling this.
 */
void si_shader_cache_insert_shader(struct si_screen *sscreen,
                                   unsigned char ir_sha1_cache_key[20],
                                   struct si_shader *shader,
                                   bool insert_into_disk_cache)
{
   void *hw_binary;
   struct hash_entry *entry;
   uint8_t key[CACHE_KEY_SIZE];

   entry = _mesa_hash_table_search(sscreen->shader_cache, ir_sha1_cache_key);
   if (entry)
      return; /* already added */

   hw_binary = si_get_shader_binary(shader);
   if (!hw_binary)
      return;

   if (_mesa_hash_table_insert(sscreen->shader_cache,
                               mem_dup(ir_sha1_cache_key, 20),
                               hw_binary) == NULL) {
      FREE(hw_binary);
      return;
   }

   if (sscreen->disk_shader_cache && insert_into_disk_cache) {
      disk_cache_compute_key(sscreen->disk_shader_cache,
                             ir_sha1_cache_key, 20, key);
      disk_cache_put(sscreen->disk_shader_cache, key, hw_binary,
                     *((uint32_t *) hw_binary), NULL);
   }
}
bool si_shader_cache_load_shader(struct si_screen *sscreen,
                                 unsigned char ir_sha1_cache_key[20],
                                 struct si_shader *shader)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(sscreen->shader_cache, ir_sha1_cache_key);
   if (entry) {
      if (si_load_shader_binary(shader, entry->data)) {
         p_atomic_inc(&sscreen->num_memory_shader_cache_hits);
         return true;
      }
   }
   p_atomic_inc(&sscreen->num_memory_shader_cache_misses);

   if (!sscreen->disk_shader_cache)
      return false;

   unsigned char sha1[CACHE_KEY_SIZE];
   disk_cache_compute_key(sscreen->disk_shader_cache, ir_sha1_cache_key,
                          20, sha1);

   size_t binary_size;
   uint8_t *buffer = disk_cache_get(sscreen->disk_shader_cache, sha1,
                                    &binary_size);
   if (buffer) {
      if (binary_size >= sizeof(uint32_t) &&
          *((uint32_t*)buffer) == binary_size) {
         if (si_load_shader_binary(shader, buffer)) {
            free(buffer);
            si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key,
                                          shader, false);
            p_atomic_inc(&sscreen->num_disk_shader_cache_hits);
            return true;
         }
      } else {
         /* Something has gone wrong, so discard the item from the cache and
          * rebuild/link from source.
          */
         assert(!"Invalid radeonsi shader disk cache item!");
         disk_cache_remove(sscreen->disk_shader_cache, sha1);
         free(buffer);
      }
   }

   p_atomic_inc(&sscreen->num_disk_shader_cache_misses);
   return false;
}
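/* Lookup order: the in-memory hash table first, then the on-disk cache; a
 * disk hit is re-inserted into the in-memory table (without writing it back
 * to disk) so later lookups stay cheap.
 */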
static uint32_t si_shader_cache_key_hash(const void *key)
{
   /* Take the first dword of SHA1. */
   return *(uint32_t*)key;
}

static bool si_shader_cache_key_equals(const void *a, const void *b)
{
   return memcmp(a, b, 20) == 0;
}
static void si_destroy_shader_cache_entry(struct hash_entry *entry)
{
   FREE((void*)entry->key);
   FREE(entry->data);
}

bool si_init_shader_cache(struct si_screen *sscreen)
{
   (void) simple_mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
   sscreen->shader_cache =
      _mesa_hash_table_create(NULL,
                              si_shader_cache_key_hash,
                              si_shader_cache_key_equals);

   return sscreen->shader_cache != NULL;
}

void si_destroy_shader_cache(struct si_screen *sscreen)
{
   if (sscreen->shader_cache)
      _mesa_hash_table_destroy(sscreen->shader_cache,
                               si_destroy_shader_cache_entry);
   simple_mtx_destroy(&sscreen->shader_cache_mutex);
}
static void si_set_tesseval_regs(struct si_screen *sscreen,
                                 const struct si_shader_selector *tes,
                                 struct si_pm4_state *pm4)
{
   const struct si_shader_info *info = &tes->info;
   unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
   unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
   bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
   bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
   unsigned type, partitioning, topology, distribution_mode;

   switch (tes_prim_mode) {
   case PIPE_PRIM_LINES:
      type = V_028B6C_TESS_ISOLINE;
      break;
   case PIPE_PRIM_TRIANGLES:
      type = V_028B6C_TESS_TRIANGLE;
      break;
   case PIPE_PRIM_QUADS:
      type = V_028B6C_TESS_QUAD;
      break;
   default:
      assert(0);
      return;
   }

   switch (tes_spacing) {
   case PIPE_TESS_SPACING_FRACTIONAL_ODD:
      partitioning = V_028B6C_PART_FRAC_ODD;
      break;
   case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
      partitioning = V_028B6C_PART_FRAC_EVEN;
      break;
   case PIPE_TESS_SPACING_EQUAL:
      partitioning = V_028B6C_PART_INTEGER;
      break;
   default:
      assert(0);
      return;
   }

   if (tes_point_mode)
      topology = V_028B6C_OUTPUT_POINT;
   else if (tes_prim_mode == PIPE_PRIM_LINES)
      topology = V_028B6C_OUTPUT_LINE;
   else if (tes_vertex_order_cw)
      /* for some reason, this must be the other way around */
      topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
   else
      topology = V_028B6C_OUTPUT_TRIANGLE_CW;

   if (sscreen->info.has_distributed_tess) {
      if (sscreen->info.family == CHIP_FIJI ||
          sscreen->info.family >= CHIP_POLARIS10)
         distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
      else
         distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
   } else {
      distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;
   }

   pm4->shader->vgt_tf_param = S_028B6C_TYPE(type) |
                               S_028B6C_PARTITIONING(partitioning) |
                               S_028B6C_TOPOLOGY(topology) |
                               S_028B6C_DISTRIBUTION_MODE(distribution_mode);
}
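/* VGT_TF_PARAM packs the tessellation primitive type, partitioning mode,
 * output topology and distribution mode; the value computed here is emitted
 * later by the ES/GS/VS atom emit callbacks.
 */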
/* Polaris needs different VTX_REUSE_DEPTH settings depending on
 * whether the "fractional odd" tessellation spacing is used.
 *
 * Possible VGT configurations and which state should set the register:
 *
 *   Reg set in | VGT shader configuration   | Value
 * ------------------------------------------------------
 *     VS as VS | VS                         | 30
 *     VS as ES | ES -> GS -> VS             | 30
 *    TES as VS | LS -> HS -> VS             | 14 or 30
 *    TES as ES | LS -> HS -> ES -> GS -> VS | 14 or 30
 *
 * If "shader" is NULL, it's assumed it's not LS or GS copy shader.
 */
static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen,
                                         struct si_shader_selector *sel,
                                         struct si_shader *shader,
                                         struct si_pm4_state *pm4)
{
   unsigned type = sel->type;

   if (sscreen->info.family < CHIP_POLARIS10 ||
       sscreen->info.chip_class >= GFX10)
      return;

   /* VS as VS, or VS as ES: */
   if ((type == PIPE_SHADER_VERTEX &&
        (!shader ||
         (!shader->key.as_ls && !shader->is_gs_copy_shader))) ||
       /* TES as VS, or TES as ES: */
       type == PIPE_SHADER_TESS_EVAL) {
      unsigned vtx_reuse_depth = 30;

      if (type == PIPE_SHADER_TESS_EVAL &&
          sel->info.properties[TGSI_PROPERTY_TES_SPACING] ==
          PIPE_TESS_SPACING_FRACTIONAL_ODD)
         vtx_reuse_depth = 14;

      pm4->shader->vgt_vertex_reuse_block_cntl = vtx_reuse_depth;
   }
}
static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
{
   if (shader->pm4)
      si_pm4_clear_state(shader->pm4);
   else
      shader->pm4 = CALLOC_STRUCT(si_pm4_state);

   if (shader->pm4) {
      shader->pm4->shader = shader;
      return shader->pm4;
   } else {
      fprintf(stderr, "radeonsi: Failed to create pm4 state.\n");
      return NULL;
   }
}
static unsigned si_get_num_vs_user_sgprs(struct si_shader *shader,
                                         unsigned num_always_on_user_sgprs)
{
   struct si_shader_selector *vs = shader->previous_stage_sel ?
      shader->previous_stage_sel : shader->selector;
   unsigned num_vbos_in_user_sgprs = vs->num_vbos_in_user_sgprs;

   /* 1 SGPR is reserved for the vertex buffer pointer. */
   assert(num_always_on_user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST - 1);

   if (num_vbos_in_user_sgprs)
      return SI_SGPR_VS_VB_DESCRIPTOR_FIRST + num_vbos_in_user_sgprs * 4;

   /* Add the pointer to VBO descriptors. */
   return num_always_on_user_sgprs + 1;
}
/* Return VGPR_COMP_CNT for the API vertex shader. This can be hw LS, LSHS, ES, ESGS, VS. */
static unsigned si_get_vs_vgpr_comp_cnt(struct si_screen *sscreen,
                                        struct si_shader *shader, bool legacy_vs_prim_id)
{
   assert(shader->selector->type == PIPE_SHADER_VERTEX ||
          (shader->previous_stage_sel &&
           shader->previous_stage_sel->type == PIPE_SHADER_VERTEX));

   /* GFX6-9 LS    (VertexID, RelAutoindex, InstanceID / StepRate0(==1), ...).
    * GFX6-9 ES,VS (VertexID, InstanceID / StepRate0(==1), VSPrimID, ...)
    * GFX10  LS    (VertexID, RelAutoindex, UserVGPR1, InstanceID).
    * GFX10  ES,VS (VertexID, UserVGPR0, UserVGPR1 or VSPrimID, UserVGPR2 or InstanceID)
    */
   bool is_ls = shader->selector->type == PIPE_SHADER_TESS_CTRL || shader->key.as_ls;

   if (sscreen->info.chip_class >= GFX10 && shader->info.uses_instanceid)
      return 3;
   else if ((is_ls && shader->info.uses_instanceid) || legacy_vs_prim_id)
      return 2;
   else if (is_ls || shader->info.uses_instanceid)
      return 1;
   else
      return 0;
}
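/* The returned value selects how many of the per-vertex input VGPRs listed
 * in the layout table above the hardware preloads after VertexID; larger
 * values are needed as soon as RelAutoindex, PrimitiveID or InstanceID are
 * used by the shader.
 */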
static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
{
   struct si_pm4_state *pm4;
   uint64_t va;

   assert(sscreen->info.chip_class <= GFX8);

   pm4 = si_get_shader_pm4_state(shader);
   if (!pm4)
      return;

   va = shader->bo->gpu_address;
   si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

   si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
   si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40));

   shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
                          S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
                          S_00B528_VGPR_COMP_CNT(si_get_vs_vgpr_comp_cnt(sscreen, shader, false)) |
                          S_00B528_DX10_CLAMP(1) |
                          S_00B528_FLOAT_MODE(shader->config.float_mode);
   shader->config.rsrc2 = S_00B52C_USER_SGPR(si_get_num_vs_user_sgprs(shader, SI_VS_NUM_USER_SGPR)) |
                          S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
}
static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
{
   struct si_pm4_state *pm4;
   uint64_t va;

   pm4 = si_get_shader_pm4_state(shader);
   if (!pm4)
      return;

   va = shader->bo->gpu_address;
   si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

   if (sscreen->info.chip_class >= GFX9) {
      if (sscreen->info.chip_class >= GFX10) {
         si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
         si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40));
      } else {
         si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
         si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40));
      }

      unsigned num_user_sgprs =
         si_get_num_vs_user_sgprs(shader, GFX9_TCS_NUM_USER_SGPR);

      shader->config.rsrc2 =
         S_00B42C_USER_SGPR(num_user_sgprs) |
         S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);

      if (sscreen->info.chip_class >= GFX10)
         shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
      else
         shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
   } else {
      si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
      si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, S_00B424_MEM_BASE(va >> 40));

      shader->config.rsrc2 =
         S_00B42C_USER_SGPR(GFX6_TCS_NUM_USER_SGPR) |
         S_00B42C_OC_LDS_EN(1) |
         S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
   }

   si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
                  S_00B428_VGPRS((shader->config.num_vgprs - 1) /
                                 (sscreen->ge_wave_size == 32 ? 8 : 4)) |
                  (sscreen->info.chip_class <= GFX9 ?
                     S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) : 0) |
                  S_00B428_DX10_CLAMP(1) |
                  S_00B428_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
                  S_00B428_WGP_MODE(sscreen->info.chip_class >= GFX10) |
                  S_00B428_FLOAT_MODE(shader->config.float_mode) |
                  S_00B428_LS_VGPR_COMP_CNT(sscreen->info.chip_class >= GFX9 ?
                                            si_get_vs_vgpr_comp_cnt(sscreen, shader, false) : 0));

   if (sscreen->info.chip_class <= GFX8) {
      si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
                     shader->config.rsrc2);
   }
}
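/* On GFX9+ the HS stage runs merged with LS, so the program address above is
 * written to the SPI_SHADER_PGM_*_LS pair (whose offset moved between GFX9
 * and GFX10); the standalone HS program registers are only used on GFX6-8.
 */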
static void si_emit_shader_es(struct si_context *sctx)
{
   struct si_shader *shader = sctx->queued.named.es->shader;
   unsigned initial_cdw = sctx->gfx_cs->current.cdw;

   if (!shader)
      return;

   radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
                              SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
                              shader->selector->esgs_itemsize / 4);

   if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
      radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
                                 SI_TRACKED_VGT_TF_PARAM,
                                 shader->vgt_tf_param);

   if (shader->vgt_vertex_reuse_block_cntl)
      radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                 SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                 shader->vgt_vertex_reuse_block_cntl);

   if (initial_cdw != sctx->gfx_cs->current.cdw)
      sctx->context_roll = true;
}
static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
{
   struct si_pm4_state *pm4;
   unsigned num_user_sgprs;
   unsigned vgpr_comp_cnt;
   uint64_t va;
   unsigned oc_lds_en;

   assert(sscreen->info.chip_class <= GFX8);

   pm4 = si_get_shader_pm4_state(shader);
   if (!pm4)
      return;

   pm4->atom.emit = si_emit_shader_es;
   va = shader->bo->gpu_address;
   si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

   if (shader->selector->type == PIPE_SHADER_VERTEX) {
      vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
      num_user_sgprs = si_get_num_vs_user_sgprs(shader, SI_VS_NUM_USER_SGPR);
   } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
      vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
      num_user_sgprs = SI_TES_NUM_USER_SGPR;
   } else
      unreachable("invalid shader selector type");

   oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

   si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
   si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(va >> 40));
   si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
                  S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
                  S_00B328_SGPRS((shader->config.num_sgprs - 1) / 8) |
                  S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
                  S_00B328_DX10_CLAMP(1) |
                  S_00B328_FLOAT_MODE(shader->config.float_mode));
   si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
                  S_00B32C_USER_SGPR(num_user_sgprs) |
                  S_00B32C_OC_LDS_EN(oc_lds_en) |
                  S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

   if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
      si_set_tesseval_regs(sscreen, shader->selector, pm4);

   polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
void gfx9_get_gs_info(struct si_shader_selector *es,
                      struct si_shader_selector *gs,
                      struct gfx9_gs_info *out)
{
   unsigned gs_num_invocations = MAX2(gs->gs_num_invocations, 1);
   unsigned input_prim = gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
   bool uses_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
                         input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;

   /* All these are in dwords: */
   /* We can't allow using the whole LDS, because GS waves compete with
    * other shader stages for LDS space. */
   const unsigned max_lds_size = 8 * 1024;
   const unsigned esgs_itemsize = es->esgs_itemsize / 4;
   unsigned esgs_lds_size;

   /* All these are per subgroup: */
   const unsigned max_out_prims = 32 * 1024;
   const unsigned max_es_verts = 255;
   const unsigned ideal_gs_prims = 64;
   unsigned max_gs_prims, gs_prims;
   unsigned min_es_verts, es_verts, worst_case_es_verts;

   if (uses_adjacency || gs_num_invocations > 1)
      max_gs_prims = 127 / gs_num_invocations;
   else
      max_gs_prims = 255;

   /* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
    * Make sure we don't go over the maximum value.
    */
   if (gs->gs_max_out_vertices > 0) {
      max_gs_prims = MIN2(max_gs_prims,
                          max_out_prims /
                          (gs->gs_max_out_vertices * gs_num_invocations));
   }
   assert(max_gs_prims > 0);

   /* If the primitive has adjacency, halve the number of vertices
    * that will be reused in multiple primitives.
    */
   min_es_verts = gs->gs_input_verts_per_prim / (uses_adjacency ? 2 : 1);

   gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
   worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

   /* Compute ESGS LDS size based on the worst case number of ES vertices
    * needed to create the target number of GS prims per subgroup.
    */
   esgs_lds_size = esgs_itemsize * worst_case_es_verts;

   /* If total LDS usage is too big, refactor partitions based on ratio
    * of ESGS item sizes.
    */
   if (esgs_lds_size > max_lds_size) {
      /* Our target GS Prims Per Subgroup was too large. Calculate
       * the maximum number of GS Prims Per Subgroup that will fit
       * into LDS, capped by the maximum that the hardware can support.
       */
      gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
                      max_gs_prims);
      assert(gs_prims > 0);
      worst_case_es_verts = MIN2(min_es_verts * gs_prims,
                                 max_es_verts);

      esgs_lds_size = esgs_itemsize * worst_case_es_verts;
      assert(esgs_lds_size <= max_lds_size);
   }

   /* Now calculate remaining ESGS information. */
   if (esgs_lds_size)
      es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
   else
      es_verts = max_es_verts;

   /* Vertices for adjacency primitives are not always reused, so restore
    * it for ES_VERTS_PER_SUBGRP.
    */
   min_es_verts = gs->gs_input_verts_per_prim;

   /* For normal primitives, the VGT only checks if they are past the ES
    * verts per subgroup after allocating a full GS primitive and if they
    * are, kick off a new subgroup. But if those additional ES verts are
    * unique (e.g. not reused) we need to make sure there is enough LDS
    * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
    */
   es_verts -= min_es_verts - 1;

   out->es_verts_per_subgroup = es_verts;
   out->gs_prims_per_subgroup = gs_prims;
   out->gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
   out->max_prims_per_subgroup = out->gs_inst_prims_in_subgroup *
                                 gs->gs_max_out_vertices;
   out->esgs_ring_size = 4 * esgs_lds_size;

   assert(out->max_prims_per_subgroup <= max_out_prims);
}
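/* All of the sizing above is done in dwords; esgs_ring_size is converted to
 * bytes at the end, hence the final multiplication by 4.
 */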
static void si_emit_shader_gs(struct si_context *sctx)
{
   struct si_shader *shader = sctx->queued.named.gs->shader;
   unsigned initial_cdw = sctx->gfx_cs->current.cdw;

   if (!shader)
      return;

   /* R_028A60_VGT_GSVS_RING_OFFSET_1, R_028A64_VGT_GSVS_RING_OFFSET_2
    * R_028A68_VGT_GSVS_RING_OFFSET_3 */
   radeon_opt_set_context_reg3(sctx, R_028A60_VGT_GSVS_RING_OFFSET_1,
                               SI_TRACKED_VGT_GSVS_RING_OFFSET_1,
                               shader->ctx_reg.gs.vgt_gsvs_ring_offset_1,
                               shader->ctx_reg.gs.vgt_gsvs_ring_offset_2,
                               shader->ctx_reg.gs.vgt_gsvs_ring_offset_3);

   /* R_028AB0_VGT_GSVS_RING_ITEMSIZE */
   radeon_opt_set_context_reg(sctx, R_028AB0_VGT_GSVS_RING_ITEMSIZE,
                              SI_TRACKED_VGT_GSVS_RING_ITEMSIZE,
                              shader->ctx_reg.gs.vgt_gsvs_ring_itemsize);

   /* R_028B38_VGT_GS_MAX_VERT_OUT */
   radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT,
                              SI_TRACKED_VGT_GS_MAX_VERT_OUT,
                              shader->ctx_reg.gs.vgt_gs_max_vert_out);

   /* R_028B5C_VGT_GS_VERT_ITEMSIZE, R_028B60_VGT_GS_VERT_ITEMSIZE_1
    * R_028B64_VGT_GS_VERT_ITEMSIZE_2, R_028B68_VGT_GS_VERT_ITEMSIZE_3 */
   radeon_opt_set_context_reg4(sctx, R_028B5C_VGT_GS_VERT_ITEMSIZE,
                               SI_TRACKED_VGT_GS_VERT_ITEMSIZE,
                               shader->ctx_reg.gs.vgt_gs_vert_itemsize,
                               shader->ctx_reg.gs.vgt_gs_vert_itemsize_1,
                               shader->ctx_reg.gs.vgt_gs_vert_itemsize_2,
                               shader->ctx_reg.gs.vgt_gs_vert_itemsize_3);

   /* R_028B90_VGT_GS_INSTANCE_CNT */
   radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT,
                              SI_TRACKED_VGT_GS_INSTANCE_CNT,
                              shader->ctx_reg.gs.vgt_gs_instance_cnt);

   if (sctx->chip_class >= GFX9) {
      /* R_028A44_VGT_GS_ONCHIP_CNTL */
      radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
                                 SI_TRACKED_VGT_GS_ONCHIP_CNTL,
                                 shader->ctx_reg.gs.vgt_gs_onchip_cntl);
      /* R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP */
      radeon_opt_set_context_reg(sctx, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
                                 SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
                                 shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup);
      /* R_028AAC_VGT_ESGS_RING_ITEMSIZE */
      radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
                                 SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
                                 shader->ctx_reg.gs.vgt_esgs_ring_itemsize);

      if (shader->key.part.gs.es->type == PIPE_SHADER_TESS_EVAL)
         radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
                                    SI_TRACKED_VGT_TF_PARAM,
                                    shader->vgt_tf_param);
      if (shader->vgt_vertex_reuse_block_cntl)
         radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                    SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                    shader->vgt_vertex_reuse_block_cntl);
   }

   if (initial_cdw != sctx->gfx_cs->current.cdw)
      sctx->context_roll = true;
}
static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
{
   struct si_shader_selector *sel = shader->selector;
   const ubyte *num_components = sel->info.num_stream_output_components;
   unsigned gs_num_invocations = sel->gs_num_invocations;
   struct si_pm4_state *pm4;
   uint64_t va;
   unsigned max_stream = sel->max_gs_stream;
   unsigned offset;

   pm4 = si_get_shader_pm4_state(shader);
   if (!pm4)
      return;

   pm4->atom.emit = si_emit_shader_gs;

   offset = num_components[0] * sel->gs_max_out_vertices;
   shader->ctx_reg.gs.vgt_gsvs_ring_offset_1 = offset;

   if (max_stream >= 1)
      offset += num_components[1] * sel->gs_max_out_vertices;
   shader->ctx_reg.gs.vgt_gsvs_ring_offset_2 = offset;

   if (max_stream >= 2)
      offset += num_components[2] * sel->gs_max_out_vertices;
   shader->ctx_reg.gs.vgt_gsvs_ring_offset_3 = offset;

   if (max_stream >= 3)
      offset += num_components[3] * sel->gs_max_out_vertices;
   shader->ctx_reg.gs.vgt_gsvs_ring_itemsize = offset;

   /* The GSVS_RING_ITEMSIZE register takes 15 bits */
   assert(offset < (1 << 15));

   shader->ctx_reg.gs.vgt_gs_max_vert_out = sel->gs_max_out_vertices;

   shader->ctx_reg.gs.vgt_gs_vert_itemsize = num_components[0];
   shader->ctx_reg.gs.vgt_gs_vert_itemsize_1 = (max_stream >= 1) ? num_components[1] : 0;
   shader->ctx_reg.gs.vgt_gs_vert_itemsize_2 = (max_stream >= 2) ? num_components[2] : 0;
   shader->ctx_reg.gs.vgt_gs_vert_itemsize_3 = (max_stream >= 3) ? num_components[3] : 0;

   shader->ctx_reg.gs.vgt_gs_instance_cnt = S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
                                            S_028B90_ENABLE(gs_num_invocations > 0);

   va = shader->bo->gpu_address;
   si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

   if (sscreen->info.chip_class >= GFX9) {
      unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
      unsigned es_type = shader->key.part.gs.es->type;
      unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;

      if (es_type == PIPE_SHADER_VERTEX) {
         es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
      } else if (es_type == PIPE_SHADER_TESS_EVAL)
         es_vgpr_comp_cnt = shader->key.part.gs.es->info.uses_primid ? 3 : 2;
      else
         unreachable("invalid shader selector type");

      /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
       * VGPR[0:4] are always loaded.
       */
      if (sel->info.uses_invocationid)
         gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
      else if (sel->info.uses_primid)
         gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
      else if (input_prim >= PIPE_PRIM_TRIANGLES)
         gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
      else
         gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */

      unsigned num_user_sgprs;
      if (es_type == PIPE_SHADER_VERTEX)
         num_user_sgprs = si_get_num_vs_user_sgprs(shader, GFX9_VSGS_NUM_USER_SGPR);
      else
         num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;

      if (sscreen->info.chip_class >= GFX10) {
         si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
         si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(va >> 40));
      } else {
         si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
         si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, S_00B214_MEM_BASE(va >> 40));
      }

      uint32_t rsrc1 =
         S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
         S_00B228_DX10_CLAMP(1) |
         S_00B228_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
         S_00B228_WGP_MODE(sscreen->info.chip_class >= GFX10) |
         S_00B228_FLOAT_MODE(shader->config.float_mode) |
         S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
      uint32_t rsrc2 =
         S_00B22C_USER_SGPR(num_user_sgprs) |
         S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
         S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
         S_00B22C_LDS_SIZE(shader->config.lds_size) |
         S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);

      if (sscreen->info.chip_class >= GFX10) {
         rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
      } else {
         rsrc1 |= S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8);
         rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
      }

      si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS, rsrc1);
      si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS, rsrc2);

      if (sscreen->info.chip_class >= GFX10) {
         si_pm4_set_reg(pm4, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
                        S_00B204_CU_EN(0xffff) |
                        S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(0));
      }

      shader->ctx_reg.gs.vgt_gs_onchip_cntl =
         S_028A44_ES_VERTS_PER_SUBGRP(shader->gs_info.es_verts_per_subgroup) |
         S_028A44_GS_PRIMS_PER_SUBGRP(shader->gs_info.gs_prims_per_subgroup) |
         S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->gs_info.gs_inst_prims_in_subgroup);
      shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup =
         S_028A94_MAX_PRIMS_PER_SUBGROUP(shader->gs_info.max_prims_per_subgroup);
      shader->ctx_reg.gs.vgt_esgs_ring_itemsize =
         shader->key.part.gs.es->esgs_itemsize / 4;

      if (es_type == PIPE_SHADER_TESS_EVAL)
         si_set_tesseval_regs(sscreen, shader->key.part.gs.es, pm4);

      polaris_set_vgt_vertex_reuse(sscreen, shader->key.part.gs.es,
                                   NULL, pm4);
   } else {
      si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
      si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, S_00B224_MEM_BASE(va >> 40));

      si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
                     S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
                     S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
                     S_00B228_DX10_CLAMP(1) |
                     S_00B228_FLOAT_MODE(shader->config.float_mode));
      si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
                     S_00B22C_USER_SGPR(GFX6_GS_NUM_USER_SGPR) |
                     S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
   }
}
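/* On GFX9+ the GS runs merged with its ES, so the program address above is
 * written to the ES program registers (whose offsets differ between GFX9 and
 * GFX10); the GFX6-8 branch programs the standalone GS registers instead.
 */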
static void gfx10_emit_ge_pc_alloc(struct si_context *sctx, unsigned value)
{
   enum si_tracked_reg reg = SI_TRACKED_GE_PC_ALLOC;

   if (((sctx->tracked_regs.reg_saved >> reg) & 0x1) != 0x1 ||
       sctx->tracked_regs.reg_value[reg] != value) {
      struct radeon_cmdbuf *cs = sctx->gfx_cs;

      if (sctx->family == CHIP_NAVI10 ||
          sctx->family == CHIP_NAVI12 ||
          sctx->family == CHIP_NAVI14) {
         /* SQ_NON_EVENT must be emitted before GE_PC_ALLOC is written. */
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
      }

      radeon_set_uconfig_reg(cs, R_030980_GE_PC_ALLOC, value);

      sctx->tracked_regs.reg_saved |= 0x1ull << reg;
      sctx->tracked_regs.reg_value[reg] = value;
   }
}
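/* GE_PC_ALLOC is a uconfig register; the tracked_regs bookkeeping above skips
 * the write (and the Navi1x SQ_NON_EVENT workaround) when the value has not
 * changed since the last emit.
 */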
/* Common tail code for NGG primitive shaders. */
static void gfx10_emit_shader_ngg_tail(struct si_context *sctx,
                                       struct si_shader *shader,
                                       unsigned initial_cdw)
{
   radeon_opt_set_context_reg(sctx, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
                              SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP,
                              shader->ctx_reg.ngg.ge_max_output_per_subgroup);
   radeon_opt_set_context_reg(sctx, R_028B4C_GE_NGG_SUBGRP_CNTL,
                              SI_TRACKED_GE_NGG_SUBGRP_CNTL,
                              shader->ctx_reg.ngg.ge_ngg_subgrp_cntl);
   radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN,
                              SI_TRACKED_VGT_PRIMITIVEID_EN,
                              shader->ctx_reg.ngg.vgt_primitiveid_en);
   radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
                              SI_TRACKED_VGT_GS_ONCHIP_CNTL,
                              shader->ctx_reg.ngg.vgt_gs_onchip_cntl);
   radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT,
                              SI_TRACKED_VGT_GS_INSTANCE_CNT,
                              shader->ctx_reg.ngg.vgt_gs_instance_cnt);
   radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
                              SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
                              shader->ctx_reg.ngg.vgt_esgs_ring_itemsize);
   radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG,
                              SI_TRACKED_SPI_VS_OUT_CONFIG,
                              shader->ctx_reg.ngg.spi_vs_out_config);
   radeon_opt_set_context_reg2(sctx, R_028708_SPI_SHADER_IDX_FORMAT,
                               SI_TRACKED_SPI_SHADER_IDX_FORMAT,
                               shader->ctx_reg.ngg.spi_shader_idx_format,
                               shader->ctx_reg.ngg.spi_shader_pos_format);
   radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL,
                              SI_TRACKED_PA_CL_VTE_CNTL,
                              shader->ctx_reg.ngg.pa_cl_vte_cntl);
   radeon_opt_set_context_reg(sctx, R_028838_PA_CL_NGG_CNTL,
                              SI_TRACKED_PA_CL_NGG_CNTL,
                              shader->ctx_reg.ngg.pa_cl_ngg_cntl);
   radeon_opt_set_context_reg_rmw(sctx, R_02881C_PA_CL_VS_OUT_CNTL,
                                  SI_TRACKED_PA_CL_VS_OUT_CNTL__VS,
                                  shader->pa_cl_vs_out_cntl,
                                  SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK);

   if (initial_cdw != sctx->gfx_cs->current.cdw)
      sctx->context_roll = true;

   /* GE_PC_ALLOC is not a context register, so it doesn't cause a context roll. */
   gfx10_emit_ge_pc_alloc(sctx, shader->ctx_reg.ngg.ge_pc_alloc);
}
static void gfx10_emit_shader_ngg_notess_nogs(struct si_context *sctx)
{
   struct si_shader *shader = sctx->queued.named.gs->shader;
   unsigned initial_cdw = sctx->gfx_cs->current.cdw;

   if (!shader)
      return;

   gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
}

static void gfx10_emit_shader_ngg_tess_nogs(struct si_context *sctx)
{
   struct si_shader *shader = sctx->queued.named.gs->shader;
   unsigned initial_cdw = sctx->gfx_cs->current.cdw;

   if (!shader)
      return;

   radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
                              SI_TRACKED_VGT_TF_PARAM,
                              shader->vgt_tf_param);

   gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
}

static void gfx10_emit_shader_ngg_notess_gs(struct si_context *sctx)
{
   struct si_shader *shader = sctx->queued.named.gs->shader;
   unsigned initial_cdw = sctx->gfx_cs->current.cdw;

   if (!shader)
      return;

   radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT,
                              SI_TRACKED_VGT_GS_MAX_VERT_OUT,
                              shader->ctx_reg.ngg.vgt_gs_max_vert_out);

   gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
}

static void gfx10_emit_shader_ngg_tess_gs(struct si_context *sctx)
{
   struct si_shader *shader = sctx->queued.named.gs->shader;
   unsigned initial_cdw = sctx->gfx_cs->current.cdw;

   if (!shader)
      return;

   radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT,
                              SI_TRACKED_VGT_GS_MAX_VERT_OUT,
                              shader->ctx_reg.ngg.vgt_gs_max_vert_out);
   radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
                              SI_TRACKED_VGT_TF_PARAM,
                              shader->vgt_tf_param);

   gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
}
unsigned si_get_input_prim(const struct si_shader_selector *gs)
{
   if (gs->type == PIPE_SHADER_GEOMETRY)
      return gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];

   if (gs->type == PIPE_SHADER_TESS_EVAL) {
      if (gs->info.properties[TGSI_PROPERTY_TES_POINT_MODE])
         return PIPE_PRIM_POINTS;
      if (gs->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
         return PIPE_PRIM_LINES;
      return PIPE_PRIM_TRIANGLES;
   }

   /* TODO: Set this correctly if the primitive type is set in the shader key. */
   return PIPE_PRIM_TRIANGLES; /* worst case for all callers */
}
static unsigned si_get_vs_out_cntl(const struct si_shader_selector *sel, bool ngg)
{
   bool misc_vec_ena =
      sel->info.writes_psize || (sel->info.writes_edgeflag && !ngg) ||
      sel->info.writes_layer || sel->info.writes_viewport_index;
   return S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) |
          S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag && !ngg) |
          S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
          S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
          S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
          S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
}
/**
 * Prepare the PM4 image for \p shader, which will run as a merged ESGS shader
 * in NGG mode.
 */
static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader)
{
   const struct si_shader_selector *gs_sel = shader->selector;
   const struct si_shader_info *gs_info = &gs_sel->info;
   enum pipe_shader_type gs_type = shader->selector->type;
   const struct si_shader_selector *es_sel =
      shader->previous_stage_sel ? shader->previous_stage_sel : shader->selector;
   const struct si_shader_info *es_info = &es_sel->info;
   enum pipe_shader_type es_type = es_sel->type;
   unsigned num_user_sgprs;
   unsigned nparams, es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
   uint64_t va;
   unsigned window_space =
      gs_info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
   bool es_enable_prim_id = shader->key.mono.u.vs_export_prim_id || es_info->uses_primid;
   unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
   unsigned input_prim = si_get_input_prim(gs_sel);
   bool break_wave_at_eoi = false;
   struct si_pm4_state *pm4 = si_get_shader_pm4_state(shader);

   if (!pm4)
      return;

   if (es_type == PIPE_SHADER_TESS_EVAL) {
      pm4->atom.emit = gs_type == PIPE_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_tess_gs
                                                       : gfx10_emit_shader_ngg_tess_nogs;
   } else {
      pm4->atom.emit = gs_type == PIPE_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_notess_gs
                                                       : gfx10_emit_shader_ngg_notess_nogs;
   }

   va = shader->bo->gpu_address;
   si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

   if (es_type == PIPE_SHADER_VERTEX) {
      es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);

      if (es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
         num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
                          es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
      } else {
         num_user_sgprs = si_get_num_vs_user_sgprs(shader, GFX9_VSGS_NUM_USER_SGPR);
      }
   } else {
      assert(es_type == PIPE_SHADER_TESS_EVAL);
      es_vgpr_comp_cnt = es_enable_prim_id ? 3 : 2;
      num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;

      if (es_enable_prim_id || gs_info->uses_primid)
         break_wave_at_eoi = true;
   }

   /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
    * VGPR[0:4] are always loaded.
    *
    * Vertex shaders always need to load VGPR3, because they need to
    * pass edge flags for decomposed primitives (such as quads) to the PA
    * for the GL_LINE polygon mode to skip rendering lines on inner edges.
    */
   if (gs_info->uses_invocationid ||
       (gs_type == PIPE_SHADER_VERTEX && !gfx10_is_ngg_passthrough(shader)))
      gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID, edge flags. */
   else if ((gs_type == PIPE_SHADER_GEOMETRY && gs_info->uses_primid) ||
            (gs_type == PIPE_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id))
      gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
   else if (input_prim >= PIPE_PRIM_TRIANGLES && !gfx10_is_ngg_passthrough(shader))
      gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
   else
      gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */

   si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
   si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
   si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
                  S_00B228_VGPRS((shader->config.num_vgprs - 1) /
                                 (sscreen->ge_wave_size == 32 ? 8 : 4)) |
                  S_00B228_FLOAT_MODE(shader->config.float_mode) |
                  S_00B228_DX10_CLAMP(1) |
                  S_00B228_MEM_ORDERED(1) |
                  S_00B228_WGP_MODE(1) |
                  S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
   si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
                  S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0) |
                  S_00B22C_USER_SGPR(num_user_sgprs) |
                  S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
                  S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5) |
                  S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
                  S_00B22C_LDS_SIZE(shader->config.lds_size));
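   /* The divisor in the VGPRS field above reflects the VGPR allocation
    * granularity: 8 VGPRs per unit in wave32 mode, 4 in wave64 mode.
    */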
   /* Determine LATE_ALLOC_GS. */
   unsigned num_cu_per_sh = sscreen->info.num_good_cu_per_sh;
   unsigned late_alloc_wave64; /* The limit is per SH. */

   /* For Wave32, the hw will launch twice the number of late
    * alloc waves, so 1 == 2x wave32.
    *
    * Don't use late alloc for NGG on Navi14 due to a hw bug.
    */
   if (sscreen->info.family == CHIP_NAVI14 || !sscreen->info.use_late_alloc)
      late_alloc_wave64 = 0;
   else if (num_cu_per_sh <= 6)
      late_alloc_wave64 = num_cu_per_sh - 2; /* All CUs enabled */
   else if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL)
      late_alloc_wave64 = (num_cu_per_sh - 2) * 6;
   else
      late_alloc_wave64 = (num_cu_per_sh - 2) * 4;

   /* Limit LATE_ALLOC_GS to prevent a hang (hw bug). */
   if (sscreen->info.family == CHIP_NAVI10 ||
       sscreen->info.family == CHIP_NAVI12 ||
       sscreen->info.family == CHIP_NAVI14)
      late_alloc_wave64 = MIN2(late_alloc_wave64, 64);

   si_pm4_set_reg(pm4, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
                  S_00B204_CU_EN(0xffff) |
                  S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(late_alloc_wave64));

   nparams = MAX2(shader->info.nr_param_exports, 1);
   shader->ctx_reg.ngg.spi_vs_out_config =
      S_0286C4_VS_EXPORT_COUNT(nparams - 1) |
      S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0);

   shader->ctx_reg.ngg.spi_shader_idx_format =
      S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP);
   shader->ctx_reg.ngg.spi_shader_pos_format =
      S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
      S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
                                  V_02870C_SPI_SHADER_4COMP :
                                  V_02870C_SPI_SHADER_NONE) |
      S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
                                  V_02870C_SPI_SHADER_4COMP :
                                  V_02870C_SPI_SHADER_NONE) |
      S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
                                  V_02870C_SPI_SHADER_4COMP :
                                  V_02870C_SPI_SHADER_NONE);
   shader->ctx_reg.ngg.vgt_primitiveid_en =
      S_028A84_PRIMITIVEID_EN(es_enable_prim_id) |
      S_028A84_NGG_DISABLE_PROVOK_REUSE(shader->key.mono.u.vs_export_prim_id ||
                                        gs_sel->info.writes_primid);

   if (gs_type == PIPE_SHADER_GEOMETRY) {
      shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = es_sel->esgs_itemsize / 4;
      shader->ctx_reg.ngg.vgt_gs_max_vert_out = gs_sel->gs_max_out_vertices;
   } else {
      shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = 1;
   }

   if (es_type == PIPE_SHADER_TESS_EVAL)
      si_set_tesseval_regs(sscreen, es_sel, pm4);

   shader->ctx_reg.ngg.vgt_gs_onchip_cntl =
      S_028A44_ES_VERTS_PER_SUBGRP(shader->ngg.hw_max_esverts) |
      S_028A44_GS_PRIMS_PER_SUBGRP(shader->ngg.max_gsprims) |
      S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->ngg.max_gsprims * gs_num_invocations);
   shader->ctx_reg.ngg.ge_max_output_per_subgroup =
      S_0287FC_MAX_VERTS_PER_SUBGROUP(shader->ngg.max_out_verts);
   shader->ctx_reg.ngg.ge_ngg_subgrp_cntl =
      S_028B4C_PRIM_AMP_FACTOR(shader->ngg.prim_amp_factor) |
      S_028B4C_THDS_PER_SUBGRP(0); /* for fast launch */
   shader->ctx_reg.ngg.vgt_gs_instance_cnt =
      S_028B90_CNT(gs_num_invocations) |
      S_028B90_ENABLE(gs_num_invocations > 1) |
      S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(
         shader->ngg.max_vert_out_per_gs_instance);

   /* Always output hw-generated edge flags and pass them via the prim
    * export to prevent drawing lines on internal edges of decomposed
    * primitives (such as quads) with polygon mode = lines. Only VS needs
    * this.
    */
   shader->ctx_reg.ngg.pa_cl_ngg_cntl =
      S_028838_INDEX_BUF_EDGE_FLAG_ENA(gs_type == PIPE_SHADER_VERTEX);
   shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(gs_sel, true);

   /* Oversubscribe PC. This improves performance when there are too many varyings. */
   float oversub_pc_factor = 0.25;

   if (shader->key.opt.ngg_culling) {
      /* Be more aggressive with NGG culling. */
      if (shader->info.nr_param_exports > 4)
         oversub_pc_factor = 1;
      else if (shader->info.nr_param_exports > 2)
         oversub_pc_factor = 0.75;
      else
         oversub_pc_factor = 0.5;
   }

   unsigned oversub_pc_lines = sscreen->info.pc_lines * oversub_pc_factor;
   shader->ctx_reg.ngg.ge_pc_alloc = S_030980_OVERSUB_EN(sscreen->info.use_late_alloc) |
                                     S_030980_NUM_PC_LINES(oversub_pc_lines - 1);

   if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST) {
      shader->ge_cntl =
         S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) |
         S_03096C_VERT_GRP_SIZE(shader->ngg.max_gsprims * 3);
   } else if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP) {
      shader->ge_cntl =
         S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) |
         S_03096C_VERT_GRP_SIZE(shader->ngg.max_gsprims + 2);
   } else {
      shader->ge_cntl =
         S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) |
         S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
         S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);

      /* Bug workaround for a possible hang with non-tessellation cases.
       * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0
       *
       * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
       */
      if ((sscreen->info.family == CHIP_NAVI10 ||
           sscreen->info.family == CHIP_NAVI12 ||
           sscreen->info.family == CHIP_NAVI14) &&
          (es_type == PIPE_SHADER_VERTEX || gs_type == PIPE_SHADER_VERTEX) && /* = no tess */
          shader->ngg.hw_max_esverts != 256) {
         shader->ge_cntl &= C_03096C_VERT_GRP_SIZE;

         if (shader->ngg.hw_max_esverts > 5) {
            shader->ge_cntl |=
               S_03096C_VERT_GRP_SIZE(shader->ngg.hw_max_esverts - 5);
         }
      }
   }

   if (window_space) {
      shader->ctx_reg.ngg.pa_cl_vte_cntl =
         S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1);
   } else {
      shader->ctx_reg.ngg.pa_cl_vte_cntl =
         S_028818_VTX_W0_FMT(1) |
         S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
         S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
         S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1);
   }
}
static void si_emit_shader_vs(struct si_context *sctx)
{
   struct si_shader *shader = sctx->queued.named.vs->shader;
   unsigned initial_cdw = sctx->gfx_cs->current.cdw;

   if (!shader)
      return;

   radeon_opt_set_context_reg(sctx, R_028A40_VGT_GS_MODE,
                              SI_TRACKED_VGT_GS_MODE,
                              shader->ctx_reg.vs.vgt_gs_mode);
   radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN,
                              SI_TRACKED_VGT_PRIMITIVEID_EN,
                              shader->ctx_reg.vs.vgt_primitiveid_en);

   if (sctx->chip_class <= GFX8) {
      radeon_opt_set_context_reg(sctx, R_028AB4_VGT_REUSE_OFF,
                                 SI_TRACKED_VGT_REUSE_OFF,
                                 shader->ctx_reg.vs.vgt_reuse_off);
   }

   radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG,
                              SI_TRACKED_SPI_VS_OUT_CONFIG,
                              shader->ctx_reg.vs.spi_vs_out_config);

   radeon_opt_set_context_reg(sctx, R_02870C_SPI_SHADER_POS_FORMAT,
                              SI_TRACKED_SPI_SHADER_POS_FORMAT,
                              shader->ctx_reg.vs.spi_shader_pos_format);

   radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL,
                              SI_TRACKED_PA_CL_VTE_CNTL,
                              shader->ctx_reg.vs.pa_cl_vte_cntl);

   if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
      radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
                                 SI_TRACKED_VGT_TF_PARAM,
                                 shader->vgt_tf_param);

   if (shader->vgt_vertex_reuse_block_cntl)
      radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                 SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                 shader->vgt_vertex_reuse_block_cntl);

   /* Required programming for tessellation. (legacy pipeline only) */
   if (sctx->chip_class == GFX10 &&
       shader->selector->type == PIPE_SHADER_TESS_EVAL) {
      radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
                                 SI_TRACKED_VGT_GS_ONCHIP_CNTL,
                                 S_028A44_ES_VERTS_PER_SUBGRP(250) |
                                 S_028A44_GS_PRIMS_PER_SUBGRP(126) |
                                 S_028A44_GS_INST_PRIMS_IN_SUBGRP(126));
   }

   if (sctx->chip_class >= GFX10) {
      radeon_opt_set_context_reg_rmw(sctx, R_02881C_PA_CL_VS_OUT_CNTL,
                                     SI_TRACKED_PA_CL_VS_OUT_CNTL__VS,
                                     shader->pa_cl_vs_out_cntl,
                                     SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK);
   }

   if (initial_cdw != sctx->gfx_cs->current.cdw)
      sctx->context_roll = true;

   /* GE_PC_ALLOC is not a context register, so it doesn't cause a context roll. */
   if (sctx->chip_class >= GFX10)
      gfx10_emit_ge_pc_alloc(sctx, shader->ctx_reg.vs.ge_pc_alloc);
}
/**
 * Compute the state for \p shader, which will run as a vertex shader on the
 * hardware.
 *
 * If \p gs is non-NULL, it points to the geometry shader for which this shader
 * is the copy shader.
 */
static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
                         struct si_shader_selector *gs)
{
   const struct si_shader_info *info = &shader->selector->info;
   struct si_pm4_state *pm4;
   unsigned num_user_sgprs, vgpr_comp_cnt;
   uint64_t va;
   unsigned nparams, oc_lds_en;
   unsigned window_space =
      info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
   bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || info->uses_primid;

   pm4 = si_get_shader_pm4_state(shader);
   if (!pm4)
      return;

   pm4->atom.emit = si_emit_shader_vs;

   /* We always write VGT_GS_MODE in the VS state, because every switch
    * between different shader pipelines involving a different GS or no
    * GS at all involves a switch of the VS (different GS use different
    * copy shaders). On the other hand, when the API switches from a GS to
    * no GS and then back to the same GS used originally, the GS state is
    * not sent again.
    */
   if (!gs) {
      unsigned mode = V_028A40_GS_OFF;

      /* PrimID needs GS scenario A. */
      if (enable_prim_id)
         mode = V_028A40_GS_SCENARIO_A;

      shader->ctx_reg.vs.vgt_gs_mode = S_028A40_MODE(mode);
      shader->ctx_reg.vs.vgt_primitiveid_en = enable_prim_id;
   } else {
      shader->ctx_reg.vs.vgt_gs_mode = ac_vgt_gs_mode(gs->gs_max_out_vertices,
                                                      sscreen->info.chip_class);
      shader->ctx_reg.vs.vgt_primitiveid_en = 0;
   }

   if (sscreen->info.chip_class <= GFX8) {
      /* Reuse needs to be set off if we write oViewport. */
      shader->ctx_reg.vs.vgt_reuse_off =
         S_028AB4_REUSE_OFF(info->writes_viewport_index);
   }

   va = shader->bo->gpu_address;
   si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

   if (gs) {
      vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
      num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
   } else if (shader->selector->type == PIPE_SHADER_VERTEX) {
      vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, enable_prim_id);

      if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
         num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
                          info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
      } else {
         num_user_sgprs = si_get_num_vs_user_sgprs(shader, SI_VS_NUM_USER_SGPR);
      }
   } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
      vgpr_comp_cnt = enable_prim_id ? 3 : 2;
      num_user_sgprs = SI_TES_NUM_USER_SGPR;
   } else
      unreachable("invalid shader selector type");

   /* VS is required to export at least one param. */
   nparams = MAX2(shader->info.nr_param_exports, 1);
   shader->ctx_reg.vs.spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);

   if (sscreen->info.chip_class >= GFX10) {
      shader->ctx_reg.vs.spi_vs_out_config |=
         S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0);
   }

   shader->ctx_reg.vs.spi_shader_pos_format =
      S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
      S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
                                  V_02870C_SPI_SHADER_4COMP :
                                  V_02870C_SPI_SHADER_NONE) |
      S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
                                  V_02870C_SPI_SHADER_4COMP :
                                  V_02870C_SPI_SHADER_NONE) |
      S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
                                  V_02870C_SPI_SHADER_4COMP :
                                  V_02870C_SPI_SHADER_NONE);
   shader->ctx_reg.vs.ge_pc_alloc = S_030980_OVERSUB_EN(sscreen->info.use_late_alloc) |
                                    S_030980_NUM_PC_LINES(sscreen->info.pc_lines / 4 - 1);
   shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(shader->selector, false);

   oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

   si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
   si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, S_00B124_MEM_BASE(va >> 40));

   uint32_t rsrc1 = S_00B128_VGPRS((shader->config.num_vgprs - 1) /
                                   (sscreen->ge_wave_size == 32 ? 8 : 4)) |
                    S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
                    S_00B128_DX10_CLAMP(1) |
                    S_00B128_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
                    S_00B128_FLOAT_MODE(shader->config.float_mode);
   uint32_t rsrc2 = S_00B12C_USER_SGPR(num_user_sgprs) |
                    S_00B12C_OC_LDS_EN(oc_lds_en) |
                    S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);

   if (sscreen->info.chip_class >= GFX10)
      rsrc2 |= S_00B12C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
   else if (sscreen->info.chip_class == GFX9)
      rsrc2 |= S_00B12C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);

   if (sscreen->info.chip_class <= GFX9)
      rsrc1 |= S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8);

   if (!sscreen->use_ngg_streamout) {
      rsrc2 |= S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
               S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
               S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
               S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
               S_00B12C_SO_EN(!!shader->selector->so.num_outputs);
   }

   si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS, rsrc1);
   si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS, rsrc2);

   if (window_space) {
      shader->ctx_reg.vs.pa_cl_vte_cntl =
         S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1);
   } else {
      shader->ctx_reg.vs.pa_cl_vte_cntl =
         S_028818_VTX_W0_FMT(1) |
         S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
         S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
         S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1);
   }

   if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
      si_set_tesseval_regs(sscreen, shader->selector, pm4);

   polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
static unsigned si_get_ps_num_interp(struct si_shader *ps)
{
   struct si_shader_info *info = &ps->selector->info;
   unsigned num_colors = !!(info->colors_read & 0x0f) +
                         !!(info->colors_read & 0xf0);
   unsigned num_interp = ps->selector->info.num_inputs +
                         (ps->key.part.ps.prolog.color_two_side ? num_colors : 0);

   assert(num_interp <= 32);
   return MIN2(num_interp, 32);
}
1600 static unsigned si_get_spi_shader_col_format(struct si_shader
*shader
)
1602 unsigned value
= shader
->key
.part
.ps
.epilog
.spi_shader_col_format
;
1603 unsigned i
, num_targets
= (util_last_bit(value
) + 3) / 4;
1605 /* If the i-th target format is set, all previous target formats must
1606 * be non-zero to avoid hangs.
1608 for (i
= 0; i
< num_targets
; i
++)
1609 if (!(value
& (0xf << (i
* 4))))
1610 value
|= V_028714_SPI_SHADER_32_R
<< (i
* 4);
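/* Example: if only MRT1 has a format (the nibble for MRT0 is zero), the loop
 * above fills MRT0 with the dummy SPI_SHADER_32_R format so that no enabled
 * target is preceded by a zero export format. */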
static void si_emit_shader_ps(struct si_context *sctx)
{
   struct si_shader *shader = sctx->queued.named.ps->shader;
   unsigned initial_cdw = sctx->gfx_cs->current.cdw;

   if (!shader)
      return;

   /* R_0286CC_SPI_PS_INPUT_ENA, R_0286D0_SPI_PS_INPUT_ADDR */
   radeon_opt_set_context_reg2(sctx, R_0286CC_SPI_PS_INPUT_ENA,
                               SI_TRACKED_SPI_PS_INPUT_ENA,
                               shader->ctx_reg.ps.spi_ps_input_ena,
                               shader->ctx_reg.ps.spi_ps_input_addr);

   radeon_opt_set_context_reg(sctx, R_0286E0_SPI_BARYC_CNTL,
                              SI_TRACKED_SPI_BARYC_CNTL,
                              shader->ctx_reg.ps.spi_baryc_cntl);
   radeon_opt_set_context_reg(sctx, R_0286D8_SPI_PS_IN_CONTROL,
                              SI_TRACKED_SPI_PS_IN_CONTROL,
                              shader->ctx_reg.ps.spi_ps_in_control);

   /* R_028710_SPI_SHADER_Z_FORMAT, R_028714_SPI_SHADER_COL_FORMAT */
   radeon_opt_set_context_reg2(sctx, R_028710_SPI_SHADER_Z_FORMAT,
                               SI_TRACKED_SPI_SHADER_Z_FORMAT,
                               shader->ctx_reg.ps.spi_shader_z_format,
                               shader->ctx_reg.ps.spi_shader_col_format);

   radeon_opt_set_context_reg(sctx, R_02823C_CB_SHADER_MASK,
                              SI_TRACKED_CB_SHADER_MASK,
                              shader->ctx_reg.ps.cb_shader_mask);

   if (initial_cdw != sctx->gfx_cs->current.cdw)
      sctx->context_roll = true;
}
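/* The radeon_opt_set_context_reg* helpers only emit a register when its value
 * actually changed, so comparing the CS dword count before and after the calls
 * tells us whether this atom rolled the context. */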
static void si_shader_ps(struct si_screen *sscreen, struct si_shader *shader)
{
   struct si_shader_info *info = &shader->selector->info;
   struct si_pm4_state *pm4;
   unsigned spi_ps_in_control, spi_shader_col_format, cb_shader_mask;
   unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
   uint64_t va;
   unsigned input_ena = shader->config.spi_ps_input_ena;

   /* we need to enable at least one of them, otherwise we hang the GPU */
   assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
          G_0286CC_PERSP_CENTER_ENA(input_ena) ||
          G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
          G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
          G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
          G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
          G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
          G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));
   /* POS_W_FLOAT_ENA requires one of the perspective weights. */
   assert(!G_0286CC_POS_W_FLOAT_ENA(input_ena) ||
          G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
          G_0286CC_PERSP_CENTER_ENA(input_ena) ||
          G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
          G_0286CC_PERSP_PULL_MODEL_ENA(input_ena));

   /* Validate interpolation optimization flags (read as implications). */
   assert(!shader->key.part.ps.prolog.bc_optimize_for_persp ||
          (G_0286CC_PERSP_CENTER_ENA(input_ena) &&
           G_0286CC_PERSP_CENTROID_ENA(input_ena)));
   assert(!shader->key.part.ps.prolog.bc_optimize_for_linear ||
          (G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
           G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
   assert(!shader->key.part.ps.prolog.force_persp_center_interp ||
          (!G_0286CC_PERSP_SAMPLE_ENA(input_ena) &&
           !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
   assert(!shader->key.part.ps.prolog.force_linear_center_interp ||
          (!G_0286CC_LINEAR_SAMPLE_ENA(input_ena) &&
           !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
   assert(!shader->key.part.ps.prolog.force_persp_sample_interp ||
          (!G_0286CC_PERSP_CENTER_ENA(input_ena) &&
           !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
   assert(!shader->key.part.ps.prolog.force_linear_sample_interp ||
          (!G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
           !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));

   /* Validate cases when the optimizations are off (read as implications). */
   assert(shader->key.part.ps.prolog.bc_optimize_for_persp ||
          !G_0286CC_PERSP_CENTER_ENA(input_ena) ||
          !G_0286CC_PERSP_CENTROID_ENA(input_ena));
   assert(shader->key.part.ps.prolog.bc_optimize_for_linear ||
          !G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
          !G_0286CC_LINEAR_CENTROID_ENA(input_ena));

   pm4 = si_get_shader_pm4_state(shader);
   if (!pm4)
      return;

   pm4->atom.emit = si_emit_shader_ps;

   /* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
    * Possible values:
    * 0 -> Position = pixel center
    * 1 -> Position = pixel centroid
    * 2 -> Position = at sample position
    *
    * From GLSL 4.5 specification, section 7.1:
    *   "The variable gl_FragCoord is available as an input variable from
    *    within fragment shaders and it holds the window relative coordinates
    *    (x, y, z, 1/w) values for the fragment. If multi-sampling, this
    *    value can be for any location within the pixel, or one of the
    *    fragment samples. The use of centroid does not further restrict
    *    this value to be inside the current primitive."
    *
    * Meaning that centroid has no effect and we can return anything within
    * the pixel. Thus, return the value at sample position, because that's
    * the most accurate one shaders can get.
    */
   spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);

   if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
       TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
      spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);

   spi_shader_col_format = si_get_spi_shader_col_format(shader);
   cb_shader_mask = ac_get_cb_shader_mask(spi_shader_col_format);

   /* Ensure that some export memory is always allocated, for two reasons:
    *
    * 1) Correctness: The hardware ignores the EXEC mask if no export
    *    memory is allocated, so KILL and alpha test do not work correctly
    *    without this.
    * 2) Performance: Every shader needs at least a NULL export, even when
    *    it writes no color/depth output. The NULL export instruction
    *    stalls without this setting.
    *
    * Don't add this to CB_SHADER_MASK.
    *
    * GFX10 supports pixel shaders without exports by setting both
    * the color and Z formats to SPI_SHADER_ZERO. The hw will skip export
    * instructions if any are present.
    */
   if ((sscreen->info.chip_class <= GFX9 ||
        info->uses_kill ||
        shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS) &&
       !spi_shader_col_format &&
       !info->writes_z && !info->writes_stencil && !info->writes_samplemask)
      spi_shader_col_format = V_028714_SPI_SHADER_32_R;

   shader->ctx_reg.ps.spi_ps_input_ena = input_ena;
   shader->ctx_reg.ps.spi_ps_input_addr = shader->config.spi_ps_input_addr;

   /* Set interpolation controls. */
   spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader)) |
                       S_0286D8_PS_W32_EN(sscreen->ps_wave_size == 32);

   shader->ctx_reg.ps.spi_baryc_cntl = spi_baryc_cntl;
   shader->ctx_reg.ps.spi_ps_in_control = spi_ps_in_control;
   shader->ctx_reg.ps.spi_shader_z_format =
      ac_get_spi_shader_z_format(info->writes_z,
                                 info->writes_stencil,
                                 info->writes_samplemask);
   shader->ctx_reg.ps.spi_shader_col_format = spi_shader_col_format;
   shader->ctx_reg.ps.cb_shader_mask = cb_shader_mask;

   va = shader->bo->gpu_address;
   si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
   si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
   si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, S_00B024_MEM_BASE(va >> 40));

   uint32_t rsrc1 =
      S_00B028_VGPRS((shader->config.num_vgprs - 1) /
                     (sscreen->ps_wave_size == 32 ? 8 : 4)) |
      S_00B028_DX10_CLAMP(1) |
      S_00B028_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
      S_00B028_FLOAT_MODE(shader->config.float_mode);

   if (sscreen->info.chip_class < GFX10) {
      rsrc1 |= S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8);
   }

   si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS, rsrc1);
   si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
                  S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
                  S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
                  S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}
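/* Reading aid: unlike the VS path earlier in this file, where the user SGPR
 * count is computed per shader, the PS always uses the fixed driver layout
 * SI_PS_NUM_USER_SGPR when programming RSRC2 above. */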
static void si_shader_init_pm4_state(struct si_screen *sscreen,
                                     struct si_shader *shader)
{
   switch (shader->selector->type) {
   case PIPE_SHADER_VERTEX:
      if (shader->key.as_ls)
         si_shader_ls(sscreen, shader);
      else if (shader->key.as_es)
         si_shader_es(sscreen, shader);
      else if (shader->key.as_ngg)
         gfx10_shader_ngg(sscreen, shader);
      else
         si_shader_vs(sscreen, shader, NULL);
      break;
   case PIPE_SHADER_TESS_CTRL:
      si_shader_hs(sscreen, shader);
      break;
   case PIPE_SHADER_TESS_EVAL:
      if (shader->key.as_es)
         si_shader_es(sscreen, shader);
      else if (shader->key.as_ngg)
         gfx10_shader_ngg(sscreen, shader);
      else
         si_shader_vs(sscreen, shader, NULL);
      break;
   case PIPE_SHADER_GEOMETRY:
      if (shader->key.as_ngg)
         gfx10_shader_ngg(sscreen, shader);
      else
         si_shader_gs(sscreen, shader);
      break;
   case PIPE_SHADER_FRAGMENT:
      si_shader_ps(sscreen, shader);
      break;
   default:
      assert(0);
   }
}
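/* The as_ls/as_es/as_ngg bits in the shader key select which hardware stage a
 * VS or TES main part is programmed for, so the same selector can feed the LS,
 * ES, NGG or legacy hardware-VS paths depending on the bound pipeline. */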
static unsigned si_get_alpha_test_func(struct si_context *sctx)
{
   /* Alpha-test should be disabled if colorbuffer 0 is integer. */
   return sctx->queued.named.dsa->alpha_func;
}
void si_shader_selector_key_vs(struct si_context *sctx,
                               struct si_shader_selector *vs,
                               struct si_shader_key *key,
                               struct si_vs_prolog_bits *prolog_key)
{
   if (!sctx->vertex_elements ||
       vs->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD])
      return;

   struct si_vertex_elements *elts = sctx->vertex_elements;

   prolog_key->instance_divisor_is_one = elts->instance_divisor_is_one;
   prolog_key->instance_divisor_is_fetched = elts->instance_divisor_is_fetched;
   prolog_key->unpack_instance_id_from_vertex_id =
      sctx->prim_discard_cs_instancing;

   /* Prefer a monolithic shader to allow scheduling divisions around
    * VBO loads. */
   if (prolog_key->instance_divisor_is_fetched)
      key->opt.prefer_mono = 1;

   unsigned count = MIN2(vs->info.num_inputs, elts->count);
   unsigned count_mask = (1 << count) - 1;
   unsigned fix = elts->fix_fetch_always & count_mask;
   unsigned opencode = elts->fix_fetch_opencode & count_mask;

   if (sctx->vertex_buffer_unaligned & elts->vb_alignment_check_mask) {
      uint32_t mask = elts->fix_fetch_unaligned & count_mask;
      while (mask) {
         unsigned i = u_bit_scan(&mask);
         unsigned log_hw_load_size = 1 + ((elts->hw_load_is_dword >> i) & 1);
         unsigned vbidx = elts->vertex_buffer_index[i];
         struct pipe_vertex_buffer *vb = &sctx->vertex_buffer[vbidx];
         unsigned align_mask = (1 << log_hw_load_size) - 1;
         if (vb->buffer_offset & align_mask ||
             vb->stride & align_mask) {
            fix |= 1 << i;
            opencode |= 1 << i;
         }
      }
   }

   while (fix) {
      unsigned i = u_bit_scan(&fix);
      key->mono.vs_fix_fetch[i].bits = elts->fix_fetch[i];
   }
   key->mono.vs_fetch_opencode = opencode;
}
static void si_shader_selector_key_hw_vs(struct si_context *sctx,
                                         struct si_shader_selector *vs,
                                         struct si_shader_key *key)
{
   struct si_shader_selector *ps = sctx->ps_shader.cso;

   key->opt.clip_disable =
      sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
      (vs->info.clipdist_writemask ||
       vs->info.writes_clipvertex) &&
      !vs->info.culldist_writemask;

   /* Find out if PS is disabled. */
   bool ps_disabled = true;
   if (ps) {
      bool ps_modifies_zs = ps->info.uses_kill ||
                            ps->info.writes_z ||
                            ps->info.writes_stencil ||
                            ps->info.writes_samplemask ||
                            sctx->queued.named.blend->alpha_to_coverage ||
                            si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS;
      unsigned ps_colormask = si_get_total_colormask(sctx);

      ps_disabled = sctx->queued.named.rasterizer->rasterizer_discard ||
                    (!ps_colormask &&
                     !ps_modifies_zs &&
                     !ps->info.writes_memory);
   }

   /* Find out which VS outputs aren't used by the PS. */
   uint64_t outputs_written = vs->outputs_written_before_ps;
   uint64_t inputs_read = 0;

   /* Ignore outputs that are not passed from VS to PS. */
   outputs_written &= ~((1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_POSITION, 0, true)) |
                        (1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_PSIZE, 0, true)) |
                        (1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_CLIPVERTEX, 0, true)));

   if (!ps_disabled) {
      inputs_read = ps->inputs_read;
   }

   uint64_t linked = outputs_written & inputs_read;

   key->opt.kill_outputs = ~linked & outputs_written;
   key->opt.ngg_culling = sctx->ngg_culling;
}
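/* kill_outputs ends up as a mask of VS outputs that the currently bound PS
 * never reads, so optimized (monolithic) variants can drop the corresponding
 * exports instead of writing values nobody consumes. */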
/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
                                          struct si_shader_selector *sel,
                                          union si_vgt_stages_key stages_key,
                                          struct si_shader_key *key)
{
   struct si_context *sctx = (struct si_context *)ctx;

   memset(key, 0, sizeof(*key));

   switch (sel->type) {
   case PIPE_SHADER_VERTEX:
      si_shader_selector_key_vs(sctx, sel, key, &key->part.vs.prolog);

      if (sctx->tes_shader.cso)
         key->as_ls = 1;
      else if (sctx->gs_shader.cso) {
         key->as_es = 1;
         key->as_ngg = stages_key.u.ngg;
      } else {
         key->as_ngg = stages_key.u.ngg;
         si_shader_selector_key_hw_vs(sctx, sel, key);

         if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
            key->mono.u.vs_export_prim_id = 1;
      }
      break;
   case PIPE_SHADER_TESS_CTRL:
      if (sctx->chip_class >= GFX9) {
         si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
                                   key, &key->part.tcs.ls_prolog);
         key->part.tcs.ls = sctx->vs_shader.cso;

         /* When the LS VGPR fix is needed, monolithic shaders
          * can:
          * - avoid initializing EXEC in both the LS prolog
          *   and the LS main part when !vs_needs_prolog
          * - remove the fixup for unused input VGPRs
          */
         key->part.tcs.ls_prolog.ls_vgpr_fix = sctx->ls_vgpr_fix;

         /* The LS output / HS input layout can be communicated
          * directly instead of via user SGPRs for merged LS-HS.
          * The LS VGPR fix prefers this too.
          */
         key->opt.prefer_mono = 1;
      }

      key->part.tcs.epilog.prim_mode =
         sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
      key->part.tcs.epilog.invoc0_tess_factors_are_def =
         sel->info.tessfactors_are_def_in_all_invocs;
      key->part.tcs.epilog.tes_reads_tess_factors =
         sctx->tes_shader.cso->info.reads_tess_factors;

      if (sel == sctx->fixed_func_tcs_shader.cso)
         key->mono.u.ff_tcs_inputs_to_copy = sctx->vs_shader.cso->outputs_written;
      break;
   case PIPE_SHADER_TESS_EVAL:
      key->as_ngg = stages_key.u.ngg;

      if (sctx->gs_shader.cso)
         key->as_es = 1;
      else {
         si_shader_selector_key_hw_vs(sctx, sel, key);

         if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
            key->mono.u.vs_export_prim_id = 1;
      }
      break;
   case PIPE_SHADER_GEOMETRY:
      if (sctx->chip_class >= GFX9) {
         if (sctx->tes_shader.cso) {
            key->part.gs.es = sctx->tes_shader.cso;
         } else {
            si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
                                      key, &key->part.gs.vs_prolog);
            key->part.gs.es = sctx->vs_shader.cso;
            key->part.gs.prolog.gfx9_prev_is_vs = 1;
         }

         key->as_ngg = stages_key.u.ngg;

         /* Merged ES-GS can have unbalanced wave usage.
          *
          * ES threads are per-vertex, while GS threads are
          * per-primitive. So without any amplification, there
          * are fewer GS threads than ES threads, which can result
          * in empty (no-op) GS waves. With too much amplification,
          * there are more GS threads than ES threads, which
          * can result in empty (no-op) ES waves.
          *
          * Non-monolithic shaders are implemented by setting EXEC
          * at the beginning of shader parts, and don't jump to
          * the end if EXEC is 0.
          *
          * Monolithic shaders use conditional blocks, so they can
          * jump and skip empty waves of ES or GS. So set this to
          * always use optimized variants, which are monolithic.
          */
         key->opt.prefer_mono = 1;
      }
      key->part.gs.prolog.tri_strip_adj_fix = sctx->gs_tri_strip_adj_fix;
      break;
   case PIPE_SHADER_FRAGMENT: {
      struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
      struct si_state_blend *blend = sctx->queued.named.blend;

      if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
          sel->info.colors_written == 0x1)
         key->part.ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;

      /* Select the shader color format based on whether
       * blending or alpha are needed.
       */
      key->part.ps.epilog.spi_shader_col_format =
         (blend->blend_enable_4bit & blend->need_src_alpha_4bit &
          sctx->framebuffer.spi_shader_col_format_blend_alpha) |
         (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
          sctx->framebuffer.spi_shader_col_format_blend) |
         (~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
          sctx->framebuffer.spi_shader_col_format_alpha) |
         (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
          sctx->framebuffer.spi_shader_col_format);
      key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;

      /* The output for dual source blending should have
       * the same format as the first output.
       */
      if (blend->dual_src_blend) {
         key->part.ps.epilog.spi_shader_col_format |=
            (key->part.ps.epilog.spi_shader_col_format & 0xf) << 4;
      }

      /* If alpha-to-coverage is enabled, we have to export alpha
       * even if there is no color buffer.
       */
      if (!(key->part.ps.epilog.spi_shader_col_format & 0xf) &&
          blend->alpha_to_coverage)
         key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;

      /* On GFX6 and GFX7 except Hawaii, the CB doesn't clamp outputs
       * to the range supported by the type if a channel has less
       * than 16 bits and the export format is 16_ABGR.
       */
      if (sctx->chip_class <= GFX7 && sctx->family != CHIP_HAWAII) {
         key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
         key->part.ps.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
      }

      /* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
      if (!key->part.ps.epilog.last_cbuf) {
         key->part.ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
         key->part.ps.epilog.color_is_int8 &= sel->info.colors_written;
         key->part.ps.epilog.color_is_int10 &= sel->info.colors_written;
      }

      bool is_poly = !util_prim_is_points_or_lines(sctx->current_rast_prim);
      bool is_line = util_prim_is_lines(sctx->current_rast_prim);

      key->part.ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
      key->part.ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;

      key->part.ps.epilog.alpha_to_one = blend->alpha_to_one &&
                                         rs->multisample_enable;

      key->part.ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
      key->part.ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
                                                 (is_line && rs->line_smooth)) &&
                                                sctx->framebuffer.nr_samples <= 1;
      key->part.ps.epilog.clamp_color = rs->clamp_fragment_color;

      if (sctx->ps_iter_samples > 1 &&
          sel->info.reads_samplemask) {
         key->part.ps.prolog.samplemask_log_ps_iter =
            util_logbase2(sctx->ps_iter_samples);
      }

      if (rs->force_persample_interp &&
          rs->multisample_enable &&
          sctx->framebuffer.nr_samples > 1 &&
          sctx->ps_iter_samples > 1) {
         key->part.ps.prolog.force_persp_sample_interp =
            sel->info.uses_persp_center ||
            sel->info.uses_persp_centroid;

         key->part.ps.prolog.force_linear_sample_interp =
            sel->info.uses_linear_center ||
            sel->info.uses_linear_centroid;
      } else if (rs->multisample_enable &&
                 sctx->framebuffer.nr_samples > 1) {
         key->part.ps.prolog.bc_optimize_for_persp =
            sel->info.uses_persp_center &&
            sel->info.uses_persp_centroid;
         key->part.ps.prolog.bc_optimize_for_linear =
            sel->info.uses_linear_center &&
            sel->info.uses_linear_centroid;
      } else {
         /* Make sure SPI doesn't compute more than 1 pair
          * of (i,j), which is the optimization here. */
         key->part.ps.prolog.force_persp_center_interp =
            sel->info.uses_persp_center +
            sel->info.uses_persp_centroid +
            sel->info.uses_persp_sample > 1;

         key->part.ps.prolog.force_linear_center_interp =
            sel->info.uses_linear_center +
            sel->info.uses_linear_centroid +
            sel->info.uses_linear_sample > 1;

         if (sel->info.uses_persp_opcode_interp_sample ||
             sel->info.uses_linear_opcode_interp_sample)
            key->mono.u.ps.interpolate_at_sample_force_center = 1;
      }

      key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);

      /* ps_uses_fbfetch is true only if the color buffer is bound. */
      if (sctx->ps_uses_fbfetch && !sctx->blitter->running) {
         struct pipe_surface *cb0 = sctx->framebuffer.state.cbufs[0];
         struct pipe_resource *tex = cb0->texture;

         /* 1D textures are allocated and used as 2D on GFX9. */
         key->mono.u.ps.fbfetch_msaa = sctx->framebuffer.nr_samples > 1;
         key->mono.u.ps.fbfetch_is_1D = sctx->chip_class != GFX9 &&
                                        (tex->target == PIPE_TEXTURE_1D ||
                                         tex->target == PIPE_TEXTURE_1D_ARRAY);
         key->mono.u.ps.fbfetch_layered = tex->target == PIPE_TEXTURE_1D_ARRAY ||
                                          tex->target == PIPE_TEXTURE_2D_ARRAY ||
                                          tex->target == PIPE_TEXTURE_CUBE ||
                                          tex->target == PIPE_TEXTURE_CUBE_ARRAY ||
                                          tex->target == PIPE_TEXTURE_3D;
      }
      break;
   }
   default:
      assert(0);
   }

   if (unlikely(sctx->screen->debug_flags & DBG(NO_OPT_VARIANT)))
      memset(&key->opt, 0, sizeof(key->opt));
}
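/* A rough map of how the key is used above and in the variant selection below:
 *   key.part.*  selects prolog/epilog parts (two-sided colors, alpha test, ...),
 *   key.mono.*  forces state to be inlined into a monolithic shader
 *               (vertex fetch fixups, fbfetch),
 *   key.opt.*   enables optimizations that may be compiled asynchronously
 *               (kill_outputs, NGG culling, prefer_mono).
 * DBG(NO_OPT_VARIANT) clears key.opt so only unoptimized variants are built.
 */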
static void si_build_shader_variant(struct si_shader *shader,
                                    int thread_index,
                                    bool low_priority)
{
   struct si_shader_selector *sel = shader->selector;
   struct si_screen *sscreen = sel->screen;
   struct ac_llvm_compiler *compiler;
   struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug;

   if (thread_index >= 0) {
      if (low_priority) {
         assert(thread_index < ARRAY_SIZE(sscreen->compiler_lowp));
         compiler = &sscreen->compiler_lowp[thread_index];
      } else {
         assert(thread_index < ARRAY_SIZE(sscreen->compiler));
         compiler = &sscreen->compiler[thread_index];
      }
      if (!debug->async)
         debug = NULL;
   } else {
      assert(!low_priority);
      compiler = shader->compiler_ctx_state.compiler;
   }

   if (!compiler->passes)
      si_init_compiler(sscreen, compiler);

   if (unlikely(!si_create_shader_variant(sscreen, compiler, shader, debug))) {
      PRINT_ERR("Failed to build shader variant (type=%u)\n",
                sel->type);
      shader->compilation_failed = true;
      return;
   }

   if (shader->compiler_ctx_state.is_debug_context) {
      FILE *f = open_memstream(&shader->shader_log,
                               &shader->shader_log_size);
      if (f) {
         si_shader_dump(sscreen, shader, NULL, f, false);
         fclose(f);
      }
   }

   si_shader_init_pm4_state(sscreen, shader);
}
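/* thread_index >= 0 means we were called from a compiler-queue thread and must
 * use the per-thread LLVM compiler (low- or normal-priority pool); a negative
 * index means we are on an application thread and use the context's compiler. */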
static void si_build_shader_variant_low_priority(void *job, int thread_index)
{
   struct si_shader *shader = (struct si_shader *)job;

   assert(thread_index >= 0);

   si_build_shader_variant(shader, thread_index, true);
}
static const struct si_shader_key zeroed;

static bool si_check_missing_main_part(struct si_screen *sscreen,
                                       struct si_shader_selector *sel,
                                       struct si_compiler_ctx_state *compiler_state,
                                       struct si_shader_key *key)
{
   struct si_shader **mainp = si_get_main_shader_part(sel, key);

   if (!*mainp) {
      struct si_shader *main_part = CALLOC_STRUCT(si_shader);

      if (!main_part)
         return false;

      /* We can leave the fence as permanently signaled because the
       * main part becomes visible globally only after it has been
       * compiled. */
      util_queue_fence_init(&main_part->ready);

      main_part->selector = sel;
      main_part->key.as_es = key->as_es;
      main_part->key.as_ls = key->as_ls;
      main_part->key.as_ngg = key->as_ngg;
      main_part->is_monolithic = false;

      if (!si_compile_shader(sscreen, compiler_state->compiler,
                             main_part, &compiler_state->debug)) {
         FREE(main_part);
         return false;
      }
      *mainp = main_part;
   }
   return true;
}
/**
 * Select a shader variant according to the shader key.
 *
 * \param optimized_or_none  If the key describes an optimized shader variant and
 *                           the compilation isn't finished, don't select any
 *                           shader and return an error.
 */
int si_shader_select_with_key(struct si_screen *sscreen,
                              struct si_shader_ctx_state *state,
                              struct si_compiler_ctx_state *compiler_state,
                              struct si_shader_key *key,
                              int thread_index,
                              bool optimized_or_none)
{
   struct si_shader_selector *sel = state->cso;
   struct si_shader_selector *previous_stage_sel = NULL;
   struct si_shader *current = state->current;
   struct si_shader *iter, *shader = NULL;

again:
   /* Check if we don't need to change anything.
    * This path is also used for most shaders that don't need multiple
    * variants, it will cost just a computation of the key and this
    * test. */
   if (likely(current &&
              memcmp(&current->key, key, sizeof(*key)) == 0)) {
      if (unlikely(!util_queue_fence_is_signalled(&current->ready))) {
         if (current->is_optimized) {
            if (optimized_or_none)
               return -1;

            memset(&key->opt, 0, sizeof(key->opt));
            goto current_not_ready;
         }

         util_queue_fence_wait(&current->ready);
      }

      return current->compilation_failed ? -1 : 0;
   }
current_not_ready:

   /* This must be done before the mutex is locked, because async GS
    * compilation calls this function too, and therefore must enter
    * the mutex first.
    *
    * Only wait if we are in a draw call. Don't wait if we are
    * in a compiler thread.
    */
   if (thread_index < 0)
      util_queue_fence_wait(&sel->ready);

   simple_mtx_lock(&sel->mutex);

   /* Find the shader variant. */
   for (iter = sel->first_variant; iter; iter = iter->next_variant) {
      /* Don't check the "current" shader. We checked it above. */
      if (current != iter &&
          memcmp(&iter->key, key, sizeof(*key)) == 0) {
         simple_mtx_unlock(&sel->mutex);

         if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
            /* If it's an optimized shader and its compilation has
             * been started but isn't done, use the unoptimized
             * shader so as not to cause a stall due to compilation.
             */
            if (iter->is_optimized) {
               if (optimized_or_none)
                  return -1;
               memset(&key->opt, 0, sizeof(key->opt));
               goto again;
            }

            util_queue_fence_wait(&iter->ready);
         }

         if (iter->compilation_failed) {
            return -1; /* skip the draw call */
         }

         state->current = iter;
         return 0;
      }
   }

   /* Build a new shader. */
   shader = CALLOC_STRUCT(si_shader);
   if (!shader) {
      simple_mtx_unlock(&sel->mutex);
      return -ENOMEM;
   }

   util_queue_fence_init(&shader->ready);

   shader->selector = sel;
   shader->key = *key;
   shader->compiler_ctx_state = *compiler_state;

   /* If this is a merged shader, get the first shader's selector. */
   if (sscreen->info.chip_class >= GFX9) {
      if (sel->type == PIPE_SHADER_TESS_CTRL)
         previous_stage_sel = key->part.tcs.ls;
      else if (sel->type == PIPE_SHADER_GEOMETRY)
         previous_stage_sel = key->part.gs.es;

      /* We need to wait for the previous shader. */
      if (previous_stage_sel && thread_index < 0)
         util_queue_fence_wait(&previous_stage_sel->ready);
   }

   bool is_pure_monolithic =
      sscreen->use_monolithic_shaders ||
      memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;

   /* Compile the main shader part if it doesn't exist. This can happen
    * if the initial guess was wrong.
    *
    * The prim discard CS doesn't need the main shader part.
    */
   if (!is_pure_monolithic &&
       !key->opt.vs_as_prim_discard_cs) {
      bool ok = true;

      /* Make sure the main shader part is present. This is needed
       * for shaders that can be compiled as VS, LS, or ES, and only
       * one of them is compiled at creation.
       *
       * It is also needed for GS, which can be compiled as non-NGG
       * and NGG.
       *
       * For merged shaders, check that the starting shader's main
       * part is present.
       */
      if (previous_stage_sel) {
         struct si_shader_key shader1_key = zeroed;

         if (sel->type == PIPE_SHADER_TESS_CTRL) {
            shader1_key.as_ls = 1;
         } else if (sel->type == PIPE_SHADER_GEOMETRY) {
            shader1_key.as_es = 1;
            shader1_key.as_ngg = key->as_ngg; /* for Wave32 vs Wave64 */
         }

         simple_mtx_lock(&previous_stage_sel->mutex);
         ok = si_check_missing_main_part(sscreen,
                                         previous_stage_sel,
                                         compiler_state, &shader1_key);
         simple_mtx_unlock(&previous_stage_sel->mutex);
      }

      if (ok) {
         ok = si_check_missing_main_part(sscreen, sel,
                                         compiler_state, key);
      }

      if (!ok) {
         FREE(shader);
         simple_mtx_unlock(&sel->mutex);
         return -ENOMEM; /* skip the draw call */
      }
   }

   /* Keep the reference to the 1st shader of merged shaders, so that
    * Gallium can't destroy it before we destroy the 2nd shader.
    *
    * Set sctx = NULL, because it's unused if we're not releasing
    * the shader, and we don't have any sctx here.
    */
   si_shader_selector_reference(NULL, &shader->previous_stage_sel,
                                previous_stage_sel);

   /* Monolithic-only shaders don't make a distinction between optimized
    * and unoptimized. */
   shader->is_monolithic =
      is_pure_monolithic ||
      memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;

   /* The prim discard CS is always optimized. */
   shader->is_optimized =
      (!is_pure_monolithic || key->opt.vs_as_prim_discard_cs) &&
      memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;

   /* If it's an optimized shader, compile it asynchronously. */
   if (shader->is_optimized && thread_index < 0) {
      /* Compile it asynchronously. */
      util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
                         shader, &shader->ready,
                         si_build_shader_variant_low_priority, NULL,
                         0);

      /* Add only after the ready fence was reset, to guard against a
       * race with si_bind_XX_shader. */
      if (!sel->last_variant) {
         sel->first_variant = shader;
         sel->last_variant = shader;
      } else {
         sel->last_variant->next_variant = shader;
         sel->last_variant = shader;
      }

      /* Use the default (unoptimized) shader for now. */
      memset(&key->opt, 0, sizeof(key->opt));
      simple_mtx_unlock(&sel->mutex);

      if (sscreen->options.sync_compile)
         util_queue_fence_wait(&shader->ready);

      if (optimized_or_none)
         return -1;
      goto again;
   }

   /* Reset the fence before adding to the variant list. */
   util_queue_fence_reset(&shader->ready);

   if (!sel->last_variant) {
      sel->first_variant = shader;
      sel->last_variant = shader;
   } else {
      sel->last_variant->next_variant = shader;
      sel->last_variant = shader;
   }

   simple_mtx_unlock(&sel->mutex);

   assert(!shader->is_optimized);
   si_build_shader_variant(shader, thread_index, false);

   util_queue_fence_signal(&shader->ready);

   if (!shader->compilation_failed)
      state->current = shader;

   return shader->compilation_failed ? -1 : 0;
}
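/* Variant selection in a nutshell: the selector keeps a singly linked list of
 * variants guarded by sel->mutex, the "current" shader is checked without the
 * lock, and optimized variants are compiled on the low-priority queue while
 * the draw falls back to the key with key.opt cleared. */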
static int si_shader_select(struct pipe_context *ctx,
                            struct si_shader_ctx_state *state,
                            union si_vgt_stages_key stages_key,
                            struct si_compiler_ctx_state *compiler_state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_shader_key key;

   si_shader_selector_key(ctx, state->cso, stages_key, &key);
   return si_shader_select_with_key(sctx->screen, state, compiler_state,
                                    &key, -1, false);
}
static void si_parse_next_shader_property(const struct si_shader_info *info,
                                          bool streamout,
                                          struct si_shader_key *key)
{
   unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];

   switch (info->processor) {
   case PIPE_SHADER_VERTEX:
      switch (next_shader) {
      case PIPE_SHADER_GEOMETRY:
         key->as_es = 1;
         break;
      case PIPE_SHADER_TESS_CTRL:
      case PIPE_SHADER_TESS_EVAL:
         key->as_ls = 1;
         break;
      default:
         /* If POSITION isn't written, it can only be a HW VS
          * if streamout is used. If streamout isn't used,
          * assume that it's a HW LS. (the next shader is TCS)
          * This heuristic is needed for separate shader objects.
          */
         if (!info->writes_position && !streamout)
            key->as_ls = 1;
      }
      break;

   case PIPE_SHADER_TESS_EVAL:
      if (next_shader == PIPE_SHADER_GEOMETRY ||
          !info->writes_position)
         key->as_es = 1;
      break;
   }
}
/**
 * Compile the main shader part or the monolithic shader as part of
 * si_shader_selector initialization. Since it can be done asynchronously,
 * there is no way to report compile failures to applications.
 */
static void si_init_shader_selector_async(void *job, int thread_index)
{
   struct si_shader_selector *sel = (struct si_shader_selector *)job;
   struct si_screen *sscreen = sel->screen;
   struct ac_llvm_compiler *compiler;
   struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;

   assert(!debug->debug_message || debug->async);
   assert(thread_index >= 0);
   assert(thread_index < ARRAY_SIZE(sscreen->compiler));
   compiler = &sscreen->compiler[thread_index];

   if (!compiler->passes)
      si_init_compiler(sscreen, compiler);

   /* Serialize NIR to save memory. Monolithic shader variants
    * have to deserialize NIR before compilation.
    */
   if (sel->nir) {
      struct blob blob;
      size_t size;

      blob_init(&blob);
      /* true = remove optional debugging data to increase
       * the likelihood of getting more shader cache hits.
       * It also drops variable names, so we'll save more memory.
       */
      nir_serialize(&blob, sel->nir, true);
      blob_finish_get_buffer(&blob, &sel->nir_binary, &size);
      sel->nir_size = size;
   }

   /* Compile the main shader part for use with a prolog and/or epilog.
    * If this fails, the driver will try to compile a monolithic shader
    * on demand.
    */
   if (!sscreen->use_monolithic_shaders) {
      struct si_shader *shader = CALLOC_STRUCT(si_shader);
      unsigned char ir_sha1_cache_key[20];

      if (!shader) {
         fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
         return;
      }

      /* We can leave the fence signaled because use of the default
       * main part is guarded by the selector's ready fence. */
      util_queue_fence_init(&shader->ready);

      shader->selector = sel;
      shader->is_monolithic = false;
      si_parse_next_shader_property(&sel->info,
                                    sel->so.num_outputs != 0,
                                    &shader->key);

      if (sscreen->use_ngg &&
          (!sel->so.num_outputs || sscreen->use_ngg_streamout) &&
          ((sel->type == PIPE_SHADER_VERTEX && !shader->key.as_ls) ||
           sel->type == PIPE_SHADER_TESS_EVAL ||
           sel->type == PIPE_SHADER_GEOMETRY))
         shader->key.as_ngg = 1;

      if (sel->nir) {
         si_get_ir_cache_key(sel, shader->key.as_ngg,
                             shader->key.as_es, ir_sha1_cache_key);
      }

      /* Try to load the shader from the shader cache. */
      simple_mtx_lock(&sscreen->shader_cache_mutex);

      if (si_shader_cache_load_shader(sscreen, ir_sha1_cache_key, shader)) {
         simple_mtx_unlock(&sscreen->shader_cache_mutex);
         si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
      } else {
         simple_mtx_unlock(&sscreen->shader_cache_mutex);

         /* Compile the shader if it hasn't been loaded from the cache. */
         if (!si_compile_shader(sscreen, compiler, shader, debug)) {
            FREE(shader);
            fprintf(stderr, "radeonsi: can't compile a main shader part\n");
            return;
         }

         simple_mtx_lock(&sscreen->shader_cache_mutex);
         si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key,
                                       shader, true);
         simple_mtx_unlock(&sscreen->shader_cache_mutex);
      }

      *si_get_main_shader_part(sel, &shader->key) = shader;

      /* Unset "outputs_written" flags for outputs converted to
       * DEFAULT_VAL, so that later inter-shader optimizations don't
       * try to eliminate outputs that don't exist in the final
       * shader.
       *
       * This is only done if non-monolithic shaders are enabled.
       */
      if ((sel->type == PIPE_SHADER_VERTEX ||
           sel->type == PIPE_SHADER_TESS_EVAL) &&
          !shader->key.as_ls &&
          !shader->key.as_es) {
         unsigned i;

         for (i = 0; i < sel->info.num_outputs; i++) {
            unsigned offset = shader->info.vs_output_param_offset[i];

            if (offset <= AC_EXP_PARAM_OFFSET_31)
               continue;

            unsigned name = sel->info.output_semantic_name[i];
            unsigned index = sel->info.output_semantic_index[i];
            unsigned id;

            switch (name) {
            case TGSI_SEMANTIC_GENERIC:
               /* don't process indices the function can't handle */
               if (index >= SI_MAX_IO_GENERIC)
                  break;
               /* fall through */
            default:
               id = si_shader_io_get_unique_index(name, index, true);
               sel->outputs_written_before_ps &= ~(1ull << id);
               break;
            case TGSI_SEMANTIC_POSITION: /* ignore these */
            case TGSI_SEMANTIC_PSIZE:
            case TGSI_SEMANTIC_CLIPVERTEX:
            case TGSI_SEMANTIC_EDGEFLAG:
               break;
            }
         }
      }
   }

   /* The GS copy shader is always pre-compiled. */
   if (sel->type == PIPE_SHADER_GEOMETRY &&
       (!sscreen->use_ngg ||
        !sscreen->use_ngg_streamout || /* also for PRIMITIVES_GENERATED */
        sel->tess_turns_off_ngg)) {
      sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, compiler, sel, debug);
      if (!sel->gs_copy_shader) {
         fprintf(stderr, "radeonsi: can't create GS copy shader\n");
         return;
      }

      si_shader_vs(sscreen, sel->gs_copy_shader, sel);
   }

   /* Free NIR. We only keep serialized NIR after this point. */
   if (sel->nir) {
      ralloc_free(sel->nir);
      sel->nir = NULL;
   }
}
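/* After this point the selector only keeps the serialized NIR blob
 * (sel->nir_binary / sel->nir_size); monolithic variants deserialize it
 * again before they are compiled. */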
void si_schedule_initial_compile(struct si_context *sctx, unsigned processor,
                                 struct util_queue_fence *ready_fence,
                                 struct si_compiler_ctx_state *compiler_ctx_state,
                                 void *job, util_queue_execute_func execute)
{
   util_queue_fence_init(ready_fence);

   struct util_async_debug_callback async_debug;
   bool debug =
      (sctx->debug.debug_message && !sctx->debug.async) ||
      sctx->is_debug ||
      si_can_dump_shader(sctx->screen, processor);

   if (debug) {
      u_async_debug_init(&async_debug);
      compiler_ctx_state->debug = async_debug.base;
   }

   util_queue_add_job(&sctx->screen->shader_compiler_queue, job,
                      ready_fence, execute, NULL, 0);

   if (debug) {
      util_queue_fence_wait(ready_fence);
      u_async_debug_drain(&async_debug, &sctx->debug);
      u_async_debug_cleanup(&async_debug);
   }

   if (sctx->screen->options.sync_compile)
      util_queue_fence_wait(ready_fence);
}
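/* With a debug callback attached, messages from the asynchronous compile are
 * buffered by u_async_debug and drained back into the context after the ready
 * fence signals; sync_compile additionally makes shader creation block until
 * the compile has finished. */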
/* Return descriptor slot usage masks from the given shader info. */
void si_get_active_slot_masks(const struct si_shader_info *info,
                              uint32_t *const_and_shader_buffers,
                              uint64_t *samplers_and_images)
{
   unsigned start, num_shaderbufs, num_constbufs, num_images, num_msaa_images, num_samplers;

   num_shaderbufs = util_last_bit(info->shader_buffers_declared);
   num_constbufs = util_last_bit(info->const_buffers_declared);
   /* two 8-byte images share one 16-byte slot */
   num_images = align(util_last_bit(info->images_declared), 2);
   num_msaa_images = align(util_last_bit(info->msaa_images_declared), 2);
   num_samplers = util_last_bit(info->samplers_declared);

   /* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */
   start = si_get_shaderbuf_slot(num_shaderbufs - 1);
   *const_and_shader_buffers =
      u_bit_consecutive(start, num_shaderbufs + num_constbufs);

   /* The layout is:
    * - fmask[last] ... fmask[0]     go to [15-last .. 15]
    * - image[last] ... image[0]     go to [31-last .. 31]
    * - sampler[0] ... sampler[last] go to [32 .. 32+last*2]
    *
    * FMASKs for images are placed separately, because MSAA images are rare,
    * and so we can benefit from a better cache hit rate if we keep image
    * descriptors together.
    */
   if (num_msaa_images)
      num_images = SI_NUM_IMAGES + num_msaa_images; /* add FMASK descriptors */

   start = si_get_image_slot(num_images - 1) / 2;
   *samplers_and_images =
      u_bit_consecutive64(start, num_images / 2 + num_samplers);
}
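/* The two masks produced above each mark one contiguous bit range (one for the
 * buffer slots, one for the sampler/image slots), which lets the descriptor
 * code skip slots the shader never declared. */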
static void *si_create_shader_selector(struct pipe_context *ctx,
                                       const struct pipe_shader_state *state)
{
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
   int i;

   if (!sel)
      return NULL;

   sel->screen = sscreen;
   sel->compiler_ctx_state.debug = sctx->debug;
   sel->compiler_ctx_state.is_debug_context = sctx->is_debug;

   sel->so = state->stream_output;

   if (state->type == PIPE_SHADER_IR_TGSI) {
      sel->nir = tgsi_to_nir(state->tokens, ctx->screen);
   } else {
      assert(state->type == PIPE_SHADER_IR_NIR);
      sel->nir = state->ir.nir;
   }

   si_nir_scan_shader(sel->nir, &sel->info);
   si_nir_adjust_driver_locations(sel->nir);

   sel->type = sel->info.processor;
   p_atomic_inc(&sscreen->num_shaders_created);
   si_get_active_slot_masks(&sel->info,
                            &sel->active_const_and_shader_buffers,
                            &sel->active_samplers_and_images);

   /* Record which streamout buffers are enabled. */
   for (i = 0; i < sel->so.num_outputs; i++) {
      sel->enabled_streamout_buffer_mask |=
         (1 << sel->so.output[i].output_buffer) <<
         (sel->so.output[i].stream * 4);
   }

   sel->num_vs_inputs = sel->type == PIPE_SHADER_VERTEX &&
                        !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] ?
                           sel->info.num_inputs : 0;
   sel->num_vbos_in_user_sgprs =
      MIN2(sel->num_vs_inputs, sscreen->num_vbos_in_user_sgprs);

   /* The prolog is a no-op if there are no inputs. */
   sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
                          sel->info.num_inputs &&
                          !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];

   sel->prim_discard_cs_allowed =
      sel->type == PIPE_SHADER_VERTEX &&
      !sel->info.uses_bindless_images &&
      !sel->info.uses_bindless_samplers &&
      !sel->info.writes_memory &&
      !sel->info.writes_viewport_index &&
      !sel->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] &&
      !sel->so.num_outputs;

   switch (sel->type) {
   case PIPE_SHADER_GEOMETRY:
      sel->gs_output_prim =
         sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];

      /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
      sel->rast_prim = sel->gs_output_prim;
      if (util_rast_prim_is_triangles(sel->rast_prim))
         sel->rast_prim = PIPE_PRIM_TRIANGLES;

      sel->gs_max_out_vertices =
         sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
      sel->gs_num_invocations =
         sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
      sel->gsvs_vertex_size = sel->info.num_outputs * 16;
      sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
                                sel->gs_max_out_vertices;

      sel->max_gs_stream = 0;
      for (i = 0; i < sel->so.num_outputs; i++)