/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_build_pm4.h"
#include "sid.h"

#include "compiler/nir/nir_serialize.h"
#include "nir/tgsi_to_nir.h"
#include "util/hash_table.h"
#include "util/crc32.h"
#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_prim.h"

#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"

/* SHADER_CACHE */

/**
 * Return the IR key for the shader cache.
 */
void si_get_ir_cache_key(struct si_shader_selector *sel, bool ngg, bool es,
                         unsigned char ir_sha1_cache_key[20])
{
    struct blob blob = {};
    unsigned ir_size;
    void *ir_binary;

    if (sel->nir_binary) {
        ir_binary = sel->nir_binary;
        ir_size = sel->nir_size;
    } else {
        assert(sel->nir);

        blob_init(&blob);
        nir_serialize(&blob, sel->nir, true);
        ir_binary = blob.data;
        ir_size = blob.size;
    }

    /* These settings affect the compilation, but they are not derived
     * from the input shader IR.
     */
    unsigned shader_variant_flags = 0;

    if (ngg)
        shader_variant_flags |= 1 << 0;
    if (sel->nir)
        shader_variant_flags |= 1 << 1;
    if (si_get_wave_size(sel->screen, sel->type, ngg, es) == 32)
        shader_variant_flags |= 1 << 2;
    if (sel->type == PIPE_SHADER_FRAGMENT &&
        sel->info.uses_derivatives &&
        sel->info.uses_kill &&
        sel->screen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL))
        shader_variant_flags |= 1 << 3;

    /* This varies depending on whether compute-based culling is enabled. */
    shader_variant_flags |= sel->screen->num_vbos_in_user_sgprs << 4;

    struct mesa_sha1 ctx;
    _mesa_sha1_init(&ctx);
    _mesa_sha1_update(&ctx, &shader_variant_flags, 4);
    _mesa_sha1_update(&ctx, ir_binary, ir_size);
    if (sel->type == PIPE_SHADER_VERTEX ||
        sel->type == PIPE_SHADER_TESS_EVAL ||
        sel->type == PIPE_SHADER_GEOMETRY)
        _mesa_sha1_update(&ctx, &sel->so, sizeof(sel->so));
    _mesa_sha1_final(&ctx, ir_sha1_cache_key);

    if (ir_binary == blob.data)
        blob_finish(&blob);
}
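
/* A sketch of the variant-flag layout hashed above (derived from the code,
 * for reference only):
 *   bit 0     NGG enabled
 *   bit 1     selector has live NIR (rather than pre-serialized NIR)
 *   bit 2     wave32 mode
 *   bit 3     FS "correct derivatives after kill" workaround
 *   bits 4+   num_vbos_in_user_sgprs
 * The SHA1 covers these flags, the serialized IR, and (for VS/TES/GS) the
 * streamout state.
 */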

/** Copy "data" to "ptr" and return the next dword following the copied data. */
static uint32_t *write_data(uint32_t *ptr, const void *data, unsigned size)
{
    /* data may be NULL if size == 0 */
    if (size)
        memcpy(ptr, data, size);
    ptr += DIV_ROUND_UP(size, 4);
    return ptr;
}

/** Read data from "ptr". Return the next dword following the data. */
static uint32_t *read_data(uint32_t *ptr, void *data, unsigned size)
{
    memcpy(data, ptr, size);
    ptr += DIV_ROUND_UP(size, 4);
    return ptr;
}

/**
 * Write the size as uint followed by the data. Return the next dword
 * following the copied data.
 */
static uint32_t *write_chunk(uint32_t *ptr, const void *data, unsigned size)
{
    *ptr++ = size;
    return write_data(ptr, data, size);
}

/**
 * Read the size as uint followed by the data. Return both via parameters.
 * Return the next dword following the data.
 */
static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size)
{
    *size = *ptr++;
    assert(*data == NULL);
    if (!*size)
        return ptr;
    *data = malloc(*size);
    return read_data(ptr, *data, *size);
}
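
/* Round-trip sketch (illustrative only): write_chunk stores a size dword
 * followed by the dword-padded payload, and read_chunk undoes it:
 *
 *    uint32_t buf[16] = {0};
 *    write_chunk(buf, "abc", 4);        // buf[0] = 4, buf[1] holds "abc\0"
 *
 *    void *out = NULL;
 *    unsigned out_size = 0;
 *    read_chunk(buf, &out, &out_size);  // mallocs 4 bytes, copies "abc\0"
 */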

/**
 * Return the shader binary in a buffer. The first 4 bytes contain its size
 * as an integer.
 */
static void *si_get_shader_binary(struct si_shader *shader)
{
    /* Each chunk is stored as the size of the data followed by the data
     * itself. */
    unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
                            strlen(shader->binary.llvm_ir_string) + 1 : 0;

    /* Refuse to allocate overly large buffers and guard against integer
     * overflow. */
    if (shader->binary.elf_size > UINT_MAX / 4 ||
        llvm_ir_size > UINT_MAX / 4)
        return NULL;

    unsigned size =
        4 + /* total size */
        4 + /* CRC32 of the data below */
        align(sizeof(shader->config), 4) +
        align(sizeof(shader->info), 4) +
        4 + align(shader->binary.elf_size, 4) +
        4 + align(llvm_ir_size, 4);
    void *buffer = CALLOC(1, size);
    uint32_t *ptr = (uint32_t*)buffer;

    if (!buffer)
        return NULL;

    *ptr++ = size;
    ptr++; /* CRC32 is calculated at the end. */

    ptr = write_data(ptr, &shader->config, sizeof(shader->config));
    ptr = write_data(ptr, &shader->info, sizeof(shader->info));
    ptr = write_chunk(ptr, shader->binary.elf_buffer, shader->binary.elf_size);
    ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size);
    assert((char *)ptr - (char *)buffer == size);

    /* Compute CRC32. */
    ptr = (uint32_t*)buffer;
    ptr++;
    *ptr = util_hash_crc32(ptr + 1, size - 8);

    return buffer;
}
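
/* Resulting buffer layout, all fields dword-aligned (a summary of the code
 * above):
 *
 *   dword 0    total size in bytes
 *   dword 1    CRC32 of everything after dword 1
 *   ...        shader->config (raw struct)
 *   ...        shader->info (raw struct)
 *   ...        elf_size, then the ELF bytes
 *   ...        llvm_ir_size, then the IR string (size 0 if absent)
 */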

static bool si_load_shader_binary(struct si_shader *shader, void *binary)
{
    uint32_t *ptr = (uint32_t*)binary;
    uint32_t size = *ptr++;
    uint32_t crc32 = *ptr++;
    unsigned chunk_size;
    unsigned elf_size;

    if (util_hash_crc32(ptr, size - 8) != crc32) {
        fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n");
        return false;
    }

    ptr = read_data(ptr, &shader->config, sizeof(shader->config));
    ptr = read_data(ptr, &shader->info, sizeof(shader->info));
    ptr = read_chunk(ptr, (void**)&shader->binary.elf_buffer,
                     &elf_size);
    shader->binary.elf_size = elf_size;
    ptr = read_chunk(ptr, (void**)&shader->binary.llvm_ir_string, &chunk_size);

    return true;
}

/**
 * Insert a shader into the cache. It's assumed the shader is not in the cache.
 * Use si_shader_cache_load_shader before calling this.
 */
void si_shader_cache_insert_shader(struct si_screen *sscreen,
                                   unsigned char ir_sha1_cache_key[20],
                                   struct si_shader *shader,
                                   bool insert_into_disk_cache)
{
    void *hw_binary;
    struct hash_entry *entry;
    uint8_t key[CACHE_KEY_SIZE];

    entry = _mesa_hash_table_search(sscreen->shader_cache, ir_sha1_cache_key);
    if (entry)
        return; /* already added */

    hw_binary = si_get_shader_binary(shader);
    if (!hw_binary)
        return;

    if (_mesa_hash_table_insert(sscreen->shader_cache,
                                mem_dup(ir_sha1_cache_key, 20),
                                hw_binary) == NULL) {
        FREE(hw_binary);
        return;
    }

    if (sscreen->disk_shader_cache && insert_into_disk_cache) {
        disk_cache_compute_key(sscreen->disk_shader_cache,
                               ir_sha1_cache_key, 20, key);
        disk_cache_put(sscreen->disk_shader_cache, key, hw_binary,
                       *((uint32_t *) hw_binary), NULL);
    }
}

bool si_shader_cache_load_shader(struct si_screen *sscreen,
                                 unsigned char ir_sha1_cache_key[20],
                                 struct si_shader *shader)
{
    struct hash_entry *entry =
        _mesa_hash_table_search(sscreen->shader_cache, ir_sha1_cache_key);

    if (entry) {
        if (si_load_shader_binary(shader, entry->data)) {
            p_atomic_inc(&sscreen->num_memory_shader_cache_hits);
            return true;
        }
    }
    p_atomic_inc(&sscreen->num_memory_shader_cache_misses);

    if (!sscreen->disk_shader_cache)
        return false;

    unsigned char sha1[CACHE_KEY_SIZE];
    disk_cache_compute_key(sscreen->disk_shader_cache, ir_sha1_cache_key,
                           20, sha1);

    size_t binary_size;
    uint8_t *buffer = disk_cache_get(sscreen->disk_shader_cache, sha1,
                                     &binary_size);
    if (buffer) {
        if (binary_size >= sizeof(uint32_t) &&
            *((uint32_t*)buffer) == binary_size) {
            if (si_load_shader_binary(shader, buffer)) {
                free(buffer);
                si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key,
                                              shader, false);
                p_atomic_inc(&sscreen->num_disk_shader_cache_hits);
                return true;
            }
        } else {
            /* Something has gone wrong; discard the item from the cache
             * and rebuild/link from source.
             */
            assert(!"Invalid radeonsi shader disk cache item!");
            disk_cache_remove(sscreen->disk_shader_cache, sha1);
        }
    }

    free(buffer);
    p_atomic_inc(&sscreen->num_disk_shader_cache_misses);
    return false;
}
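
/* Typical caller flow (a hedged sketch; the compile step and the locking of
 * sscreen->shader_cache_mutex, which presumably guards the lookup/insert
 * pair, are elided):
 *
 *    unsigned char key[20];
 *    si_get_ir_cache_key(sel, ngg, es, key);
 *    if (!si_shader_cache_load_shader(sscreen, key, shader)) {
 *        ... compile the shader ...
 *        si_shader_cache_insert_shader(sscreen, key, shader, true);
 *    }
 */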

static uint32_t si_shader_cache_key_hash(const void *key)
{
    /* Take the first dword of SHA1. */
    return *(uint32_t*)key;
}

static bool si_shader_cache_key_equals(const void *a, const void *b)
{
    /* Compare SHA1s. */
    return memcmp(a, b, 20) == 0;
}

static void si_destroy_shader_cache_entry(struct hash_entry *entry)
{
    FREE((void*)entry->key);
    FREE(entry->data);
}

bool si_init_shader_cache(struct si_screen *sscreen)
{
    (void) simple_mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
    sscreen->shader_cache =
        _mesa_hash_table_create(NULL,
                                si_shader_cache_key_hash,
                                si_shader_cache_key_equals);

    return sscreen->shader_cache != NULL;
}

void si_destroy_shader_cache(struct si_screen *sscreen)
{
    if (sscreen->shader_cache)
        _mesa_hash_table_destroy(sscreen->shader_cache,
                                 si_destroy_shader_cache_entry);
    simple_mtx_destroy(&sscreen->shader_cache_mutex);
}

/* SHADER STATES */

static void si_set_tesseval_regs(struct si_screen *sscreen,
                                 const struct si_shader_selector *tes,
                                 struct si_pm4_state *pm4)
{
    const struct si_shader_info *info = &tes->info;
    unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
    unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
    bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
    bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
    unsigned type, partitioning, topology, distribution_mode;

    switch (tes_prim_mode) {
    case PIPE_PRIM_LINES:
        type = V_028B6C_TESS_ISOLINE;
        break;
    case PIPE_PRIM_TRIANGLES:
        type = V_028B6C_TESS_TRIANGLE;
        break;
    case PIPE_PRIM_QUADS:
        type = V_028B6C_TESS_QUAD;
        break;
    default:
        assert(0);
        return;
    }

    switch (tes_spacing) {
    case PIPE_TESS_SPACING_FRACTIONAL_ODD:
        partitioning = V_028B6C_PART_FRAC_ODD;
        break;
    case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
        partitioning = V_028B6C_PART_FRAC_EVEN;
        break;
    case PIPE_TESS_SPACING_EQUAL:
        partitioning = V_028B6C_PART_INTEGER;
        break;
    default:
        assert(0);
        return;
    }

    if (tes_point_mode)
        topology = V_028B6C_OUTPUT_POINT;
    else if (tes_prim_mode == PIPE_PRIM_LINES)
        topology = V_028B6C_OUTPUT_LINE;
    else if (tes_vertex_order_cw)
        /* for some reason, this must be the other way around */
        topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
    else
        topology = V_028B6C_OUTPUT_TRIANGLE_CW;

    if (sscreen->info.has_distributed_tess) {
        if (sscreen->info.family == CHIP_FIJI ||
            sscreen->info.family >= CHIP_POLARIS10)
            distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
        else
            distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
    } else
        distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;

    assert(pm4->shader);
    pm4->shader->vgt_tf_param = S_028B6C_TYPE(type) |
                                S_028B6C_PARTITIONING(partitioning) |
                                S_028B6C_TOPOLOGY(topology) |
                                S_028B6C_DISTRIBUTION_MODE(distribution_mode);
}
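
/* Worked example (for reference, not exhaustive): a TES declared with
 * triangles, fractional_odd spacing and CCW vertex order on a chip with
 * distributed tessellation (e.g. Fiji) ends up with TYPE=TESS_TRIANGLE,
 * PARTITIONING=PART_FRAC_ODD, TOPOLOGY=OUTPUT_TRIANGLE_CW (note the CW/CCW
 * swap above) and DISTRIBUTION_MODE=TRAPEZOIDS.
 */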

/* Polaris needs different VTX_REUSE_DEPTH settings depending on
 * whether the "fractional odd" tessellation spacing is used.
 *
 * Possible VGT configurations and which state should set the register:
 *
 *   Reg set in | VGT shader configuration    | Value
 * ------------------------------------------------------
 *   VS as VS   | VS                          | 30
 *   VS as ES   | ES -> GS -> VS              | 30
 *   TES as VS  | LS -> HS -> VS              | 14 or 30
 *   TES as ES  | LS -> HS -> ES -> GS -> VS  | 14 or 30
 *
 * If "shader" is NULL, it's assumed the shader is not an LS or a GS copy
 * shader.
 */
static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen,
                                         struct si_shader_selector *sel,
                                         struct si_shader *shader,
                                         struct si_pm4_state *pm4)
{
    unsigned type = sel->type;

    if (sscreen->info.family < CHIP_POLARIS10 ||
        sscreen->info.chip_class >= GFX10)
        return;

    /* VS as VS, or VS as ES: */
    if ((type == PIPE_SHADER_VERTEX &&
         (!shader ||
          (!shader->key.as_ls && !shader->is_gs_copy_shader))) ||
        /* TES as VS, or TES as ES: */
        type == PIPE_SHADER_TESS_EVAL) {
        unsigned vtx_reuse_depth = 30;

        if (type == PIPE_SHADER_TESS_EVAL &&
            sel->info.properties[TGSI_PROPERTY_TES_SPACING] ==
            PIPE_TESS_SPACING_FRACTIONAL_ODD)
            vtx_reuse_depth = 14;

        assert(pm4->shader);
        pm4->shader->vgt_vertex_reuse_block_cntl = vtx_reuse_depth;
    }
}

static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
{
    if (shader->pm4)
        si_pm4_clear_state(shader->pm4);
    else
        shader->pm4 = CALLOC_STRUCT(si_pm4_state);

    if (shader->pm4) {
        shader->pm4->shader = shader;
        return shader->pm4;
    } else {
        fprintf(stderr, "radeonsi: Failed to create pm4 state.\n");
        return NULL;
    }
}

static unsigned si_get_num_vs_user_sgprs(struct si_shader *shader,
                                         unsigned num_always_on_user_sgprs)
{
    struct si_shader_selector *vs = shader->previous_stage_sel ?
                                    shader->previous_stage_sel : shader->selector;
    unsigned num_vbos_in_user_sgprs = vs->num_vbos_in_user_sgprs;

    /* 1 SGPR is reserved for the vertex buffer pointer. */
    assert(num_always_on_user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST - 1);

    if (num_vbos_in_user_sgprs)
        return SI_SGPR_VS_VB_DESCRIPTOR_FIRST + num_vbos_in_user_sgprs * 4;

    /* Add the pointer to VBO descriptors. */
    return num_always_on_user_sgprs + 1;
}
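
/* Worked example (a sketch; the actual constants live elsewhere in the
 * driver): with two vertex buffers inlined in user SGPRs, the count is
 * SI_SGPR_VS_VB_DESCRIPTOR_FIRST + 2 * 4, i.e. the always-on SGPRs plus two
 * 4-dword buffer descriptors. With none inlined, it is just
 * num_always_on_user_sgprs + 1 for the pointer to the VBO descriptor list.
 */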

/* Return VGPR_COMP_CNT for the API vertex shader. This can be hw LS, LSHS, ES, ESGS, VS. */
static unsigned si_get_vs_vgpr_comp_cnt(struct si_screen *sscreen,
                                        struct si_shader *shader, bool legacy_vs_prim_id)
{
    assert(shader->selector->type == PIPE_SHADER_VERTEX ||
           (shader->previous_stage_sel &&
            shader->previous_stage_sel->type == PIPE_SHADER_VERTEX));

    /* GFX6-9 LS    (VertexID, RelAutoindex, InstanceID / StepRate0(==1), ...).
     * GFX6-9 ES,VS (VertexID, InstanceID / StepRate0(==1), VSPrimID, ...)
     * GFX10  LS    (VertexID, RelAutoindex, UserVGPR1, InstanceID).
     * GFX10  ES,VS (VertexID, UserVGPR0, UserVGPR1 or VSPrimID, UserVGPR2 or InstanceID)
     */
    bool is_ls = shader->selector->type == PIPE_SHADER_TESS_CTRL || shader->key.as_ls;

    if (sscreen->info.chip_class >= GFX10 && shader->info.uses_instanceid)
        return 3;
    else if ((is_ls && shader->info.uses_instanceid) || legacy_vs_prim_id)
        return 2;
    else if (is_ls || shader->info.uses_instanceid)
        return 1;
    else
        return 0;
}
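
/* Example (derived from the table above): a GFX9 vertex shader running as hw
 * LS that uses InstanceID returns 2, so the hardware loads VGPR0..VGPR2
 * (VertexID, RelAutoindex, InstanceID); the same shader without InstanceID
 * returns 1 and stops at RelAutoindex.
 */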

static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
{
    struct si_pm4_state *pm4;
    uint64_t va;

    assert(sscreen->info.chip_class <= GFX8);

    pm4 = si_get_shader_pm4_state(shader);
    if (!pm4)
        return;

    va = shader->bo->gpu_address;
    si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

    si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
    si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40));

    shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
                           S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
                           S_00B528_VGPR_COMP_CNT(si_get_vs_vgpr_comp_cnt(sscreen, shader, false)) |
                           S_00B528_DX10_CLAMP(1) |
                           S_00B528_FLOAT_MODE(shader->config.float_mode);
    shader->config.rsrc2 = S_00B52C_USER_SGPR(si_get_num_vs_user_sgprs(shader, SI_VS_NUM_USER_SGPR)) |
                           S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
}

static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
{
    struct si_pm4_state *pm4;
    uint64_t va;

    pm4 = si_get_shader_pm4_state(shader);
    if (!pm4)
        return;

    va = shader->bo->gpu_address;
    si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

    if (sscreen->info.chip_class >= GFX9) {
        if (sscreen->info.chip_class >= GFX10) {
            si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
            si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40));
        } else {
            si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
            si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40));
        }

        unsigned num_user_sgprs =
            si_get_num_vs_user_sgprs(shader, GFX9_TCS_NUM_USER_SGPR);

        shader->config.rsrc2 =
            S_00B42C_USER_SGPR(num_user_sgprs) |
            S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);

        if (sscreen->info.chip_class >= GFX10)
            shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
        else
            shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
    } else {
        si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
        si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, S_00B424_MEM_BASE(va >> 40));

        shader->config.rsrc2 =
            S_00B42C_USER_SGPR(GFX6_TCS_NUM_USER_SGPR) |
            S_00B42C_OC_LDS_EN(1) |
            S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
    }

    si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
                   S_00B428_VGPRS((shader->config.num_vgprs - 1) /
                                  (sscreen->ge_wave_size == 32 ? 8 : 4)) |
                   (sscreen->info.chip_class <= GFX9 ?
                    S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) : 0) |
                   S_00B428_DX10_CLAMP(1) |
                   S_00B428_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
                   S_00B428_WGP_MODE(sscreen->info.chip_class >= GFX10) |
                   S_00B428_FLOAT_MODE(shader->config.float_mode) |
                   S_00B428_LS_VGPR_COMP_CNT(sscreen->info.chip_class >= GFX9 ?
                                             si_get_vs_vgpr_comp_cnt(sscreen, shader, false) : 0));

    if (sscreen->info.chip_class <= GFX8) {
        si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
                       shader->config.rsrc2);
    }
}

static void si_emit_shader_es(struct si_context *sctx)
{
    struct si_shader *shader = sctx->queued.named.es->shader;
    unsigned initial_cdw = sctx->gfx_cs->current.cdw;

    if (!shader)
        return;

    radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
                               SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
                               shader->selector->esgs_itemsize / 4);

    if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
        radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
                                   SI_TRACKED_VGT_TF_PARAM,
                                   shader->vgt_tf_param);

    if (shader->vgt_vertex_reuse_block_cntl)
        radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                   SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                   shader->vgt_vertex_reuse_block_cntl);

    if (initial_cdw != sctx->gfx_cs->current.cdw)
        sctx->context_roll = true;
}

static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
{
    struct si_pm4_state *pm4;
    unsigned num_user_sgprs;
    unsigned vgpr_comp_cnt;
    uint64_t va;
    unsigned oc_lds_en;

    assert(sscreen->info.chip_class <= GFX8);

    pm4 = si_get_shader_pm4_state(shader);
    if (!pm4)
        return;

    pm4->atom.emit = si_emit_shader_es;
    va = shader->bo->gpu_address;
    si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

    if (shader->selector->type == PIPE_SHADER_VERTEX) {
        vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
        num_user_sgprs = si_get_num_vs_user_sgprs(shader, SI_VS_NUM_USER_SGPR);
    } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
        vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
        num_user_sgprs = SI_TES_NUM_USER_SGPR;
    } else
        unreachable("invalid shader selector type");

    oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

    si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
    si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(va >> 40));
    si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
                   S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
                   S_00B328_SGPRS((shader->config.num_sgprs - 1) / 8) |
                   S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
                   S_00B328_DX10_CLAMP(1) |
                   S_00B328_FLOAT_MODE(shader->config.float_mode));
    si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
                   S_00B32C_USER_SGPR(num_user_sgprs) |
                   S_00B32C_OC_LDS_EN(oc_lds_en) |
                   S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

    if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
        si_set_tesseval_regs(sscreen, shader->selector, pm4);

    polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}

void gfx9_get_gs_info(struct si_shader_selector *es,
                      struct si_shader_selector *gs,
                      struct gfx9_gs_info *out)
{
    unsigned gs_num_invocations = MAX2(gs->gs_num_invocations, 1);
    unsigned input_prim = gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
    bool uses_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
                          input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;

    /* All these are in dwords: */
    /* We can't allow using the whole LDS, because GS waves compete with
     * other shader stages for LDS space. */
    const unsigned max_lds_size = 8 * 1024;
    const unsigned esgs_itemsize = es->esgs_itemsize / 4;
    unsigned esgs_lds_size;

    /* All these are per subgroup: */
    const unsigned max_out_prims = 32 * 1024;
    const unsigned max_es_verts = 255;
    const unsigned ideal_gs_prims = 64;
    unsigned max_gs_prims, gs_prims;
    unsigned min_es_verts, es_verts, worst_case_es_verts;

    if (uses_adjacency || gs_num_invocations > 1)
        max_gs_prims = 127 / gs_num_invocations;
    else
        max_gs_prims = 255;

    /* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
     * Make sure we don't go over the maximum value.
     */
    if (gs->gs_max_out_vertices > 0) {
        max_gs_prims = MIN2(max_gs_prims,
                            max_out_prims /
                            (gs->gs_max_out_vertices * gs_num_invocations));
    }
    assert(max_gs_prims > 0);

    /* If the primitive has adjacency, halve the number of vertices
     * that will be reused in multiple primitives.
     */
    min_es_verts = gs->gs_input_verts_per_prim / (uses_adjacency ? 2 : 1);

    gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
    worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);

    /* Compute ESGS LDS size based on the worst case number of ES vertices
     * needed to create the target number of GS prims per subgroup.
     */
    esgs_lds_size = esgs_itemsize * worst_case_es_verts;

    /* If total LDS usage is too big, refactor partitions based on ratio
     * of ESGS item sizes.
     */
    if (esgs_lds_size > max_lds_size) {
        /* Our target GS Prims Per Subgroup was too large. Calculate
         * the maximum number of GS Prims Per Subgroup that will fit
         * into LDS, capped by the maximum that the hardware can support.
         */
        gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
                        max_gs_prims);
        assert(gs_prims > 0);
        worst_case_es_verts = MIN2(min_es_verts * gs_prims,
                                   max_es_verts);

        esgs_lds_size = esgs_itemsize * worst_case_es_verts;
        assert(esgs_lds_size <= max_lds_size);
    }

    /* Now calculate remaining ESGS information. */
    if (esgs_lds_size)
        es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
    else
        es_verts = max_es_verts;

    /* Vertices for adjacency primitives are not always reused, so restore
     * it for ES_VERTS_PER_SUBGRP.
     */
    min_es_verts = gs->gs_input_verts_per_prim;

    /* For normal primitives, the VGT only checks if they are past the ES
     * verts per subgroup after allocating a full GS primitive and if they
     * are, kick off a new subgroup. But if those additional ES verts are
     * unique (i.e. not reused) we need to make sure there is enough LDS
     * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
     */
    es_verts -= min_es_verts - 1;

    out->es_verts_per_subgroup = es_verts;
    out->gs_prims_per_subgroup = gs_prims;
    out->gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
    out->max_prims_per_subgroup = out->gs_inst_prims_in_subgroup *
                                  gs->gs_max_out_vertices;
    out->esgs_ring_size = 4 * esgs_lds_size;

    assert(out->max_prims_per_subgroup <= max_out_prims);
}
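
/* Worked example (a sketch with assumed inputs): a GS with triangles input
 * (no adjacency), 1 invocation, max_vert_out = 4 and an ES item size of
 * 4 dwords gives max_gs_prims = 255, min_es_verts = 3, gs_prims = 64 and
 * worst_case_es_verts = 192. The LDS size 4 * 192 = 768 dwords fits in the
 * 8K-dword budget, so es_verts = 192 - (3 - 1) = 190, gs_prims_per_subgroup
 * = 64, max_prims_per_subgroup = 64 * 4 = 256 and esgs_ring_size = 3072
 * bytes.
 */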

static void si_emit_shader_gs(struct si_context *sctx)
{
    struct si_shader *shader = sctx->queued.named.gs->shader;
    unsigned initial_cdw = sctx->gfx_cs->current.cdw;

    if (!shader)
        return;

    /* R_028A60_VGT_GSVS_RING_OFFSET_1, R_028A64_VGT_GSVS_RING_OFFSET_2
     * R_028A68_VGT_GSVS_RING_OFFSET_3 */
    radeon_opt_set_context_reg3(sctx, R_028A60_VGT_GSVS_RING_OFFSET_1,
                                SI_TRACKED_VGT_GSVS_RING_OFFSET_1,
                                shader->ctx_reg.gs.vgt_gsvs_ring_offset_1,
                                shader->ctx_reg.gs.vgt_gsvs_ring_offset_2,
                                shader->ctx_reg.gs.vgt_gsvs_ring_offset_3);

    /* R_028AB0_VGT_GSVS_RING_ITEMSIZE */
    radeon_opt_set_context_reg(sctx, R_028AB0_VGT_GSVS_RING_ITEMSIZE,
                               SI_TRACKED_VGT_GSVS_RING_ITEMSIZE,
                               shader->ctx_reg.gs.vgt_gsvs_ring_itemsize);

    /* R_028B38_VGT_GS_MAX_VERT_OUT */
    radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT,
                               SI_TRACKED_VGT_GS_MAX_VERT_OUT,
                               shader->ctx_reg.gs.vgt_gs_max_vert_out);

    /* R_028B5C_VGT_GS_VERT_ITEMSIZE, R_028B60_VGT_GS_VERT_ITEMSIZE_1
     * R_028B64_VGT_GS_VERT_ITEMSIZE_2, R_028B68_VGT_GS_VERT_ITEMSIZE_3 */
    radeon_opt_set_context_reg4(sctx, R_028B5C_VGT_GS_VERT_ITEMSIZE,
                                SI_TRACKED_VGT_GS_VERT_ITEMSIZE,
                                shader->ctx_reg.gs.vgt_gs_vert_itemsize,
                                shader->ctx_reg.gs.vgt_gs_vert_itemsize_1,
                                shader->ctx_reg.gs.vgt_gs_vert_itemsize_2,
                                shader->ctx_reg.gs.vgt_gs_vert_itemsize_3);

    /* R_028B90_VGT_GS_INSTANCE_CNT */
    radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT,
                               SI_TRACKED_VGT_GS_INSTANCE_CNT,
                               shader->ctx_reg.gs.vgt_gs_instance_cnt);

    if (sctx->chip_class >= GFX9) {
        /* R_028A44_VGT_GS_ONCHIP_CNTL */
        radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
                                   SI_TRACKED_VGT_GS_ONCHIP_CNTL,
                                   shader->ctx_reg.gs.vgt_gs_onchip_cntl);
        /* R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP */
        radeon_opt_set_context_reg(sctx, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
                                   SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
                                   shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup);
        /* R_028AAC_VGT_ESGS_RING_ITEMSIZE */
        radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
                                   SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
                                   shader->ctx_reg.gs.vgt_esgs_ring_itemsize);

        if (shader->key.part.gs.es->type == PIPE_SHADER_TESS_EVAL)
            radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
                                       SI_TRACKED_VGT_TF_PARAM,
                                       shader->vgt_tf_param);
        if (shader->vgt_vertex_reuse_block_cntl)
            radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                       SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
                                       shader->vgt_vertex_reuse_block_cntl);
    }

    if (initial_cdw != sctx->gfx_cs->current.cdw)
        sctx->context_roll = true;
}

static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
{
    struct si_shader_selector *sel = shader->selector;
    const ubyte *num_components = sel->info.num_stream_output_components;
    unsigned gs_num_invocations = sel->gs_num_invocations;
    struct si_pm4_state *pm4;
    uint64_t va;
    unsigned max_stream = sel->max_gs_stream;
    unsigned offset;

    pm4 = si_get_shader_pm4_state(shader);
    if (!pm4)
        return;

    pm4->atom.emit = si_emit_shader_gs;

    offset = num_components[0] * sel->gs_max_out_vertices;
    shader->ctx_reg.gs.vgt_gsvs_ring_offset_1 = offset;

    if (max_stream >= 1)
        offset += num_components[1] * sel->gs_max_out_vertices;
    shader->ctx_reg.gs.vgt_gsvs_ring_offset_2 = offset;

    if (max_stream >= 2)
        offset += num_components[2] * sel->gs_max_out_vertices;
    shader->ctx_reg.gs.vgt_gsvs_ring_offset_3 = offset;

    if (max_stream >= 3)
        offset += num_components[3] * sel->gs_max_out_vertices;
    shader->ctx_reg.gs.vgt_gsvs_ring_itemsize = offset;

    /* The GSVS_RING_ITEMSIZE register takes 15 bits */
    assert(offset < (1 << 15));

    shader->ctx_reg.gs.vgt_gs_max_vert_out = sel->gs_max_out_vertices;

    shader->ctx_reg.gs.vgt_gs_vert_itemsize = num_components[0];
    shader->ctx_reg.gs.vgt_gs_vert_itemsize_1 = (max_stream >= 1) ? num_components[1] : 0;
    shader->ctx_reg.gs.vgt_gs_vert_itemsize_2 = (max_stream >= 2) ? num_components[2] : 0;
    shader->ctx_reg.gs.vgt_gs_vert_itemsize_3 = (max_stream >= 3) ? num_components[3] : 0;

    shader->ctx_reg.gs.vgt_gs_instance_cnt = S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
                                             S_028B90_ENABLE(gs_num_invocations > 0);

    va = shader->bo->gpu_address;
    si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

    if (sscreen->info.chip_class >= GFX9) {
        unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
        unsigned es_type = shader->key.part.gs.es->type;
        unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;

        if (es_type == PIPE_SHADER_VERTEX) {
            es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
        } else if (es_type == PIPE_SHADER_TESS_EVAL)
            es_vgpr_comp_cnt = shader->key.part.gs.es->info.uses_primid ? 3 : 2;
        else
            unreachable("invalid shader selector type");

        /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
         * VGPR[0:4] are always loaded.
         */
        if (sel->info.uses_invocationid)
            gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
        else if (sel->info.uses_primid)
            gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
        else if (input_prim >= PIPE_PRIM_TRIANGLES)
            gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
        else
            gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */

        unsigned num_user_sgprs;
        if (es_type == PIPE_SHADER_VERTEX)
            num_user_sgprs = si_get_num_vs_user_sgprs(shader, GFX9_VSGS_NUM_USER_SGPR);
        else
            num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;

        if (sscreen->info.chip_class >= GFX10) {
            si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
            si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(va >> 40));
        } else {
            si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
            si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, S_00B214_MEM_BASE(va >> 40));
        }

        uint32_t rsrc1 =
            S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
            S_00B228_DX10_CLAMP(1) |
            S_00B228_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
            S_00B228_WGP_MODE(sscreen->info.chip_class >= GFX10) |
            S_00B228_FLOAT_MODE(shader->config.float_mode) |
            S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
        uint32_t rsrc2 =
            S_00B22C_USER_SGPR(num_user_sgprs) |
            S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
            S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
            S_00B22C_LDS_SIZE(shader->config.lds_size) |
            S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);

        if (sscreen->info.chip_class >= GFX10) {
            rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
        } else {
            rsrc1 |= S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8);
            rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
        }

        si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS, rsrc1);
        si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS, rsrc2);

        if (sscreen->info.chip_class >= GFX10) {
            si_pm4_set_reg(pm4, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
                           S_00B204_CU_EN(0xffff) |
                           S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(0));
        }

        shader->ctx_reg.gs.vgt_gs_onchip_cntl =
            S_028A44_ES_VERTS_PER_SUBGRP(shader->gs_info.es_verts_per_subgroup) |
            S_028A44_GS_PRIMS_PER_SUBGRP(shader->gs_info.gs_prims_per_subgroup) |
            S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->gs_info.gs_inst_prims_in_subgroup);
        shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup =
            S_028A94_MAX_PRIMS_PER_SUBGROUP(shader->gs_info.max_prims_per_subgroup);
        shader->ctx_reg.gs.vgt_esgs_ring_itemsize =
            shader->key.part.gs.es->esgs_itemsize / 4;

        if (es_type == PIPE_SHADER_TESS_EVAL)
            si_set_tesseval_regs(sscreen, shader->key.part.gs.es, pm4);

        polaris_set_vgt_vertex_reuse(sscreen, shader->key.part.gs.es,
                                     NULL, pm4);
    } else {
        si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
        si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, S_00B224_MEM_BASE(va >> 40));

        si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
                       S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
                       S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
                       S_00B228_DX10_CLAMP(1) |
                       S_00B228_FLOAT_MODE(shader->config.float_mode));
        si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
                       S_00B22C_USER_SGPR(GFX6_GS_NUM_USER_SGPR) |
                       S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
    }
}

static void gfx10_emit_ge_pc_alloc(struct si_context *sctx, unsigned value)
{
    enum si_tracked_reg reg = SI_TRACKED_GE_PC_ALLOC;

    if (((sctx->tracked_regs.reg_saved >> reg) & 0x1) != 0x1 ||
        sctx->tracked_regs.reg_value[reg] != value) {
        struct radeon_cmdbuf *cs = sctx->gfx_cs;

        if (sctx->family == CHIP_NAVI10 ||
            sctx->family == CHIP_NAVI12 ||
            sctx->family == CHIP_NAVI14) {
            /* SQ_NON_EVENT must be emitted before GE_PC_ALLOC is written. */
            radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
            radeon_emit(cs, EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
        }

        radeon_set_uconfig_reg(cs, R_030980_GE_PC_ALLOC, value);

        sctx->tracked_regs.reg_saved |= 0x1ull << reg;
        sctx->tracked_regs.reg_value[reg] = value;
    }
}

/* Common tail code for NGG primitive shaders. */
static void gfx10_emit_shader_ngg_tail(struct si_context *sctx,
                                       struct si_shader *shader,
                                       unsigned initial_cdw)
{
    radeon_opt_set_context_reg(sctx, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
                               SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP,
                               shader->ctx_reg.ngg.ge_max_output_per_subgroup);
    radeon_opt_set_context_reg(sctx, R_028B4C_GE_NGG_SUBGRP_CNTL,
                               SI_TRACKED_GE_NGG_SUBGRP_CNTL,
                               shader->ctx_reg.ngg.ge_ngg_subgrp_cntl);
    radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN,
                               SI_TRACKED_VGT_PRIMITIVEID_EN,
                               shader->ctx_reg.ngg.vgt_primitiveid_en);
    radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
                               SI_TRACKED_VGT_GS_ONCHIP_CNTL,
                               shader->ctx_reg.ngg.vgt_gs_onchip_cntl);
    radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT,
                               SI_TRACKED_VGT_GS_INSTANCE_CNT,
                               shader->ctx_reg.ngg.vgt_gs_instance_cnt);
    radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
                               SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
                               shader->ctx_reg.ngg.vgt_esgs_ring_itemsize);
    radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG,
                               SI_TRACKED_SPI_VS_OUT_CONFIG,
                               shader->ctx_reg.ngg.spi_vs_out_config);
    radeon_opt_set_context_reg2(sctx, R_028708_SPI_SHADER_IDX_FORMAT,
                                SI_TRACKED_SPI_SHADER_IDX_FORMAT,
                                shader->ctx_reg.ngg.spi_shader_idx_format,
                                shader->ctx_reg.ngg.spi_shader_pos_format);
    radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL,
                               SI_TRACKED_PA_CL_VTE_CNTL,
                               shader->ctx_reg.ngg.pa_cl_vte_cntl);
    radeon_opt_set_context_reg(sctx, R_028838_PA_CL_NGG_CNTL,
                               SI_TRACKED_PA_CL_NGG_CNTL,
                               shader->ctx_reg.ngg.pa_cl_ngg_cntl);

    radeon_opt_set_context_reg_rmw(sctx, R_02881C_PA_CL_VS_OUT_CNTL,
                                   SI_TRACKED_PA_CL_VS_OUT_CNTL__VS,
                                   shader->pa_cl_vs_out_cntl,
                                   SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK);

    if (initial_cdw != sctx->gfx_cs->current.cdw)
        sctx->context_roll = true;

    /* GE_PC_ALLOC is not a context register, so it doesn't cause a context roll. */
    gfx10_emit_ge_pc_alloc(sctx, shader->ctx_reg.ngg.ge_pc_alloc);
}

static void gfx10_emit_shader_ngg_notess_nogs(struct si_context *sctx)
{
    struct si_shader *shader = sctx->queued.named.gs->shader;
    unsigned initial_cdw = sctx->gfx_cs->current.cdw;

    if (!shader)
        return;

    gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
}

static void gfx10_emit_shader_ngg_tess_nogs(struct si_context *sctx)
{
    struct si_shader *shader = sctx->queued.named.gs->shader;
    unsigned initial_cdw = sctx->gfx_cs->current.cdw;

    if (!shader)
        return;

    radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
                               SI_TRACKED_VGT_TF_PARAM,
                               shader->vgt_tf_param);

    gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
}

static void gfx10_emit_shader_ngg_notess_gs(struct si_context *sctx)
{
    struct si_shader *shader = sctx->queued.named.gs->shader;
    unsigned initial_cdw = sctx->gfx_cs->current.cdw;

    if (!shader)
        return;

    radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT,
                               SI_TRACKED_VGT_GS_MAX_VERT_OUT,
                               shader->ctx_reg.ngg.vgt_gs_max_vert_out);

    gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
}

static void gfx10_emit_shader_ngg_tess_gs(struct si_context *sctx)
{
    struct si_shader *shader = sctx->queued.named.gs->shader;
    unsigned initial_cdw = sctx->gfx_cs->current.cdw;

    if (!shader)
        return;

    radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT,
                               SI_TRACKED_VGT_GS_MAX_VERT_OUT,
                               shader->ctx_reg.ngg.vgt_gs_max_vert_out);
    radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
                               SI_TRACKED_VGT_TF_PARAM,
                               shader->vgt_tf_param);

    gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
}

unsigned si_get_input_prim(const struct si_shader_selector *gs)
{
    if (gs->type == PIPE_SHADER_GEOMETRY)
        return gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];

    if (gs->type == PIPE_SHADER_TESS_EVAL) {
        if (gs->info.properties[TGSI_PROPERTY_TES_POINT_MODE])
            return PIPE_PRIM_POINTS;
        if (gs->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
            return PIPE_PRIM_LINES;
        return PIPE_PRIM_TRIANGLES;
    }

    /* TODO: Set this correctly if the primitive type is set in the shader key. */
    return PIPE_PRIM_TRIANGLES; /* worst case for all callers */
}

static unsigned si_get_vs_out_cntl(const struct si_shader_selector *sel, bool ngg)
{
    bool misc_vec_ena =
        sel->info.writes_psize || (sel->info.writes_edgeflag && !ngg) ||
        sel->info.writes_layer || sel->info.writes_viewport_index;
    return S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) |
           S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag && !ngg) |
           S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
           S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
           S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
           S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
}

/**
 * Prepare the PM4 image for \p shader, which will run as a merged ESGS shader
 * in NGG mode.
 */
static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader)
{
    const struct si_shader_selector *gs_sel = shader->selector;
    const struct si_shader_info *gs_info = &gs_sel->info;
    enum pipe_shader_type gs_type = shader->selector->type;
    const struct si_shader_selector *es_sel =
        shader->previous_stage_sel ? shader->previous_stage_sel : shader->selector;
    const struct si_shader_info *es_info = &es_sel->info;
    enum pipe_shader_type es_type = es_sel->type;
    unsigned num_user_sgprs;
    unsigned nparams, es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
    uint64_t va;
    unsigned window_space =
        gs_info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
    bool es_enable_prim_id = shader->key.mono.u.vs_export_prim_id || es_info->uses_primid;
    unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
    unsigned input_prim = si_get_input_prim(gs_sel);
    bool break_wave_at_eoi = false;
    struct si_pm4_state *pm4 = si_get_shader_pm4_state(shader);
    if (!pm4)
        return;

    if (es_type == PIPE_SHADER_TESS_EVAL) {
        pm4->atom.emit = gs_type == PIPE_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_tess_gs
                                                         : gfx10_emit_shader_ngg_tess_nogs;
    } else {
        pm4->atom.emit = gs_type == PIPE_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_notess_gs
                                                         : gfx10_emit_shader_ngg_notess_nogs;
    }

    va = shader->bo->gpu_address;
    si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

    if (es_type == PIPE_SHADER_VERTEX) {
        es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);

        if (es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
            num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
                             es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
        } else {
            num_user_sgprs = si_get_num_vs_user_sgprs(shader, GFX9_VSGS_NUM_USER_SGPR);
        }
    } else {
        assert(es_type == PIPE_SHADER_TESS_EVAL);
        es_vgpr_comp_cnt = es_enable_prim_id ? 3 : 2;
        num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;

        if (es_enable_prim_id || gs_info->uses_primid)
            break_wave_at_eoi = true;
    }

    /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
     * VGPR[0:4] are always loaded.
     *
     * Vertex shaders always need to load VGPR3, because they need to
     * pass edge flags for decomposed primitives (such as quads) to the PA
     * for the GL_LINE polygon mode to skip rendering lines on inner edges.
     */
    if (gs_info->uses_invocationid ||
        (gs_type == PIPE_SHADER_VERTEX && !gfx10_is_ngg_passthrough(shader)))
        gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID, edge flags. */
    else if ((gs_type == PIPE_SHADER_GEOMETRY && gs_info->uses_primid) ||
             (gs_type == PIPE_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id))
        gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
    else if (input_prim >= PIPE_PRIM_TRIANGLES && !gfx10_is_ngg_passthrough(shader))
        gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
    else
        gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */

    si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
    si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
    si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
                   S_00B228_VGPRS((shader->config.num_vgprs - 1) /
                                  (sscreen->ge_wave_size == 32 ? 8 : 4)) |
                   S_00B228_FLOAT_MODE(shader->config.float_mode) |
                   S_00B228_DX10_CLAMP(1) |
                   S_00B228_MEM_ORDERED(1) |
                   S_00B228_WGP_MODE(1) |
                   S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
    si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
                   S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0) |
                   S_00B22C_USER_SGPR(num_user_sgprs) |
                   S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
                   S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5) |
                   S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
                   S_00B22C_LDS_SIZE(shader->config.lds_size));

    /* Determine LATE_ALLOC_GS. */
    unsigned num_cu_per_sh = sscreen->info.num_good_cu_per_sh;
    unsigned late_alloc_wave64; /* The limit is per SH. */

    /* For Wave32, the hw will launch twice the number of late
     * alloc waves, so 1 == 2x wave32.
     *
     * Don't use late alloc for NGG on Navi14 due to a hw bug.
     */
    if (sscreen->info.family == CHIP_NAVI14 || !sscreen->info.use_late_alloc)
        late_alloc_wave64 = 0;
    else if (num_cu_per_sh <= 6)
        late_alloc_wave64 = num_cu_per_sh - 2; /* All CUs enabled */
    else if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL)
        late_alloc_wave64 = (num_cu_per_sh - 2) * 6;
    else
        late_alloc_wave64 = (num_cu_per_sh - 2) * 4;

    /* Limit LATE_ALLOC_GS to prevent a hang (hw bug). */
    if (sscreen->info.family == CHIP_NAVI10 ||
        sscreen->info.family == CHIP_NAVI12 ||
        sscreen->info.family == CHIP_NAVI14)
        late_alloc_wave64 = MIN2(late_alloc_wave64, 64);
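
    /* Worked example (a sketch with an assumed CU count): a part with 10
     * good CUs per SH and no fast-launch culling gets (10 - 2) * 4 = 32
     * wave64 slots, which already sits below the 64-slot Navi1x clamp. */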

    si_pm4_set_reg(pm4, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
                   S_00B204_CU_EN(0xffff) |
                   S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(late_alloc_wave64));

    nparams = MAX2(shader->info.nr_param_exports, 1);
    shader->ctx_reg.ngg.spi_vs_out_config =
        S_0286C4_VS_EXPORT_COUNT(nparams - 1) |
        S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0);

    shader->ctx_reg.ngg.spi_shader_idx_format =
        S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP);
    shader->ctx_reg.ngg.spi_shader_pos_format =
        S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
        S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
                                    V_02870C_SPI_SHADER_4COMP :
                                    V_02870C_SPI_SHADER_NONE) |
        S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
                                    V_02870C_SPI_SHADER_4COMP :
                                    V_02870C_SPI_SHADER_NONE) |
        S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
                                    V_02870C_SPI_SHADER_4COMP :
                                    V_02870C_SPI_SHADER_NONE);

    shader->ctx_reg.ngg.vgt_primitiveid_en =
        S_028A84_PRIMITIVEID_EN(es_enable_prim_id) |
        S_028A84_NGG_DISABLE_PROVOK_REUSE(shader->key.mono.u.vs_export_prim_id ||
                                          gs_sel->info.writes_primid);

    if (gs_type == PIPE_SHADER_GEOMETRY) {
        shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = es_sel->esgs_itemsize / 4;
        shader->ctx_reg.ngg.vgt_gs_max_vert_out = gs_sel->gs_max_out_vertices;
    } else {
        shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = 1;
    }

    if (es_type == PIPE_SHADER_TESS_EVAL)
        si_set_tesseval_regs(sscreen, es_sel, pm4);

    shader->ctx_reg.ngg.vgt_gs_onchip_cntl =
        S_028A44_ES_VERTS_PER_SUBGRP(shader->ngg.hw_max_esverts) |
        S_028A44_GS_PRIMS_PER_SUBGRP(shader->ngg.max_gsprims) |
        S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->ngg.max_gsprims * gs_num_invocations);
    shader->ctx_reg.ngg.ge_max_output_per_subgroup =
        S_0287FC_MAX_VERTS_PER_SUBGROUP(shader->ngg.max_out_verts);
    shader->ctx_reg.ngg.ge_ngg_subgrp_cntl =
        S_028B4C_PRIM_AMP_FACTOR(shader->ngg.prim_amp_factor) |
        S_028B4C_THDS_PER_SUBGRP(0); /* for fast launch */
    shader->ctx_reg.ngg.vgt_gs_instance_cnt =
        S_028B90_CNT(gs_num_invocations) |
        S_028B90_ENABLE(gs_num_invocations > 1) |
        S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(
            shader->ngg.max_vert_out_per_gs_instance);

    /* Always output hw-generated edge flags and pass them via the prim
     * export to prevent drawing lines on internal edges of decomposed
     * primitives (such as quads) with polygon mode = lines. Only VS needs
     * this.
     */
    shader->ctx_reg.ngg.pa_cl_ngg_cntl =
        S_028838_INDEX_BUF_EDGE_FLAG_ENA(gs_type == PIPE_SHADER_VERTEX);
    shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(gs_sel, true);

    /* Oversubscribe PC. This improves performance when there are too many varyings. */
    float oversub_pc_factor = 0.25;

    if (shader->key.opt.ngg_culling) {
        /* Be more aggressive with NGG culling. */
        if (shader->info.nr_param_exports > 4)
            oversub_pc_factor = 1;
        else if (shader->info.nr_param_exports > 2)
            oversub_pc_factor = 0.75;
        else
            oversub_pc_factor = 0.5;
    }

    unsigned oversub_pc_lines = sscreen->info.pc_lines * oversub_pc_factor;
    shader->ctx_reg.ngg.ge_pc_alloc = S_030980_OVERSUB_EN(sscreen->info.use_late_alloc) |
                                      S_030980_NUM_PC_LINES(oversub_pc_lines - 1);

    if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST) {
        shader->ge_cntl =
            S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) |
            S_03096C_VERT_GRP_SIZE(shader->ngg.max_gsprims * 3);
    } else if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP) {
        shader->ge_cntl =
            S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) |
            S_03096C_VERT_GRP_SIZE(shader->ngg.max_gsprims + 2);
    } else {
        shader->ge_cntl =
            S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) |
            S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
            S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);

        /* Bug workaround for a possible hang with non-tessellation cases.
         * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0
         *
         * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
         */
        if ((sscreen->info.family == CHIP_NAVI10 ||
             sscreen->info.family == CHIP_NAVI12 ||
             sscreen->info.family == CHIP_NAVI14) &&
            (es_type == PIPE_SHADER_VERTEX || gs_type == PIPE_SHADER_VERTEX) && /* = no tess */
            shader->ngg.hw_max_esverts != 256) {
            shader->ge_cntl &= C_03096C_VERT_GRP_SIZE;

            if (shader->ngg.hw_max_esverts > 5) {
                shader->ge_cntl |=
                    S_03096C_VERT_GRP_SIZE(shader->ngg.hw_max_esverts - 5);
            }
        }
    }

    if (window_space) {
        shader->ctx_reg.ngg.pa_cl_vte_cntl =
            S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1);
    } else {
        shader->ctx_reg.ngg.pa_cl_vte_cntl =
            S_028818_VTX_W0_FMT(1) |
            S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
            S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
            S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1);
    }
}
1371
1372 static void si_emit_shader_vs(struct si_context *sctx)
1373 {
1374 struct si_shader *shader = sctx->queued.named.vs->shader;
1375 unsigned initial_cdw = sctx->gfx_cs->current.cdw;
1376
1377 if (!shader)
1378 return;
1379
1380 radeon_opt_set_context_reg(sctx, R_028A40_VGT_GS_MODE,
1381 SI_TRACKED_VGT_GS_MODE,
1382 shader->ctx_reg.vs.vgt_gs_mode);
1383 radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN,
1384 SI_TRACKED_VGT_PRIMITIVEID_EN,
1385 shader->ctx_reg.vs.vgt_primitiveid_en);
1386
1387 if (sctx->chip_class <= GFX8) {
1388 radeon_opt_set_context_reg(sctx, R_028AB4_VGT_REUSE_OFF,
1389 SI_TRACKED_VGT_REUSE_OFF,
1390 shader->ctx_reg.vs.vgt_reuse_off);
1391 }
1392
1393 radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG,
1394 SI_TRACKED_SPI_VS_OUT_CONFIG,
1395 shader->ctx_reg.vs.spi_vs_out_config);
1396
1397 radeon_opt_set_context_reg(sctx, R_02870C_SPI_SHADER_POS_FORMAT,
1398 SI_TRACKED_SPI_SHADER_POS_FORMAT,
1399 shader->ctx_reg.vs.spi_shader_pos_format);
1400
1401 radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL,
1402 SI_TRACKED_PA_CL_VTE_CNTL,
1403 shader->ctx_reg.vs.pa_cl_vte_cntl);
1404
1405 if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
1406 radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
1407 SI_TRACKED_VGT_TF_PARAM,
1408 shader->vgt_tf_param);
1409
1410 if (shader->vgt_vertex_reuse_block_cntl)
1411 radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
1412 SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
1413 shader->vgt_vertex_reuse_block_cntl);
1414
1415 /* Required programming for tessellation. (legacy pipeline only) */
1416 if (sctx->chip_class == GFX10 &&
1417 shader->selector->type == PIPE_SHADER_TESS_EVAL) {
1418 radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
1419 SI_TRACKED_VGT_GS_ONCHIP_CNTL,
1420 S_028A44_ES_VERTS_PER_SUBGRP(250) |
1421 S_028A44_GS_PRIMS_PER_SUBGRP(126) |
1422 S_028A44_GS_INST_PRIMS_IN_SUBGRP(126));
1423 }
1424
1425 if (sctx->chip_class >= GFX10) {
1426 radeon_opt_set_context_reg_rmw(sctx, R_02881C_PA_CL_VS_OUT_CNTL,
1427 SI_TRACKED_PA_CL_VS_OUT_CNTL__VS,
1428 shader->pa_cl_vs_out_cntl,
1429 SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK);
1430 }
1431
1432 if (initial_cdw != sctx->gfx_cs->current.cdw)
1433 sctx->context_roll = true;
1434
1435 /* GE_PC_ALLOC is not a context register, so it doesn't cause a context roll. */
1436 if (sctx->chip_class >= GFX10)
1437 gfx10_emit_ge_pc_alloc(sctx, shader->ctx_reg.vs.ge_pc_alloc);
1438 }
1439
1440 /**
1441 * Compute the state for \p shader, which will run as a vertex shader on the
1442 * hardware.
1443 *
1444 * If \p gs is non-NULL, it points to the geometry shader for which this shader
1445 * is the copy shader.
1446 */
1447 static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
1448 struct si_shader_selector *gs)
1449 {
1450 const struct si_shader_info *info = &shader->selector->info;
1451 struct si_pm4_state *pm4;
1452 unsigned num_user_sgprs, vgpr_comp_cnt;
1453 uint64_t va;
1454 unsigned nparams, oc_lds_en;
1455 unsigned window_space =
1456 info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
1457 bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || info->uses_primid;
1458
1459 pm4 = si_get_shader_pm4_state(shader);
1460 if (!pm4)
1461 return;
1462
1463 pm4->atom.emit = si_emit_shader_vs;
1464
1465 /* We always write VGT_GS_MODE in the VS state, because every switch
1466 * between different shader pipelines involving a different GS or no
1467 * GS at all involves a switch of the VS (different GS use different
1468 * copy shaders). On the other hand, when the API switches from a GS to
1469 * no GS and then back to the same GS used originally, the GS state is
1470 * not sent again.
1471 */
1472 if (!gs) {
1473 unsigned mode = V_028A40_GS_OFF;
1474
1475 /* PrimID needs GS scenario A. */
1476 if (enable_prim_id)
1477 mode = V_028A40_GS_SCENARIO_A;
1478
1479 shader->ctx_reg.vs.vgt_gs_mode = S_028A40_MODE(mode);
1480 shader->ctx_reg.vs.vgt_primitiveid_en = enable_prim_id;
1481 } else {
1482 shader->ctx_reg.vs.vgt_gs_mode = ac_vgt_gs_mode(gs->gs_max_out_vertices,
1483 sscreen->info.chip_class);
1484 shader->ctx_reg.vs.vgt_primitiveid_en = 0;
1485 }
1486
1487 if (sscreen->info.chip_class <= GFX8) {
1488 /* Reuse needs to be set off if we write oViewport. */
1489 shader->ctx_reg.vs.vgt_reuse_off =
1490 S_028AB4_REUSE_OFF(info->writes_viewport_index);
1491 }
1492
1493 va = shader->bo->gpu_address;
1494 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
1495
1496 if (gs) {
1497 vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
1498 num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
1499 } else if (shader->selector->type == PIPE_SHADER_VERTEX) {
1500 vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, enable_prim_id);
1501
1502 if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
1503 num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
1504 info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
1505 } else {
1506 num_user_sgprs = si_get_num_vs_user_sgprs(shader, SI_VS_NUM_USER_SGPR);
1507 }
1508 } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
1509 vgpr_comp_cnt = enable_prim_id ? 3 : 2;
1510 num_user_sgprs = SI_TES_NUM_USER_SGPR;
1511 } else
1512 unreachable("invalid shader selector type");
1513
1514 /* VS is required to export at least one param. */
1515 nparams = MAX2(shader->info.nr_param_exports, 1);
1516 shader->ctx_reg.vs.spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);
1517
1518 if (sscreen->info.chip_class >= GFX10) {
1519 shader->ctx_reg.vs.spi_vs_out_config |=
1520 S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0);
1521 }
1522
1523 shader->ctx_reg.vs.spi_shader_pos_format =
1524 S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
1525 S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
1526 V_02870C_SPI_SHADER_4COMP :
1527 V_02870C_SPI_SHADER_NONE) |
1528 S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
1529 V_02870C_SPI_SHADER_4COMP :
1530 V_02870C_SPI_SHADER_NONE) |
1531 S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
1532 V_02870C_SPI_SHADER_4COMP :
1533 V_02870C_SPI_SHADER_NONE);
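	/* Parameter-cache allocation: enable oversubscription when late
	 * alloc is in use, and allocate pc_lines/4 lines (the field is
	 * minus-one encoded).
	 */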
1534 shader->ctx_reg.vs.ge_pc_alloc = S_030980_OVERSUB_EN(sscreen->info.use_late_alloc) |
1535 S_030980_NUM_PC_LINES(sscreen->info.pc_lines / 4 - 1);
1536 shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(shader->selector, false);
1537
1538 oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;
1539
1540 si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
1541 si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, S_00B124_MEM_BASE(va >> 40));
1542
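	/* RSRC1 encoding: the VGPR count is minus-one encoded in units of
	 * 8 VGPRs for Wave32 and 4 VGPRs for Wave64, hence the divisor
	 * selected by ge_wave_size below.
	 */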
1543 uint32_t rsrc1 = S_00B128_VGPRS((shader->config.num_vgprs - 1) /
1544 (sscreen->ge_wave_size == 32 ? 8 : 4)) |
1545 S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
1546 S_00B128_DX10_CLAMP(1) |
1547 S_00B128_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
1548 S_00B128_FLOAT_MODE(shader->config.float_mode);
1549 uint32_t rsrc2 = S_00B12C_USER_SGPR(num_user_sgprs) |
1550 S_00B12C_OC_LDS_EN(oc_lds_en) |
1551 S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
1552
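	/* More than 32 user SGPRs need the MSB of the count stored in a
	 * separate field, whose bit position differs between GFX9 and GFX10.
	 */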
1553 if (sscreen->info.chip_class >= GFX10)
1554 rsrc2 |= S_00B12C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
1555 else if (sscreen->info.chip_class == GFX9)
1556 rsrc2 |= S_00B12C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
1557
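	/* Only GFX9 and older program the SGPR count; newer chips use a
	 * fixed SGPR allocation, so the field is left at 0 there.
	 */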
1558 if (sscreen->info.chip_class <= GFX9)
1559 rsrc1 |= S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8);
1560
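	/* Legacy (non-NGG) streamout: enable each SO buffer that has a
	 * nonzero stride, and streamout as a whole if any outputs exist.
	 */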
1561 if (!sscreen->use_ngg_streamout) {
1562 rsrc2 |= S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
1563 S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
1564 S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
1565 S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
1566 S_00B12C_SO_EN(!!shader->selector->so.num_outputs);
1567 }
1568
1569 si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS, rsrc1);
1570 si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS, rsrc2);
1571
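	/* Window-space positions bypass the viewport transform, so only the
	 * XY/Z format bits are needed; otherwise enable the full viewport
	 * scale/offset transform.
	 */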
1572 if (window_space)
1573 shader->ctx_reg.vs.pa_cl_vte_cntl =
1574 S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1);
1575 else
1576 shader->ctx_reg.vs.pa_cl_vte_cntl =
1577 S_028818_VTX_W0_FMT(1) |
1578 S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
1579 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
1580 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1);
1581
1582 if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
1583 si_set_tesseval_regs(sscreen, shader->selector, pm4);
1584
1585 polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
1586 }
1587
1588 static unsigned si_get_ps_num_interp(struct si_shader *ps)
1589 {
1590 struct si_shader_info *info = &ps->selector->info;
1591 unsigned num_colors = !!(info->colors_read & 0x0f) +
1592 !!(info->colors_read & 0xf0);
1593 unsigned num_interp = ps->selector->info.num_inputs +
1594 (ps->key.part.ps.prolog.color_two_side ? num_colors : 0);
1595
1596 assert(num_interp <= 32);
1597 return MIN2(num_interp, 32);
1598 }
1599
1600 static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
1601 {
1602 unsigned value = shader->key.part.ps.epilog.spi_shader_col_format;
1603 unsigned i, num_targets = (util_last_bit(value) + 3) / 4;
1604
1605 /* If the i-th target format is set, all previous target formats must
1606 * be non-zero to avoid hangs.
1607 */
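	/* Example: col_format 0x00000F00 (only MRT2 written) becomes
	 * 0x00000F11, filling MRT0 and MRT1 with SPI_SHADER_32_R.
	 */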
1608 for (i = 0; i < num_targets; i++)
1609 if (!(value & (0xf << (i * 4))))
1610 value |= V_028714_SPI_SHADER_32_R << (i * 4);
1611
1612 return value;
1613 }
1614
1615 static void si_emit_shader_ps(struct si_context *sctx)
1616 {
1617 struct si_shader *shader = sctx->queued.named.ps->shader;
1618 unsigned initial_cdw = sctx->gfx_cs->current.cdw;
1619
1620 if (!shader)
1621 return;
1622
1623 	/* R_0286CC_SPI_PS_INPUT_ENA, R_0286D0_SPI_PS_INPUT_ADDR */
1624 radeon_opt_set_context_reg2(sctx, R_0286CC_SPI_PS_INPUT_ENA,
1625 SI_TRACKED_SPI_PS_INPUT_ENA,
1626 shader->ctx_reg.ps.spi_ps_input_ena,
1627 shader->ctx_reg.ps.spi_ps_input_addr);
1628
1629 radeon_opt_set_context_reg(sctx, R_0286E0_SPI_BARYC_CNTL,
1630 SI_TRACKED_SPI_BARYC_CNTL,
1631 shader->ctx_reg.ps.spi_baryc_cntl);
1632 radeon_opt_set_context_reg(sctx, R_0286D8_SPI_PS_IN_CONTROL,
1633 SI_TRACKED_SPI_PS_IN_CONTROL,
1634 shader->ctx_reg.ps.spi_ps_in_control);
1635
1636 /* R_028710_SPI_SHADER_Z_FORMAT, R_028714_SPI_SHADER_COL_FORMAT */
1637 radeon_opt_set_context_reg2(sctx, R_028710_SPI_SHADER_Z_FORMAT,
1638 SI_TRACKED_SPI_SHADER_Z_FORMAT,
1639 shader->ctx_reg.ps.spi_shader_z_format,
1640 shader->ctx_reg.ps.spi_shader_col_format);
1641
1642 radeon_opt_set_context_reg(sctx, R_02823C_CB_SHADER_MASK,
1643 SI_TRACKED_CB_SHADER_MASK,
1644 shader->ctx_reg.ps.cb_shader_mask);
1645
1646 if (initial_cdw != sctx->gfx_cs->current.cdw)
1647 sctx->context_roll = true;
1648 }
1649
1650 static void si_shader_ps(struct si_screen *sscreen, struct si_shader *shader)
1651 {
1652 struct si_shader_info *info = &shader->selector->info;
1653 struct si_pm4_state *pm4;
1654 unsigned spi_ps_in_control, spi_shader_col_format, cb_shader_mask;
1655 unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
1656 uint64_t va;
1657 unsigned input_ena = shader->config.spi_ps_input_ena;
1658
1659 	/* We need to enable at least one of these inputs; otherwise the GPU hangs. */
1660 assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
1661 G_0286CC_PERSP_CENTER_ENA(input_ena) ||
1662 G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
1663 G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
1664 G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
1665 G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
1666 G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
1667 G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));
1668 /* POS_W_FLOAT_ENA requires one of the perspective weights. */
1669 assert(!G_0286CC_POS_W_FLOAT_ENA(input_ena) ||
1670 G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
1671 G_0286CC_PERSP_CENTER_ENA(input_ena) ||
1672 G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
1673 G_0286CC_PERSP_PULL_MODEL_ENA(input_ena));
1674
1675 /* Validate interpolation optimization flags (read as implications). */
1676 assert(!shader->key.part.ps.prolog.bc_optimize_for_persp ||
1677 (G_0286CC_PERSP_CENTER_ENA(input_ena) &&
1678 G_0286CC_PERSP_CENTROID_ENA(input_ena)));
1679 assert(!shader->key.part.ps.prolog.bc_optimize_for_linear ||
1680 (G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
1681 G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
1682 assert(!shader->key.part.ps.prolog.force_persp_center_interp ||
1683 (!G_0286CC_PERSP_SAMPLE_ENA(input_ena) &&
1684 !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
1685 assert(!shader->key.part.ps.prolog.force_linear_center_interp ||
1686 (!G_0286CC_LINEAR_SAMPLE_ENA(input_ena) &&
1687 !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
1688 assert(!shader->key.part.ps.prolog.force_persp_sample_interp ||
1689 (!G_0286CC_PERSP_CENTER_ENA(input_ena) &&
1690 !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
1691 assert(!shader->key.part.ps.prolog.force_linear_sample_interp ||
1692 (!G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
1693 !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
1694
1695 /* Validate cases when the optimizations are off (read as implications). */
1696 assert(shader->key.part.ps.prolog.bc_optimize_for_persp ||
1697 !G_0286CC_PERSP_CENTER_ENA(input_ena) ||
1698 !G_0286CC_PERSP_CENTROID_ENA(input_ena));
1699 assert(shader->key.part.ps.prolog.bc_optimize_for_linear ||
1700 !G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
1701 !G_0286CC_LINEAR_CENTROID_ENA(input_ena));
1702
1703 pm4 = si_get_shader_pm4_state(shader);
1704 if (!pm4)
1705 return;
1706
1707 pm4->atom.emit = si_emit_shader_ps;
1708
1709 /* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
1710 	 * Possible values:
1711 * 0 -> Position = pixel center
1712 * 1 -> Position = pixel centroid
1713 * 2 -> Position = at sample position
1714 *
1715 * From GLSL 4.5 specification, section 7.1:
1716 * "The variable gl_FragCoord is available as an input variable from
1717 * within fragment shaders and it holds the window relative coordinates
1718 * (x, y, z, 1/w) values for the fragment. If multi-sampling, this
1719 * value can be for any location within the pixel, or one of the
1720 * fragment samples. The use of centroid does not further restrict
1721 * this value to be inside the current primitive."
1722 *
1723 * Meaning that centroid has no effect and we can return anything within
1724 * the pixel. Thus, return the value at sample position, because that's
1725 * the most accurate one shaders can get.
1726 */
1727 spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
1728
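	/* With an integer pixel center, gl_FragCoord is taken relative to
	 * the upper-left corner of the pixel (ULC).
	 */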
1729 if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
1730 TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
1731 spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
1732
1733 spi_shader_col_format = si_get_spi_shader_col_format(shader);
1734 cb_shader_mask = ac_get_cb_shader_mask(spi_shader_col_format);
1735
1736 /* Ensure that some export memory is always allocated, for two reasons:
1737 *
1738 * 1) Correctness: The hardware ignores the EXEC mask if no export
1739 * memory is allocated, so KILL and alpha test do not work correctly
1740 * without this.
1741 * 2) Performance: Every shader needs at least a NULL export, even when
1742 * it writes no color/depth output. The NULL export instruction
1743 * stalls without this setting.
1744 *
1745 * Don't add this to CB_SHADER_MASK.
1746 *
1747 * GFX10 supports pixel shaders without exports by setting both
1748 * the color and Z formats to SPI_SHADER_ZERO. The hw will skip export
1749 * instructions if any are present.
1750 */
1751 if ((sscreen->info.chip_class <= GFX9 ||
1752 info->uses_kill ||
1753 shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS) &&
1754 !spi_shader_col_format &&
1755 !info->writes_z && !info->writes_stencil && !info->writes_samplemask)
1756 spi_shader_col_format = V_028714_SPI_SHADER_32_R;
1757
1758 shader->ctx_reg.ps.spi_ps_input_ena = input_ena;
1759 shader->ctx_reg.ps.spi_ps_input_addr = shader->config.spi_ps_input_addr;
1760
1761 /* Set interpolation controls. */
1762 spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader)) |
1763 S_0286D8_PS_W32_EN(sscreen->ps_wave_size == 32);
1764
1765 shader->ctx_reg.ps.spi_baryc_cntl = spi_baryc_cntl;
1766 shader->ctx_reg.ps.spi_ps_in_control = spi_ps_in_control;
1767 shader->ctx_reg.ps.spi_shader_z_format =
1768 ac_get_spi_shader_z_format(info->writes_z,
1769 info->writes_stencil,
1770 info->writes_samplemask);
1771 shader->ctx_reg.ps.spi_shader_col_format = spi_shader_col_format;
1772 shader->ctx_reg.ps.cb_shader_mask = cb_shader_mask;
1773
1774 va = shader->bo->gpu_address;
1775 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
1776 si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
1777 si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, S_00B024_MEM_BASE(va >> 40));
1778
1779 uint32_t rsrc1 =
1780 S_00B028_VGPRS((shader->config.num_vgprs - 1) /
1781 (sscreen->ps_wave_size == 32 ? 8 : 4)) |
1782 S_00B028_DX10_CLAMP(1) |
1783 S_00B028_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
1784 S_00B028_FLOAT_MODE(shader->config.float_mode);
1785
1786 if (sscreen->info.chip_class < GFX10) {
1787 rsrc1 |= S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8);
1788 }
1789
1790 si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS, rsrc1);
1791 si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
1792 S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
1793 S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
1794 		       S_00B02C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
1795 }
1796
1797 static void si_shader_init_pm4_state(struct si_screen *sscreen,
1798 struct si_shader *shader)
1799 {
1800 switch (shader->selector->type) {
1801 case PIPE_SHADER_VERTEX:
1802 if (shader->key.as_ls)
1803 si_shader_ls(sscreen, shader);
1804 else if (shader->key.as_es)
1805 si_shader_es(sscreen, shader);
1806 else if (shader->key.as_ngg)
1807 gfx10_shader_ngg(sscreen, shader);
1808 else
1809 si_shader_vs(sscreen, shader, NULL);
1810 break;
1811 case PIPE_SHADER_TESS_CTRL:
1812 si_shader_hs(sscreen, shader);
1813 break;
1814 case PIPE_SHADER_TESS_EVAL:
1815 if (shader->key.as_es)
1816 si_shader_es(sscreen, shader);
1817 else if (shader->key.as_ngg)
1818 gfx10_shader_ngg(sscreen, shader);
1819 else
1820 si_shader_vs(sscreen, shader, NULL);
1821 break;
1822 case PIPE_SHADER_GEOMETRY:
1823 if (shader->key.as_ngg)
1824 gfx10_shader_ngg(sscreen, shader);
1825 else
1826 si_shader_gs(sscreen, shader);
1827 break;
1828 case PIPE_SHADER_FRAGMENT:
1829 si_shader_ps(sscreen, shader);
1830 break;
1831 default:
1832 assert(0);
1833 }
1834 }
1835
1836 static unsigned si_get_alpha_test_func(struct si_context *sctx)
1837 {
1838 /* Alpha-test should be disabled if colorbuffer 0 is integer. */
1839 return sctx->queued.named.dsa->alpha_func;
1840 }
1841
1842 void si_shader_selector_key_vs(struct si_context *sctx,
1843 struct si_shader_selector *vs,
1844 struct si_shader_key *key,
1845 struct si_vs_prolog_bits *prolog_key)
1846 {
1847 if (!sctx->vertex_elements ||
1848 vs->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD])
1849 return;
1850
1851 struct si_vertex_elements *elts = sctx->vertex_elements;
1852
1853 prolog_key->instance_divisor_is_one = elts->instance_divisor_is_one;
1854 prolog_key->instance_divisor_is_fetched = elts->instance_divisor_is_fetched;
1855 prolog_key->unpack_instance_id_from_vertex_id =
1856 sctx->prim_discard_cs_instancing;
1857
1858 /* Prefer a monolithic shader to allow scheduling divisions around
1859 * VBO loads. */
1860 if (prolog_key->instance_divisor_is_fetched)
1861 key->opt.prefer_mono = 1;
1862
1863 unsigned count = MIN2(vs->info.num_inputs, elts->count);
1864 unsigned count_mask = (1 << count) - 1;
1865 unsigned fix = elts->fix_fetch_always & count_mask;
1866 unsigned opencode = elts->fix_fetch_opencode & count_mask;
1867
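	/* If a bound vertex buffer is misaligned for the hardware load size
	 * of an element, that fetch must be fixed up and open-coded.
	 */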
1868 if (sctx->vertex_buffer_unaligned & elts->vb_alignment_check_mask) {
1869 uint32_t mask = elts->fix_fetch_unaligned & count_mask;
1870 while (mask) {
1871 unsigned i = u_bit_scan(&mask);
1872 unsigned log_hw_load_size = 1 + ((elts->hw_load_is_dword >> i) & 1);
1873 unsigned vbidx = elts->vertex_buffer_index[i];
1874 struct pipe_vertex_buffer *vb = &sctx->vertex_buffer[vbidx];
1875 unsigned align_mask = (1 << log_hw_load_size) - 1;
1876 if (vb->buffer_offset & align_mask ||
1877 vb->stride & align_mask) {
1878 fix |= 1 << i;
1879 opencode |= 1 << i;
1880 }
1881 }
1882 }
1883
1884 while (fix) {
1885 unsigned i = u_bit_scan(&fix);
1886 key->mono.vs_fix_fetch[i].bits = elts->fix_fetch[i];
1887 }
1888 key->mono.vs_fetch_opencode = opencode;
1889 }
1890
1891 static void si_shader_selector_key_hw_vs(struct si_context *sctx,
1892 struct si_shader_selector *vs,
1893 struct si_shader_key *key)
1894 {
1895 struct si_shader_selector *ps = sctx->ps_shader.cso;
1896
1897 key->opt.clip_disable =
1898 sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
1899 (vs->info.clipdist_writemask ||
1900 vs->info.writes_clipvertex) &&
1901 !vs->info.culldist_writemask;
1902
1903 /* Find out if PS is disabled. */
1904 bool ps_disabled = true;
1905 if (ps) {
1906 bool ps_modifies_zs = ps->info.uses_kill ||
1907 ps->info.writes_z ||
1908 ps->info.writes_stencil ||
1909 ps->info.writes_samplemask ||
1910 sctx->queued.named.blend->alpha_to_coverage ||
1911 si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS;
1912 unsigned ps_colormask = si_get_total_colormask(sctx);
1913
1914 ps_disabled = sctx->queued.named.rasterizer->rasterizer_discard ||
1915 (!ps_colormask &&
1916 !ps_modifies_zs &&
1917 !ps->info.writes_memory);
1918 }
1919
1920 /* Find out which VS outputs aren't used by the PS. */
1921 uint64_t outputs_written = vs->outputs_written_before_ps;
1922 uint64_t inputs_read = 0;
1923
1924 /* Ignore outputs that are not passed from VS to PS. */
1925 outputs_written &= ~((1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_POSITION, 0, true)) |
1926 (1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_PSIZE, 0, true)) |
1927 (1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_CLIPVERTEX, 0, true)));
1928
1929 if (!ps_disabled) {
1930 inputs_read = ps->inputs_read;
1931 }
1932
1933 uint64_t linked = outputs_written & inputs_read;
1934
1935 key->opt.kill_outputs = ~linked & outputs_written;
1936 key->opt.ngg_culling = sctx->ngg_culling;
1937 }
1938
1939 /* Compute the key for the hw shader variant */
1940 static inline void si_shader_selector_key(struct pipe_context *ctx,
1941 struct si_shader_selector *sel,
1942 union si_vgt_stages_key stages_key,
1943 struct si_shader_key *key)
1944 {
1945 struct si_context *sctx = (struct si_context *)ctx;
1946
1947 memset(key, 0, sizeof(*key));
1948
1949 switch (sel->type) {
1950 case PIPE_SHADER_VERTEX:
1951 si_shader_selector_key_vs(sctx, sel, key, &key->part.vs.prolog);
1952
1953 if (sctx->tes_shader.cso)
1954 key->as_ls = 1;
1955 else if (sctx->gs_shader.cso) {
1956 key->as_es = 1;
1957 key->as_ngg = stages_key.u.ngg;
1958 } else {
1959 key->as_ngg = stages_key.u.ngg;
1960 si_shader_selector_key_hw_vs(sctx, sel, key);
1961
1962 if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
1963 key->mono.u.vs_export_prim_id = 1;
1964 }
1965 break;
1966 case PIPE_SHADER_TESS_CTRL:
1967 if (sctx->chip_class >= GFX9) {
1968 si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
1969 key, &key->part.tcs.ls_prolog);
1970 key->part.tcs.ls = sctx->vs_shader.cso;
1971
1972 /* When the LS VGPR fix is needed, monolithic shaders
1973 * can:
1974 * - avoid initializing EXEC in both the LS prolog
1975 * and the LS main part when !vs_needs_prolog
1976 * - remove the fixup for unused input VGPRs
1977 */
1978 key->part.tcs.ls_prolog.ls_vgpr_fix = sctx->ls_vgpr_fix;
1979
1980 /* The LS output / HS input layout can be communicated
1981 * directly instead of via user SGPRs for merged LS-HS.
1982 * The LS VGPR fix prefers this too.
1983 */
1984 key->opt.prefer_mono = 1;
1985 }
1986
1987 key->part.tcs.epilog.prim_mode =
1988 sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
1989 key->part.tcs.epilog.invoc0_tess_factors_are_def =
1990 sel->info.tessfactors_are_def_in_all_invocs;
1991 key->part.tcs.epilog.tes_reads_tess_factors =
1992 sctx->tes_shader.cso->info.reads_tess_factors;
1993
1994 if (sel == sctx->fixed_func_tcs_shader.cso)
1995 key->mono.u.ff_tcs_inputs_to_copy = sctx->vs_shader.cso->outputs_written;
1996 break;
1997 case PIPE_SHADER_TESS_EVAL:
1998 key->as_ngg = stages_key.u.ngg;
1999
2000 if (sctx->gs_shader.cso)
2001 key->as_es = 1;
2002 else {
2003 si_shader_selector_key_hw_vs(sctx, sel, key);
2004
2005 if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
2006 key->mono.u.vs_export_prim_id = 1;
2007 }
2008 break;
2009 case PIPE_SHADER_GEOMETRY:
2010 if (sctx->chip_class >= GFX9) {
2011 if (sctx->tes_shader.cso) {
2012 key->part.gs.es = sctx->tes_shader.cso;
2013 } else {
2014 si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
2015 key, &key->part.gs.vs_prolog);
2016 key->part.gs.es = sctx->vs_shader.cso;
2017 key->part.gs.prolog.gfx9_prev_is_vs = 1;
2018 }
2019
2020 key->as_ngg = stages_key.u.ngg;
2021
2022 /* Merged ES-GS can have unbalanced wave usage.
2023 *
2024 * ES threads are per-vertex, while GS threads are
2025 * per-primitive. So without any amplification, there
2026 * are fewer GS threads than ES threads, which can result
2027 * in empty (no-op) GS waves. With too much amplification,
2028 * there are more GS threads than ES threads, which
2029 * can result in empty (no-op) ES waves.
2030 *
2031 * Non-monolithic shaders are implemented by setting EXEC
2032 * at the beginning of shader parts, and don't jump to
2033 * the end if EXEC is 0.
2034 *
2035 * Monolithic shaders use conditional blocks, so they can
2036 * jump and skip empty waves of ES or GS. So set this to
2037 * always use optimized variants, which are monolithic.
2038 */
2039 key->opt.prefer_mono = 1;
2040 }
2041 key->part.gs.prolog.tri_strip_adj_fix = sctx->gs_tri_strip_adj_fix;
2042 break;
2043 case PIPE_SHADER_FRAGMENT: {
2044 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
2045 struct si_state_blend *blend = sctx->queued.named.blend;
2046
2047 if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
2048 sel->info.colors_written == 0x1)
2049 key->part.ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
2050
2051 /* Select the shader color format based on whether
2052 * blending or alpha are needed.
2053 */
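		/* Each color buffer has a 4-bit slot in these masks; for each
		 * target, one of the four precomputed framebuffer format
		 * words is selected by the blend-enable and
		 * needs-src-alpha bits.
		 */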
2054 key->part.ps.epilog.spi_shader_col_format =
2055 (blend->blend_enable_4bit & blend->need_src_alpha_4bit &
2056 sctx->framebuffer.spi_shader_col_format_blend_alpha) |
2057 (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
2058 sctx->framebuffer.spi_shader_col_format_blend) |
2059 (~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
2060 sctx->framebuffer.spi_shader_col_format_alpha) |
2061 (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
2062 sctx->framebuffer.spi_shader_col_format);
2063 key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;
2064
2065 /* The output for dual source blending should have
2066 * the same format as the first output.
2067 */
2068 if (blend->dual_src_blend) {
2069 key->part.ps.epilog.spi_shader_col_format |=
2070 (key->part.ps.epilog.spi_shader_col_format & 0xf) << 4;
2071 }
2072
2073 /* If alpha-to-coverage is enabled, we have to export alpha
2074 * even if there is no color buffer.
2075 */
2076 if (!(key->part.ps.epilog.spi_shader_col_format & 0xf) &&
2077 blend->alpha_to_coverage)
2078 			key->part.ps.epilog.spi_shader_col_format |= V_028714_SPI_SHADER_32_AR;
2079
2080 /* On GFX6 and GFX7 except Hawaii, the CB doesn't clamp outputs
2081 * to the range supported by the type if a channel has less
2082 * than 16 bits and the export format is 16_ABGR.
2083 */
2084 if (sctx->chip_class <= GFX7 && sctx->family != CHIP_HAWAII) {
2085 key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
2086 key->part.ps.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
2087 }
2088
2089 /* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
2090 if (!key->part.ps.epilog.last_cbuf) {
2091 key->part.ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
2092 key->part.ps.epilog.color_is_int8 &= sel->info.colors_written;
2093 key->part.ps.epilog.color_is_int10 &= sel->info.colors_written;
2094 }
2095
2096 bool is_poly = !util_prim_is_points_or_lines(sctx->current_rast_prim);
2097 bool is_line = util_prim_is_lines(sctx->current_rast_prim);
2098
2099 key->part.ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
2100 key->part.ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;
2101
2102 key->part.ps.epilog.alpha_to_one = blend->alpha_to_one &&
2103 rs->multisample_enable;
2104
2105 key->part.ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
2106 key->part.ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
2107 (is_line && rs->line_smooth)) &&
2108 sctx->framebuffer.nr_samples <= 1;
2109 key->part.ps.epilog.clamp_color = rs->clamp_fragment_color;
2110
2111 if (sctx->ps_iter_samples > 1 &&
2112 sel->info.reads_samplemask) {
2113 key->part.ps.prolog.samplemask_log_ps_iter =
2114 util_logbase2(sctx->ps_iter_samples);
2115 }
2116
2117 if (rs->force_persample_interp &&
2118 rs->multisample_enable &&
2119 sctx->framebuffer.nr_samples > 1 &&
2120 sctx->ps_iter_samples > 1) {
2121 key->part.ps.prolog.force_persp_sample_interp =
2122 sel->info.uses_persp_center ||
2123 sel->info.uses_persp_centroid;
2124
2125 key->part.ps.prolog.force_linear_sample_interp =
2126 sel->info.uses_linear_center ||
2127 sel->info.uses_linear_centroid;
2128 } else if (rs->multisample_enable &&
2129 sctx->framebuffer.nr_samples > 1) {
2130 key->part.ps.prolog.bc_optimize_for_persp =
2131 sel->info.uses_persp_center &&
2132 sel->info.uses_persp_centroid;
2133 key->part.ps.prolog.bc_optimize_for_linear =
2134 sel->info.uses_linear_center &&
2135 sel->info.uses_linear_centroid;
2136 } else {
2137 /* Make sure SPI doesn't compute more than 1 pair
2138 * of (i,j), which is the optimization here. */
2139 key->part.ps.prolog.force_persp_center_interp =
2140 sel->info.uses_persp_center +
2141 sel->info.uses_persp_centroid +
2142 sel->info.uses_persp_sample > 1;
2143
2144 key->part.ps.prolog.force_linear_center_interp =
2145 sel->info.uses_linear_center +
2146 sel->info.uses_linear_centroid +
2147 sel->info.uses_linear_sample > 1;
2148
2149 if (sel->info.uses_persp_opcode_interp_sample ||
2150 sel->info.uses_linear_opcode_interp_sample)
2151 key->mono.u.ps.interpolate_at_sample_force_center = 1;
2152 }
2153
2154 key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
2155
2156 /* ps_uses_fbfetch is true only if the color buffer is bound. */
2157 if (sctx->ps_uses_fbfetch && !sctx->blitter->running) {
2158 struct pipe_surface *cb0 = sctx->framebuffer.state.cbufs[0];
2159 struct pipe_resource *tex = cb0->texture;
2160
2161 /* 1D textures are allocated and used as 2D on GFX9. */
2162 key->mono.u.ps.fbfetch_msaa = sctx->framebuffer.nr_samples > 1;
2163 key->mono.u.ps.fbfetch_is_1D = sctx->chip_class != GFX9 &&
2164 (tex->target == PIPE_TEXTURE_1D ||
2165 tex->target == PIPE_TEXTURE_1D_ARRAY);
2166 key->mono.u.ps.fbfetch_layered = tex->target == PIPE_TEXTURE_1D_ARRAY ||
2167 tex->target == PIPE_TEXTURE_2D_ARRAY ||
2168 tex->target == PIPE_TEXTURE_CUBE ||
2169 tex->target == PIPE_TEXTURE_CUBE_ARRAY ||
2170 tex->target == PIPE_TEXTURE_3D;
2171 }
2172 break;
2173 }
2174 default:
2175 assert(0);
2176 }
2177
2178 if (unlikely(sctx->screen->debug_flags & DBG(NO_OPT_VARIANT)))
2179 memset(&key->opt, 0, sizeof(key->opt));
2180 }
2181
2182 static void si_build_shader_variant(struct si_shader *shader,
2183 int thread_index,
2184 bool low_priority)
2185 {
2186 struct si_shader_selector *sel = shader->selector;
2187 struct si_screen *sscreen = sel->screen;
2188 struct ac_llvm_compiler *compiler;
2189 struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug;
2190
2191 if (thread_index >= 0) {
2192 if (low_priority) {
2193 assert(thread_index < ARRAY_SIZE(sscreen->compiler_lowp));
2194 compiler = &sscreen->compiler_lowp[thread_index];
2195 } else {
2196 assert(thread_index < ARRAY_SIZE(sscreen->compiler));
2197 compiler = &sscreen->compiler[thread_index];
2198 }
2199 if (!debug->async)
2200 debug = NULL;
2201 } else {
2202 assert(!low_priority);
2203 compiler = shader->compiler_ctx_state.compiler;
2204 }
2205
2206 if (!compiler->passes)
2207 si_init_compiler(sscreen, compiler);
2208
2209 if (unlikely(!si_create_shader_variant(sscreen, compiler, shader, debug))) {
2210 PRINT_ERR("Failed to build shader variant (type=%u)\n",
2211 sel->type);
2212 shader->compilation_failed = true;
2213 return;
2214 }
2215
2216 if (shader->compiler_ctx_state.is_debug_context) {
2217 FILE *f = open_memstream(&shader->shader_log,
2218 &shader->shader_log_size);
2219 if (f) {
2220 si_shader_dump(sscreen, shader, NULL, f, false);
2221 fclose(f);
2222 }
2223 }
2224
2225 si_shader_init_pm4_state(sscreen, shader);
2226 }
2227
2228 static void si_build_shader_variant_low_priority(void *job, int thread_index)
2229 {
2230 struct si_shader *shader = (struct si_shader *)job;
2231
2232 assert(thread_index >= 0);
2233
2234 si_build_shader_variant(shader, thread_index, true);
2235 }
2236
2237 static const struct si_shader_key zeroed;
2238
2239 static bool si_check_missing_main_part(struct si_screen *sscreen,
2240 struct si_shader_selector *sel,
2241 struct si_compiler_ctx_state *compiler_state,
2242 struct si_shader_key *key)
2243 {
2244 struct si_shader **mainp = si_get_main_shader_part(sel, key);
2245
2246 if (!*mainp) {
2247 struct si_shader *main_part = CALLOC_STRUCT(si_shader);
2248
2249 if (!main_part)
2250 return false;
2251
2252 /* We can leave the fence as permanently signaled because the
2253 * main part becomes visible globally only after it has been
2254 * compiled. */
2255 util_queue_fence_init(&main_part->ready);
2256
2257 main_part->selector = sel;
2258 main_part->key.as_es = key->as_es;
2259 main_part->key.as_ls = key->as_ls;
2260 main_part->key.as_ngg = key->as_ngg;
2261 main_part->is_monolithic = false;
2262
2263 if (!si_compile_shader(sscreen, compiler_state->compiler,
2264 main_part, &compiler_state->debug)) {
2265 FREE(main_part);
2266 return false;
2267 }
2268 *mainp = main_part;
2269 }
2270 return true;
2271 }
2272
2273 /**
2274 * Select a shader variant according to the shader key.
2275 *
2276 * \param optimized_or_none If the key describes an optimized shader variant and
2277 * the compilation isn't finished, don't select any
2278 * shader and return an error.
2279 */
2280 int si_shader_select_with_key(struct si_screen *sscreen,
2281 struct si_shader_ctx_state *state,
2282 struct si_compiler_ctx_state *compiler_state,
2283 struct si_shader_key *key,
2284 int thread_index,
2285 bool optimized_or_none)
2286 {
2287 struct si_shader_selector *sel = state->cso;
2288 struct si_shader_selector *previous_stage_sel = NULL;
2289 struct si_shader *current = state->current;
2290 struct si_shader *iter, *shader = NULL;
2291
2292 again:
2293 /* Check if we don't need to change anything.
2294 * This path is also used for most shaders that don't need multiple
2295 	 * variants; it costs just the computation of the key and this
2296 * test. */
2297 if (likely(current &&
2298 memcmp(&current->key, key, sizeof(*key)) == 0)) {
2299 if (unlikely(!util_queue_fence_is_signalled(&current->ready))) {
2300 if (current->is_optimized) {
2301 if (optimized_or_none)
2302 return -1;
2303
2304 memset(&key->opt, 0, sizeof(key->opt));
2305 goto current_not_ready;
2306 }
2307
2308 util_queue_fence_wait(&current->ready);
2309 }
2310
2311 return current->compilation_failed ? -1 : 0;
2312 }
2313 current_not_ready:
2314
2315 /* This must be done before the mutex is locked, because async GS
2316 * compilation calls this function too, and therefore must enter
2317 * the mutex first.
2318 *
2319 * Only wait if we are in a draw call. Don't wait if we are
2320 * in a compiler thread.
2321 */
2322 if (thread_index < 0)
2323 util_queue_fence_wait(&sel->ready);
2324
2325 simple_mtx_lock(&sel->mutex);
2326
2327 /* Find the shader variant. */
2328 for (iter = sel->first_variant; iter; iter = iter->next_variant) {
2329 /* Don't check the "current" shader. We checked it above. */
2330 if (current != iter &&
2331 memcmp(&iter->key, key, sizeof(*key)) == 0) {
2332 simple_mtx_unlock(&sel->mutex);
2333
2334 if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
2335 /* If it's an optimized shader and its compilation has
2336 * been started but isn't done, use the unoptimized
2337 * shader so as not to cause a stall due to compilation.
2338 */
2339 if (iter->is_optimized) {
2340 if (optimized_or_none)
2341 return -1;
2342 memset(&key->opt, 0, sizeof(key->opt));
2343 goto again;
2344 }
2345
2346 util_queue_fence_wait(&iter->ready);
2347 }
2348
2349 if (iter->compilation_failed) {
2350 return -1; /* skip the draw call */
2351 }
2352
2353 state->current = iter;
2354 return 0;
2355 }
2356 }
2357
2358 /* Build a new shader. */
2359 shader = CALLOC_STRUCT(si_shader);
2360 if (!shader) {
2361 simple_mtx_unlock(&sel->mutex);
2362 return -ENOMEM;
2363 }
2364
2365 util_queue_fence_init(&shader->ready);
2366
2367 shader->selector = sel;
2368 shader->key = *key;
2369 shader->compiler_ctx_state = *compiler_state;
2370
2371 /* If this is a merged shader, get the first shader's selector. */
2372 if (sscreen->info.chip_class >= GFX9) {
2373 if (sel->type == PIPE_SHADER_TESS_CTRL)
2374 previous_stage_sel = key->part.tcs.ls;
2375 else if (sel->type == PIPE_SHADER_GEOMETRY)
2376 previous_stage_sel = key->part.gs.es;
2377
2378 /* We need to wait for the previous shader. */
2379 if (previous_stage_sel && thread_index < 0)
2380 util_queue_fence_wait(&previous_stage_sel->ready);
2381 }
2382
2383 bool is_pure_monolithic =
2384 sscreen->use_monolithic_shaders ||
2385 memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;
2386
2387 /* Compile the main shader part if it doesn't exist. This can happen
2388 * if the initial guess was wrong.
2389 *
2390 * The prim discard CS doesn't need the main shader part.
2391 */
2392 if (!is_pure_monolithic &&
2393 !key->opt.vs_as_prim_discard_cs) {
2394 bool ok = true;
2395
2396 /* Make sure the main shader part is present. This is needed
2397 * for shaders that can be compiled as VS, LS, or ES, and only
2398 * one of them is compiled at creation.
2399 *
2400 * It is also needed for GS, which can be compiled as non-NGG
2401 * and NGG.
2402 *
2403 * For merged shaders, check that the starting shader's main
2404 * part is present.
2405 */
2406 if (previous_stage_sel) {
2407 struct si_shader_key shader1_key = zeroed;
2408
2409 if (sel->type == PIPE_SHADER_TESS_CTRL) {
2410 shader1_key.as_ls = 1;
2411 } else if (sel->type == PIPE_SHADER_GEOMETRY) {
2412 shader1_key.as_es = 1;
2413 shader1_key.as_ngg = key->as_ngg; /* for Wave32 vs Wave64 */
2414 } else {
2415 assert(0);
2416 }
2417
2418 simple_mtx_lock(&previous_stage_sel->mutex);
2419 ok = si_check_missing_main_part(sscreen,
2420 previous_stage_sel,
2421 compiler_state, &shader1_key);
2422 simple_mtx_unlock(&previous_stage_sel->mutex);
2423 }
2424
2425 if (ok) {
2426 ok = si_check_missing_main_part(sscreen, sel,
2427 compiler_state, key);
2428 }
2429
2430 if (!ok) {
2431 FREE(shader);
2432 simple_mtx_unlock(&sel->mutex);
2433 return -ENOMEM; /* skip the draw call */
2434 }
2435 }
2436
2437 /* Keep the reference to the 1st shader of merged shaders, so that
2438 * Gallium can't destroy it before we destroy the 2nd shader.
2439 *
2440 * Set sctx = NULL, because it's unused if we're not releasing
2441 * the shader, and we don't have any sctx here.
2442 */
2443 si_shader_selector_reference(NULL, &shader->previous_stage_sel,
2444 previous_stage_sel);
2445
2446 /* Monolithic-only shaders don't make a distinction between optimized
2447 * and unoptimized. */
2448 shader->is_monolithic =
2449 is_pure_monolithic ||
2450 memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
2451
2452 /* The prim discard CS is always optimized. */
2453 shader->is_optimized =
2454 (!is_pure_monolithic || key->opt.vs_as_prim_discard_cs) &&
2455 memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
2456
2457 /* If it's an optimized shader, compile it asynchronously. */
2458 if (shader->is_optimized && thread_index < 0) {
2459 /* Compile it asynchronously. */
2460 util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
2461 shader, &shader->ready,
2462 si_build_shader_variant_low_priority, NULL,
2463 0);
2464
2465 /* Add only after the ready fence was reset, to guard against a
2466 * race with si_bind_XX_shader. */
2467 if (!sel->last_variant) {
2468 sel->first_variant = shader;
2469 sel->last_variant = shader;
2470 } else {
2471 sel->last_variant->next_variant = shader;
2472 sel->last_variant = shader;
2473 }
2474
2475 /* Use the default (unoptimized) shader for now. */
2476 memset(&key->opt, 0, sizeof(key->opt));
2477 simple_mtx_unlock(&sel->mutex);
2478
2479 if (sscreen->options.sync_compile)
2480 util_queue_fence_wait(&shader->ready);
2481
2482 if (optimized_or_none)
2483 return -1;
2484 goto again;
2485 }
2486
2487 /* Reset the fence before adding to the variant list. */
2488 util_queue_fence_reset(&shader->ready);
2489
2490 if (!sel->last_variant) {
2491 sel->first_variant = shader;
2492 sel->last_variant = shader;
2493 } else {
2494 sel->last_variant->next_variant = shader;
2495 sel->last_variant = shader;
2496 }
2497
2498 simple_mtx_unlock(&sel->mutex);
2499
2500 assert(!shader->is_optimized);
2501 si_build_shader_variant(shader, thread_index, false);
2502
2503 util_queue_fence_signal(&shader->ready);
2504
2505 if (!shader->compilation_failed)
2506 state->current = shader;
2507
2508 return shader->compilation_failed ? -1 : 0;
2509 }
2510
2511 static int si_shader_select(struct pipe_context *ctx,
2512 struct si_shader_ctx_state *state,
2513 union si_vgt_stages_key stages_key,
2514 struct si_compiler_ctx_state *compiler_state)
2515 {
2516 struct si_context *sctx = (struct si_context *)ctx;
2517 struct si_shader_key key;
2518
2519 si_shader_selector_key(ctx, state->cso, stages_key, &key);
2520 return si_shader_select_with_key(sctx->screen, state, compiler_state,
2521 &key, -1, false);
2522 }
2523
2524 static void si_parse_next_shader_property(const struct si_shader_info *info,
2525 bool streamout,
2526 struct si_shader_key *key)
2527 {
2528 unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];
2529
2530 switch (info->processor) {
2531 case PIPE_SHADER_VERTEX:
2532 switch (next_shader) {
2533 case PIPE_SHADER_GEOMETRY:
2534 key->as_es = 1;
2535 break;
2536 case PIPE_SHADER_TESS_CTRL:
2537 case PIPE_SHADER_TESS_EVAL:
2538 key->as_ls = 1;
2539 break;
2540 default:
2541 /* If POSITION isn't written, it can only be a HW VS
2542 * if streamout is used. If streamout isn't used,
2543 * assume that it's a HW LS. (the next shader is TCS)
2544 * This heuristic is needed for separate shader objects.
2545 */
2546 if (!info->writes_position && !streamout)
2547 key->as_ls = 1;
2548 }
2549 break;
2550
2551 case PIPE_SHADER_TESS_EVAL:
2552 if (next_shader == PIPE_SHADER_GEOMETRY ||
2553 !info->writes_position)
2554 key->as_es = 1;
2555 break;
2556 }
2557 }
2558
2559 /**
2560 * Compile the main shader part or the monolithic shader as part of
2561 * si_shader_selector initialization. Since it can be done asynchronously,
2562 * there is no way to report compile failures to applications.
2563 */
2564 static void si_init_shader_selector_async(void *job, int thread_index)
2565 {
2566 struct si_shader_selector *sel = (struct si_shader_selector *)job;
2567 struct si_screen *sscreen = sel->screen;
2568 struct ac_llvm_compiler *compiler;
2569 struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
2570
2571 assert(!debug->debug_message || debug->async);
2572 assert(thread_index >= 0);
2573 assert(thread_index < ARRAY_SIZE(sscreen->compiler));
2574 compiler = &sscreen->compiler[thread_index];
2575
2576 if (!compiler->passes)
2577 si_init_compiler(sscreen, compiler);
2578
2579 /* Serialize NIR to save memory. Monolithic shader variants
2580 * have to deserialize NIR before compilation.
2581 */
2582 if (sel->nir) {
2583 struct blob blob;
2584 size_t size;
2585
2586 blob_init(&blob);
2587 /* true = remove optional debugging data to increase
2588 		 * the likelihood of getting more shader cache hits.
2589 * It also drops variable names, so we'll save more memory.
2590 */
2591 nir_serialize(&blob, sel->nir, true);
2592 blob_finish_get_buffer(&blob, &sel->nir_binary, &size);
2593 sel->nir_size = size;
2594 }
2595
2596 /* Compile the main shader part for use with a prolog and/or epilog.
2597 * If this fails, the driver will try to compile a monolithic shader
2598 * on demand.
2599 */
2600 if (!sscreen->use_monolithic_shaders) {
2601 struct si_shader *shader = CALLOC_STRUCT(si_shader);
2602 unsigned char ir_sha1_cache_key[20];
2603
2604 if (!shader) {
2605 fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
2606 return;
2607 }
2608
2609 /* We can leave the fence signaled because use of the default
2610 * main part is guarded by the selector's ready fence. */
2611 util_queue_fence_init(&shader->ready);
2612
2613 shader->selector = sel;
2614 shader->is_monolithic = false;
2615 si_parse_next_shader_property(&sel->info,
2616 sel->so.num_outputs != 0,
2617 &shader->key);
2618
2619 if (sscreen->use_ngg &&
2620 (!sel->so.num_outputs || sscreen->use_ngg_streamout) &&
2621 ((sel->type == PIPE_SHADER_VERTEX && !shader->key.as_ls) ||
2622 sel->type == PIPE_SHADER_TESS_EVAL ||
2623 sel->type == PIPE_SHADER_GEOMETRY))
2624 shader->key.as_ngg = 1;
2625
2626 if (sel->nir) {
2627 si_get_ir_cache_key(sel, shader->key.as_ngg,
2628 shader->key.as_es, ir_sha1_cache_key);
2629 }
2630
2631 /* Try to load the shader from the shader cache. */
2632 simple_mtx_lock(&sscreen->shader_cache_mutex);
2633
2634 if (si_shader_cache_load_shader(sscreen, ir_sha1_cache_key, shader)) {
2635 simple_mtx_unlock(&sscreen->shader_cache_mutex);
2636 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
2637 } else {
2638 simple_mtx_unlock(&sscreen->shader_cache_mutex);
2639
2640 /* Compile the shader if it hasn't been loaded from the cache. */
2641 if (!si_compile_shader(sscreen, compiler, shader, debug)) {
2642 FREE(shader);
2643 fprintf(stderr, "radeonsi: can't compile a main shader part\n");
2644 return;
2645 }
2646
2647 simple_mtx_lock(&sscreen->shader_cache_mutex);
2648 si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key,
2649 shader, true);
2650 simple_mtx_unlock(&sscreen->shader_cache_mutex);
2651 }
2652
2653 *si_get_main_shader_part(sel, &shader->key) = shader;
2654
2655 /* Unset "outputs_written" flags for outputs converted to
2656 * DEFAULT_VAL, so that later inter-shader optimizations don't
2657 * try to eliminate outputs that don't exist in the final
2658 * shader.
2659 *
2660 * This is only done if non-monolithic shaders are enabled.
2661 */
2662 if ((sel->type == PIPE_SHADER_VERTEX ||
2663 sel->type == PIPE_SHADER_TESS_EVAL) &&
2664 !shader->key.as_ls &&
2665 !shader->key.as_es) {
2666 unsigned i;
2667
2668 for (i = 0; i < sel->info.num_outputs; i++) {
2669 unsigned offset = shader->info.vs_output_param_offset[i];
2670
2671 if (offset <= AC_EXP_PARAM_OFFSET_31)
2672 continue;
2673
2674 unsigned name = sel->info.output_semantic_name[i];
2675 unsigned index = sel->info.output_semantic_index[i];
2676 unsigned id;
2677
2678 switch (name) {
2679 case TGSI_SEMANTIC_GENERIC:
2680 /* don't process indices the function can't handle */
2681 if (index >= SI_MAX_IO_GENERIC)
2682 break;
2683 /* fall through */
2684 default:
2685 id = si_shader_io_get_unique_index(name, index, true);
2686 sel->outputs_written_before_ps &= ~(1ull << id);
2687 break;
2688 case TGSI_SEMANTIC_POSITION: /* ignore these */
2689 case TGSI_SEMANTIC_PSIZE:
2690 case TGSI_SEMANTIC_CLIPVERTEX:
2691 case TGSI_SEMANTIC_EDGEFLAG:
2692 break;
2693 }
2694 }
2695 }
2696 }
2697
2698 /* The GS copy shader is always pre-compiled. */
2699 if (sel->type == PIPE_SHADER_GEOMETRY &&
2700 (!sscreen->use_ngg ||
2701 !sscreen->use_ngg_streamout || /* also for PRIMITIVES_GENERATED */
2702 sel->tess_turns_off_ngg)) {
2703 sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, compiler, sel, debug);
2704 if (!sel->gs_copy_shader) {
2705 fprintf(stderr, "radeonsi: can't create GS copy shader\n");
2706 return;
2707 }
2708
2709 si_shader_vs(sscreen, sel->gs_copy_shader, sel);
2710 }
2711
2712 /* Free NIR. We only keep serialized NIR after this point. */
2713 if (sel->nir) {
2714 ralloc_free(sel->nir);
2715 sel->nir = NULL;
2716 }
2717 }
2718
2719 void si_schedule_initial_compile(struct si_context *sctx, unsigned processor,
2720 struct util_queue_fence *ready_fence,
2721 struct si_compiler_ctx_state *compiler_ctx_state,
2722 void *job, util_queue_execute_func execute)
2723 {
2724 util_queue_fence_init(ready_fence);
2725
2726 struct util_async_debug_callback async_debug;
2727 bool debug =
2728 (sctx->debug.debug_message && !sctx->debug.async) ||
2729 sctx->is_debug ||
2730 si_can_dump_shader(sctx->screen, processor);
2731
2732 if (debug) {
2733 u_async_debug_init(&async_debug);
2734 compiler_ctx_state->debug = async_debug.base;
2735 }
2736
2737 util_queue_add_job(&sctx->screen->shader_compiler_queue, job,
2738 ready_fence, execute, NULL, 0);
2739
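	/* For debug, wait for the compilation to finish and replay the
	 * captured messages into the context's debug callback.
	 */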
2740 if (debug) {
2741 util_queue_fence_wait(ready_fence);
2742 u_async_debug_drain(&async_debug, &sctx->debug);
2743 u_async_debug_cleanup(&async_debug);
2744 }
2745
2746 if (sctx->screen->options.sync_compile)
2747 util_queue_fence_wait(ready_fence);
2748 }
2749
2750 /* Return descriptor slot usage masks from the given shader info. */
2751 void si_get_active_slot_masks(const struct si_shader_info *info,
2752 uint32_t *const_and_shader_buffers,
2753 uint64_t *samplers_and_images)
2754 {
2755 unsigned start, num_shaderbufs, num_constbufs, num_images, num_msaa_images, num_samplers;
2756
2757 num_shaderbufs = util_last_bit(info->shader_buffers_declared);
2758 num_constbufs = util_last_bit(info->const_buffers_declared);
2759 /* two 8-byte images share one 16-byte slot */
2760 num_images = align(util_last_bit(info->images_declared), 2);
2761 num_msaa_images = align(util_last_bit(info->msaa_images_declared), 2);
2762 num_samplers = util_last_bit(info->samplers_declared);
2763
2764 /* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */
2765 start = si_get_shaderbuf_slot(num_shaderbufs - 1);
2766 *const_and_shader_buffers =
2767 u_bit_consecutive(start, num_shaderbufs + num_constbufs);
2768
2769 /* The layout is:
2770 * - fmask[last] ... fmask[0] go to [15-last .. 15]
2771 * - image[last] ... image[0] go to [31-last .. 31]
2772 * - sampler[0] ... sampler[last] go to [32 .. 32+last*2]
2773 *
2774 * FMASKs for images are placed separately, because MSAA images are rare,
2775 * and so we can benefit from a better cache hit rate if we keep image
2776 * descriptors together.
2777 */
2778 if (num_msaa_images)
2779 num_images = SI_NUM_IMAGES + num_msaa_images; /* add FMASK descriptors */
2780
2781 start = si_get_image_slot(num_images - 1) / 2;
2782 *samplers_and_images =
2783 u_bit_consecutive64(start, num_images / 2 + num_samplers);
2784 }
2785
2786 static void *si_create_shader_selector(struct pipe_context *ctx,
2787 const struct pipe_shader_state *state)
2788 {
2789 struct si_screen *sscreen = (struct si_screen *)ctx->screen;
2790 struct si_context *sctx = (struct si_context*)ctx;
2791 struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
2792 int i;
2793
2794 if (!sel)
2795 return NULL;
2796
2797 sel->screen = sscreen;
2798 sel->compiler_ctx_state.debug = sctx->debug;
2799 sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
2800
2801 sel->so = state->stream_output;
2802
2803 if (state->type == PIPE_SHADER_IR_TGSI) {
2804 sel->nir = tgsi_to_nir(state->tokens, ctx->screen);
2805 } else {
2806 assert(state->type == PIPE_SHADER_IR_NIR);
2807 sel->nir = state->ir.nir;
2808 }
2809
2810 si_nir_scan_shader(sel->nir, &sel->info);
2811 si_nir_adjust_driver_locations(sel->nir);
2812
2813 sel->type = sel->info.processor;
2814 p_atomic_inc(&sscreen->num_shaders_created);
2815 si_get_active_slot_masks(&sel->info,
2816 &sel->active_const_and_shader_buffers,
2817 &sel->active_samplers_and_images);
2818
2819 /* Record which streamout buffers are enabled. */
2820 for (i = 0; i < sel->so.num_outputs; i++) {
2821 sel->enabled_streamout_buffer_mask |=
2822 (1 << sel->so.output[i].output_buffer) <<
2823 (sel->so.output[i].stream * 4);
2824 }
2825
2826 sel->num_vs_inputs = sel->type == PIPE_SHADER_VERTEX &&
2827 !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] ?
2828 sel->info.num_inputs : 0;
2829 sel->num_vbos_in_user_sgprs =
2830 MIN2(sel->num_vs_inputs, sscreen->num_vbos_in_user_sgprs);
2831
2832 /* The prolog is a no-op if there are no inputs. */
2833 sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
2834 sel->info.num_inputs &&
2835 !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
2836
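	/* The primitive discard compute shader can only stand in for plain
	 * vertex shaders with no side effects and no streamout.
	 */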
2837 sel->prim_discard_cs_allowed =
2838 sel->type == PIPE_SHADER_VERTEX &&
2839 !sel->info.uses_bindless_images &&
2840 !sel->info.uses_bindless_samplers &&
2841 !sel->info.writes_memory &&
2842 !sel->info.writes_viewport_index &&
2843 !sel->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] &&
2844 !sel->so.num_outputs;
2845
2846 switch (sel->type) {
2847 case PIPE_SHADER_GEOMETRY:
2848 sel->gs_output_prim =
2849 sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
2850
2851 /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
2852 sel->rast_prim = sel->gs_output_prim;
2853 if (util_rast_prim_is_triangles(sel->rast_prim))
2854 sel->rast_prim = PIPE_PRIM_TRIANGLES;
2855
2856 sel->gs_max_out_vertices =
2857 sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
2858 sel->gs_num_invocations =
2859 sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
2860 sel->gsvs_vertex_size = sel->info.num_outputs * 16;
2861 sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
2862 sel->gs_max_out_vertices;
2863
2864 sel->max_gs_stream = 0;
2865 for (i = 0; i < sel->so.num_outputs; i++)
2866 sel->max_gs_stream = MAX2(sel->max_gs_stream,
2867 sel->so.output[i].stream);
2868
2869 sel->gs_input_verts_per_prim =
2870 u_vertices_per_prim(sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]);
2871
2872 		/* EN_MAX_VERT_OUT_PER_GS_INSTANCE does not work with tessellation. */
2873 sel->tess_turns_off_ngg =
2874 sscreen->info.chip_class == GFX10 &&
2875 sel->gs_num_invocations * sel->gs_max_out_vertices > 256;
2876 break;
2877
2878 case PIPE_SHADER_TESS_CTRL:
2879 /* Always reserve space for these. */
2880 sel->patch_outputs_written |=
2881 (1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0)) |
2882 (1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0));
2883 /* fall through */
2884 case PIPE_SHADER_VERTEX:
2885 case PIPE_SHADER_TESS_EVAL:
2886 for (i = 0; i < sel->info.num_outputs; i++) {
2887 unsigned name = sel->info.output_semantic_name[i];
2888 unsigned index = sel->info.output_semantic_index[i];
2889
2890 switch (name) {
2891 case TGSI_SEMANTIC_TESSINNER:
2892 case TGSI_SEMANTIC_TESSOUTER:
2893 case TGSI_SEMANTIC_PATCH:
2894 sel->patch_outputs_written |=
2895 1ull << si_shader_io_get_unique_index_patch(name, index);
2896 break;
2897
2898 case TGSI_SEMANTIC_GENERIC:
2899 /* don't process indices the function can't handle */
2900 if (index >= SI_MAX_IO_GENERIC)
2901 break;
2902 /* fall through */
2903 default:
2904 sel->outputs_written |=
2905 1ull << si_shader_io_get_unique_index(name, index, false);
2906 sel->outputs_written_before_ps |=
2907 1ull << si_shader_io_get_unique_index(name, index, true);
2908 break;
2909 case TGSI_SEMANTIC_EDGEFLAG:
2910 break;
2911 }
2912 }
2913 sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;
2914 sel->lshs_vertex_stride = sel->esgs_itemsize;
2915
2916 /* Add 1 dword to reduce LDS bank conflicts, so that each vertex
2917 		 * will start on a different bank (except at the maximum of 32*16).
2918 */
2919 if (sel->lshs_vertex_stride < 32*16)
2920 sel->lshs_vertex_stride += 4;
2921
2922 /* For the ESGS ring in LDS, add 1 dword to reduce LDS bank
2923 * conflicts, i.e. each vertex will start at a different bank.
2924 */
2925 if (sctx->chip_class >= GFX9)
2926 sel->esgs_itemsize += 4;
2927
2928 assert(((sel->esgs_itemsize / 4) & C_028AAC_ITEMSIZE) == 0);
2929
2930 /* Only for TES: */
2931 if (sel->info.properties[TGSI_PROPERTY_TES_POINT_MODE])
2932 sel->rast_prim = PIPE_PRIM_POINTS;
2933 else if (sel->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
2934 sel->rast_prim = PIPE_PRIM_LINE_STRIP;
2935 else
2936 sel->rast_prim = PIPE_PRIM_TRIANGLES;
2937 break;
2938
2939 case PIPE_SHADER_FRAGMENT:
2940 for (i = 0; i < sel->info.num_inputs; i++) {
2941 unsigned name = sel->info.input_semantic_name[i];
2942 unsigned index = sel->info.input_semantic_index[i];
2943
2944 switch (name) {
2945 case TGSI_SEMANTIC_GENERIC:
2946 /* don't process indices the function can't handle */
2947 if (index >= SI_MAX_IO_GENERIC)
2948 break;
2949 /* fall through */
2950 default:
2951 sel->inputs_read |=
2952 1ull << si_shader_io_get_unique_index(name, index, true);
2953 break;
2954 case TGSI_SEMANTIC_PCOORD: /* ignore this */
2955 break;
2956 }
2957 }
2958
2959 for (i = 0; i < 8; i++)
2960 if (sel->info.colors_written & (1 << i))
2961 sel->colors_written_4bit |= 0xf << (4 * i);
2962
2963 for (i = 0; i < sel->info.num_inputs; i++) {
2964 if (sel->info.input_semantic_name[i] == TGSI_SEMANTIC_COLOR) {
2965 int index = sel->info.input_semantic_index[i];
2966 sel->color_attr_index[index] = i;
2967 }
2968 }
2969 break;
2970 default:;
2971 }
2972
2973 sel->ngg_culling_allowed =
2974 sscreen->info.chip_class == GFX10 &&
2975 sscreen->info.has_dedicated_vram &&
2976 sscreen->use_ngg_culling &&
2977 /* Disallow TES by default, because TessMark results are mixed. */
2978 (sel->type == PIPE_SHADER_VERTEX ||
2979 (sscreen->always_use_ngg_culling && sel->type == PIPE_SHADER_TESS_EVAL)) &&
2980 sel->info.writes_position &&
2981 !sel->info.writes_viewport_index && /* cull only against viewport 0 */
2982 !sel->info.writes_memory &&
2983 !sel->so.num_outputs &&
2984 !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] &&
2985 !sel->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
2986
2987 /* PA_CL_VS_OUT_CNTL */
2988 if (sctx->chip_class <= GFX9)
2989 sel->pa_cl_vs_out_cntl = si_get_vs_out_cntl(sel, false);
2990
2991 sel->clipdist_mask = sel->info.writes_clipvertex ?
2992 SIX_BITS : sel->info.clipdist_writemask;
2993 sel->culldist_mask = sel->info.culldist_writemask <<
2994 sel->info.num_written_clipdistance;
2995
2996 /* DB_SHADER_CONTROL */
2997 sel->db_shader_control =
2998 S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
2999 S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(sel->info.writes_stencil) |
3000 S_02880C_MASK_EXPORT_ENABLE(sel->info.writes_samplemask) |
3001 S_02880C_KILL_ENABLE(sel->info.uses_kill);
3002
3003 switch (sel->info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT]) {
3004 case TGSI_FS_DEPTH_LAYOUT_GREATER:
3005 sel->db_shader_control |=
3006 S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
3007 break;
3008 case TGSI_FS_DEPTH_LAYOUT_LESS:
3009 sel->db_shader_control |=
3010 S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
3011 break;
3012 }
3013
3014 /* Z_ORDER, EXEC_ON_HIER_FAIL and EXEC_ON_NOOP should be set as following:
3015 *
3016 * | early Z/S | writes_mem | allow_ReZ? | Z_ORDER | EXEC_ON_HIER_FAIL | EXEC_ON_NOOP
3017 * --|-----------|------------|------------|--------------------|-------------------|-------------
3018 * 1a| false | false | true | EarlyZ_Then_ReZ | 0 | 0
3019 * 1b| false | false | false | EarlyZ_Then_LateZ | 0 | 0
3020 * 2 | false | true | n/a | LateZ | 1 | 0
3021 * 3 | true | false | n/a | EarlyZ_Then_LateZ | 0 | 0
3022 * 4 | true | true | n/a | EarlyZ_Then_LateZ | 0 | 1
3023 *
3024 * In cases 3 and 4, HW will force Z_ORDER to EarlyZ regardless of what's set in the register.
3025 * In case 2, NOOP_CULL is a don't care field. In case 2, 3 and 4, ReZ doesn't make sense.
3026 *
3027 * Don't use ReZ without profiling !!!
3028 *
3029 * ReZ decreases performance by 15% in DiRT: Showdown on Ultra settings, which has pretty complex
3030 * shaders.
3031 */
3032 if (sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]) {
3033 /* Cases 3, 4. */
3034 sel->db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) |
3035 S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
3036 S_02880C_EXEC_ON_NOOP(sel->info.writes_memory);
3037 } else if (sel->info.writes_memory) {
3038 /* Case 2. */
3039 sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z) |
3040 S_02880C_EXEC_ON_HIER_FAIL(1);
3041 } else {
3042 /* Case 1. */
3043 sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
3044 }
3045
3046 if (sel->info.properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE])
3047 sel->db_shader_control |= S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(1);
3048
3049 (void) simple_mtx_init(&sel->mutex, mtx_plain);
3050
3051 si_schedule_initial_compile(sctx, sel->info.processor, &sel->ready,
3052 &sel->compiler_ctx_state, sel,
3053 si_init_shader_selector_async);
3054 return sel;
3055 }
3056
3057 static void *si_create_shader(struct pipe_context *ctx,
3058 const struct pipe_shader_state *state)
3059 {
3060 struct si_screen *sscreen = (struct si_screen *)ctx->screen;
3061
3062 return util_live_shader_cache_get(ctx, &sscreen->live_shader_cache, state);
3063 }
3064
3065 static void si_update_streamout_state(struct si_context *sctx)
3066 {
3067 struct si_shader_selector *shader_with_so = si_get_vs(sctx)->cso;
3068
3069 if (!shader_with_so)
3070 return;
3071
3072 sctx->streamout.enabled_stream_buffers_mask =
3073 shader_with_so->enabled_streamout_buffer_mask;
3074 sctx->streamout.stride_in_dw = shader_with_so->so.stride;
3075 }
3076
3077 static void si_update_clip_regs(struct si_context *sctx,
3078 struct si_shader_selector *old_hw_vs,
3079 struct si_shader *old_hw_vs_variant,
3080 struct si_shader_selector *next_hw_vs,
3081 struct si_shader *next_hw_vs_variant)
3082 {
3083 if (next_hw_vs &&
3084 (!old_hw_vs ||
3085 old_hw_vs->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] !=
3086 next_hw_vs->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] ||
3087 old_hw_vs->pa_cl_vs_out_cntl != next_hw_vs->pa_cl_vs_out_cntl ||
3088 old_hw_vs->clipdist_mask != next_hw_vs->clipdist_mask ||
3089 old_hw_vs->culldist_mask != next_hw_vs->culldist_mask ||
3090 !old_hw_vs_variant ||
3091 !next_hw_vs_variant ||
3092 old_hw_vs_variant->key.opt.clip_disable !=
3093 next_hw_vs_variant->key.opt.clip_disable))
3094 si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
3095 }
3096
3097 static void si_update_common_shader_state(struct si_context *sctx)
3098 {
3099 sctx->uses_bindless_samplers =
3100 si_shader_uses_bindless_samplers(sctx->vs_shader.cso) ||
3101 si_shader_uses_bindless_samplers(sctx->gs_shader.cso) ||
3102 si_shader_uses_bindless_samplers(sctx->ps_shader.cso) ||
3103 si_shader_uses_bindless_samplers(sctx->tcs_shader.cso) ||
3104 si_shader_uses_bindless_samplers(sctx->tes_shader.cso);
3105 sctx->uses_bindless_images =
3106 si_shader_uses_bindless_images(sctx->vs_shader.cso) ||
3107 si_shader_uses_bindless_images(sctx->gs_shader.cso) ||
3108 si_shader_uses_bindless_images(sctx->ps_shader.cso) ||
3109 si_shader_uses_bindless_images(sctx->tcs_shader.cso) ||
3110 si_shader_uses_bindless_images(sctx->tes_shader.cso);
3111 sctx->do_update_shaders = true;
3112 }
3113
3114 static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
3115 {
3116 struct si_context *sctx = (struct si_context *)ctx;
3117 struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
3118 struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
3119 struct si_shader_selector *sel = state;
3120
3121 if (sctx->vs_shader.cso == sel)
3122 return;
3123
3124 sctx->vs_shader.cso = sel;
3125 sctx->vs_shader.current = sel ? sel->first_variant : NULL;
3126 sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] : 0;
3127
3128 if (si_update_ngg(sctx))
3129 si_shader_change_notify(sctx);
3130
3131 si_update_common_shader_state(sctx);
3132 si_update_vs_viewport_state(sctx);
3133 si_set_active_descriptors_for_shader(sctx, sel);
3134 si_update_streamout_state(sctx);
3135 si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
3136 si_get_vs(sctx)->cso, si_get_vs_state(sctx));
3137 }
3138
3139 static void si_update_tess_uses_prim_id(struct si_context *sctx)
3140 {
3141 sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id =
3142 (sctx->tes_shader.cso &&
3143 sctx->tes_shader.cso->info.uses_primid) ||
3144 (sctx->tcs_shader.cso &&
3145 sctx->tcs_shader.cso->info.uses_primid) ||
3146 (sctx->gs_shader.cso &&
3147 sctx->gs_shader.cso->info.uses_primid) ||
3148 (sctx->ps_shader.cso && !sctx->gs_shader.cso &&
3149 sctx->ps_shader.cso->info.uses_primid);
3150 }
3151
3152 bool si_update_ngg(struct si_context *sctx)
3153 {
3154 if (!sctx->screen->use_ngg) {
3155 assert(!sctx->ngg);
3156 return false;
3157 }
3158
3159 bool new_ngg = true;
3160
3161 if (sctx->gs_shader.cso && sctx->tes_shader.cso &&
3162 sctx->gs_shader.cso->tess_turns_off_ngg) {
3163 new_ngg = false;
3164 } else if (!sctx->screen->use_ngg_streamout) {
3165 struct si_shader_selector *last = si_get_vs(sctx)->cso;
3166
3167 if ((last && last->so.num_outputs) ||
3168 sctx->streamout.prims_gen_query_enabled)
3169 new_ngg = false;
3170 }
3171
3172 if (new_ngg != sctx->ngg) {
3173 /* Transitioning from NGG to legacy GS requires VGT_FLUSH on Navi10-14.
3174 * VGT_FLUSH is also emitted at the beginning of IBs when legacy GS ring
3175 * pointers are set.
3176 */
3177 if ((sctx->family == CHIP_NAVI10 ||
3178 sctx->family == CHIP_NAVI12 ||
3179 sctx->family == CHIP_NAVI14) &&
3180 !new_ngg)
3181 sctx->flags |= SI_CONTEXT_VGT_FLUSH;
3182
3183 sctx->ngg = new_ngg;
3184 sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */
3185 return true;
3186 }
3187 return false;
3188 }
3189
3190 static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
3191 {
3192 struct si_context *sctx = (struct si_context *)ctx;
3193 struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
3194 struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
3195 struct si_shader_selector *sel = state;
3196 bool enable_changed = !!sctx->gs_shader.cso != !!sel;
3197 bool ngg_changed;
3198
3199 if (sctx->gs_shader.cso == sel)
3200 return;
3201
3202 sctx->gs_shader.cso = sel;
3203 sctx->gs_shader.current = sel ? sel->first_variant : NULL;
3204 sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;
3205
3206 si_update_common_shader_state(sctx);
3207 sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */
3208
3209 ngg_changed = si_update_ngg(sctx);
3210 if (ngg_changed || enable_changed)
3211 si_shader_change_notify(sctx);
3212 if (enable_changed) {
3213 if (sctx->ia_multi_vgt_param_key.u.uses_tess)
3214 si_update_tess_uses_prim_id(sctx);
3215 }
3216 si_update_vs_viewport_state(sctx);
3217 si_set_active_descriptors_for_shader(sctx, sel);
3218 si_update_streamout_state(sctx);
3219 si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
3220 si_get_vs(sctx)->cso, si_get_vs_state(sctx));
3221 }
3222
3223 static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
3224 {
3225 struct si_context *sctx = (struct si_context *)ctx;
3226 struct si_shader_selector *sel = state;
3227 bool enable_changed = !!sctx->tcs_shader.cso != !!sel;
3228
3229 if (sctx->tcs_shader.cso == sel)
3230 return;
3231
3232 sctx->tcs_shader.cso = sel;
3233 sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
3234 si_update_tess_uses_prim_id(sctx);
3235
3236 si_update_common_shader_state(sctx);
3237
3238 if (enable_changed)
3239 sctx->last_tcs = NULL; /* invalidate derived tess state */
3240
3241 si_set_active_descriptors_for_shader(sctx, sel);
3242 }
3243
3244 static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
3245 {
3246 struct si_context *sctx = (struct si_context *)ctx;
3247 struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
3248 struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
3249 struct si_shader_selector *sel = state;
3250 bool enable_changed = !!sctx->tes_shader.cso != !!sel;
3251
3252 if (sctx->tes_shader.cso == sel)
3253 return;
3254
3255 sctx->tes_shader.cso = sel;
3256 sctx->tes_shader.current = sel ? sel->first_variant : NULL;
3257 sctx->ia_multi_vgt_param_key.u.uses_tess = sel != NULL;
3258 si_update_tess_uses_prim_id(sctx);
3259
3260 si_update_common_shader_state(sctx);
3261 sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */
3262
3263 bool ngg_changed = si_update_ngg(sctx);
3264 if (ngg_changed || enable_changed)
3265 si_shader_change_notify(sctx);
3266 if (enable_changed)
3267 sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
3268 si_update_vs_viewport_state(sctx);
3269 si_set_active_descriptors_for_shader(sctx, sel);
3270 si_update_streamout_state(sctx);
3271 si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
3272 si_get_vs(sctx)->cso, si_get_vs_state(sctx));
3273 }
3274
3275 static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
3276 {
3277 struct si_context *sctx = (struct si_context *)ctx;
3278 struct si_shader_selector *old_sel = sctx->ps_shader.cso;
3279 struct si_shader_selector *sel = state;
3280
3281 /* Skip if the supplied shader is the one already in use. */
3282 if (old_sel == sel)
3283 return;
3284
3285 sctx->ps_shader.cso = sel;
3286 sctx->ps_shader.current = sel ? sel->first_variant : NULL;
3287
3288 si_update_common_shader_state(sctx);
3289 if (sel) {
3290 if (sctx->ia_multi_vgt_param_key.u.uses_tess)
3291 si_update_tess_uses_prim_id(sctx);
3292
3293 if (!old_sel ||
3294 old_sel->info.colors_written != sel->info.colors_written)
3295 si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);
3296
3297 if (sctx->screen->has_out_of_order_rast &&
3298 (!old_sel ||
3299 old_sel->info.writes_memory != sel->info.writes_memory ||
3300 old_sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] !=
3301 sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]))
3302 si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
3303 }
3304 si_set_active_descriptors_for_shader(sctx, sel);
3305 si_update_ps_colorbuf0_slot(sctx);
3306 }
3307
3308 static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
3309 {
3310 if (shader->is_optimized) {
3311 util_queue_drop_job(&sctx->screen->shader_compiler_queue_low_priority,
3312 &shader->ready);
3313 }
3314
3315 util_queue_fence_destroy(&shader->ready);
3316
3317 if (shader->pm4) {
3318 /* If destroyed shaders were not unbound, the next compiled
3319 * shader variant could get the same pointer address; binding
3320 * it to the same shader stage would then be considered a
3321 * no-op, causing random behavior.
3322 */
3323 switch (shader->selector->type) {
3324 case PIPE_SHADER_VERTEX:
3325 if (shader->key.as_ls) {
3326 assert(sctx->chip_class <= GFX8);
3327 si_pm4_delete_state(sctx, ls, shader->pm4);
3328 } else if (shader->key.as_es) {
3329 assert(sctx->chip_class <= GFX8);
3330 si_pm4_delete_state(sctx, es, shader->pm4);
3331 } else if (shader->key.as_ngg) {
3332 si_pm4_delete_state(sctx, gs, shader->pm4);
3333 } else {
3334 si_pm4_delete_state(sctx, vs, shader->pm4);
3335 }
3336 break;
3337 case PIPE_SHADER_TESS_CTRL:
3338 si_pm4_delete_state(sctx, hs, shader->pm4);
3339 break;
3340 case PIPE_SHADER_TESS_EVAL:
3341 if (shader->key.as_es) {
3342 assert(sctx->chip_class <= GFX8);
3343 si_pm4_delete_state(sctx, es, shader->pm4);
3344 } else if (shader->key.as_ngg) {
3345 si_pm4_delete_state(sctx, gs, shader->pm4);
3346 } else {
3347 si_pm4_delete_state(sctx, vs, shader->pm4);
3348 }
3349 break;
3350 case PIPE_SHADER_GEOMETRY:
3351 if (shader->is_gs_copy_shader)
3352 si_pm4_delete_state(sctx, vs, shader->pm4);
3353 else
3354 si_pm4_delete_state(sctx, gs, shader->pm4);
3355 break;
3356 case PIPE_SHADER_FRAGMENT:
3357 si_pm4_delete_state(sctx, ps, shader->pm4);
3358 break;
3359 default:;
3360 }
3361 }
3362
3363 si_shader_selector_reference(sctx, &shader->previous_stage_sel, NULL);
3364 si_shader_destroy(shader);
3365 free(shader);
3366 }
3367
3368 static void si_destroy_shader_selector(struct pipe_context *ctx, void *cso)
3369 {
3370 struct si_context *sctx = (struct si_context*)ctx;
3371 struct si_shader_selector *sel = (struct si_shader_selector *)cso;
3372 struct si_shader *p = sel->first_variant, *c;
3373 struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
3374 [PIPE_SHADER_VERTEX] = &sctx->vs_shader,
3375 [PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
3376 [PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
3377 [PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
3378 [PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
3379 };
3380
3381 util_queue_drop_job(&sctx->screen->shader_compiler_queue, &sel->ready);
3382
3383 if (current_shader[sel->type]->cso == sel) {
3384 current_shader[sel->type]->cso = NULL;
3385 current_shader[sel->type]->current = NULL;
3386 }
3387
3388 while (p) {
3389 c = p->next_variant;
3390 si_delete_shader(sctx, p);
3391 p = c;
3392 }
3393
3394 if (sel->main_shader_part)
3395 si_delete_shader(sctx, sel->main_shader_part);
3396 if (sel->main_shader_part_ls)
3397 si_delete_shader(sctx, sel->main_shader_part_ls);
3398 if (sel->main_shader_part_es)
3399 si_delete_shader(sctx, sel->main_shader_part_es);
3400 if (sel->main_shader_part_ngg)
3401 si_delete_shader(sctx, sel->main_shader_part_ngg);
3402 if (sel->gs_copy_shader)
3403 si_delete_shader(sctx, sel->gs_copy_shader);
3404
3405 util_queue_fence_destroy(&sel->ready);
3406 simple_mtx_destroy(&sel->mutex);
3407 ralloc_free(sel->nir);
3408 free(sel->nir_binary);
3409 free(sel);
3410 }
3411
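/* Drop this context's reference. The selector itself is destroyed through
 * the live shader cache (si_destroy_shader_selector above) once the last
 * reference is released.
 */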
3412 static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
3413 {
3414 struct si_context *sctx = (struct si_context *)ctx;
3415 struct si_shader_selector *sel = (struct si_shader_selector *)state;
3416
3417 si_shader_selector_reference(sctx, &sel, NULL);
3418 }
3419
3420 static unsigned si_get_ps_input_cntl(struct si_context *sctx,
3421 struct si_shader *vs, unsigned name,
3422 unsigned index, unsigned interpolate)
3423 {
3424 struct si_shader_info *vsinfo = &vs->selector->info;
3425 unsigned j, offset, ps_input_cntl = 0;
3426
3427 if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
3428 (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade) ||
3429 name == TGSI_SEMANTIC_PRIMID)
3430 ps_input_cntl |= S_028644_FLAT_SHADE(1);
3431
3432 if (name == TGSI_SEMANTIC_PCOORD ||
3433 (name == TGSI_SEMANTIC_TEXCOORD &&
3434 sctx->sprite_coord_enable & (1 << index))) {
3435 ps_input_cntl |= S_028644_PT_SPRITE_TEX(1);
3436 }
3437
3438 for (j = 0; j < vsinfo->num_outputs; j++) {
3439 if (name == vsinfo->output_semantic_name[j] &&
3440 index == vsinfo->output_semantic_index[j]) {
3441 offset = vs->info.vs_output_param_offset[j];
3442
3443 if (offset <= AC_EXP_PARAM_OFFSET_31) {
3444 /* The input is loaded from parameter memory. */
3445 ps_input_cntl |= S_028644_OFFSET(offset);
3446 } else if (!G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
3447 if (offset == AC_EXP_PARAM_UNDEFINED) {
3448 /* This can happen with depth-only rendering. */
3449 offset = 0;
3450 } else {
3451 /* The input is a DEFAULT_VAL constant. */
3452 assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
3453 offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
3454 offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
3455 }
3456
3457 ps_input_cntl = S_028644_OFFSET(0x20) |
3458 S_028644_DEFAULT_VAL(offset);
3459 }
3460 break;
3461 }
3462 }
3463
3464 if (j == vsinfo->num_outputs && name == TGSI_SEMANTIC_PRIMID)
3465 /* PrimID is written after the last output when HW VS is used. */
3466 ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[vsinfo->num_outputs]);
3467 else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
3468 /* No corresponding output was found; load defaults into the input.
3469 * Don't set any other bits.
3470 * (FLAT_SHADE=1 completely changes the behavior.) */
3471 ps_input_cntl = S_028644_OFFSET(0x20);
3472 /* D3D 9 behavior; GL leaves this undefined. */
3473 if (name == TGSI_SEMANTIC_COLOR && index == 0)
3474 ps_input_cntl |= S_028644_DEFAULT_VAL(3);
3475 }
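/* Illustrative note (hardware DEFAULT_VAL encoding, for reference):
 *   DEFAULT_VAL = 0  ->  (0, 0, 0, 0)
 *   DEFAULT_VAL = 1  ->  (0, 0, 0, 1)
 *   DEFAULT_VAL = 2  ->  (1, 1, 1, 0)
 *   DEFAULT_VAL = 3  ->  (1, 1, 1, 1)
 * so the COLOR default above resolves to opaque white, matching D3D9.
 */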
3476 return ps_input_cntl;
3477 }
3478
3479 static void si_emit_spi_map(struct si_context *sctx)
3480 {
3481 struct si_shader *ps = sctx->ps_shader.current;
3482 struct si_shader *vs = si_get_vs_state(sctx);
3483 struct si_shader_info *psinfo = ps ? &ps->selector->info : NULL;
3484 unsigned i, num_interp, num_written = 0, bcol_interp[2];
3485 unsigned spi_ps_input_cntl[32];
3486
3487 if (!ps || !ps->selector->info.num_inputs)
3488 return;
3489
3490 num_interp = si_get_ps_num_interp(ps);
3491 assert(num_interp > 0);
3492
3493 for (i = 0; i < psinfo->num_inputs; i++) {
3494 unsigned name = psinfo->input_semantic_name[i];
3495 unsigned index = psinfo->input_semantic_index[i];
3496 unsigned interpolate = psinfo->input_interpolate[i];
3497
3498 spi_ps_input_cntl[num_written++] = si_get_ps_input_cntl(sctx, vs, name,
3499 index, interpolate);
3500
3501 if (name == TGSI_SEMANTIC_COLOR) {
3502 assert(index < ARRAY_SIZE(bcol_interp));
3503 bcol_interp[index] = interpolate;
3504 }
3505 }
3506
3507 if (ps->key.part.ps.prolog.color_two_side) {
3508 unsigned bcol = TGSI_SEMANTIC_BCOLOR;
3509
3510 for (i = 0; i < 2; i++) {
3511 if (!(psinfo->colors_read & (0xf << (i * 4))))
3512 continue;
3513
3514 spi_ps_input_cntl[num_written++] =
3515 si_get_ps_input_cntl(sctx, vs, bcol, i, bcol_interp[i]);
3516
3517 }
3518 }
3519 assert(num_interp == num_written);
3520
3521 /* R_028644_SPI_PS_INPUT_CNTL_0 */
3522 /* Dota 2: Only ~16% of SPI map updates set different values. */
3523 /* Talos: Only ~9% of SPI map updates set different values. */
3524 unsigned initial_cdw = sctx->gfx_cs->current.cdw;
3525 radeon_opt_set_context_regn(sctx, R_028644_SPI_PS_INPUT_CNTL_0,
3526 spi_ps_input_cntl,
3527 sctx->tracked_regs.spi_ps_input_cntl, num_interp);
3528
3529 if (initial_cdw != sctx->gfx_cs->current.cdw)
3530 sctx->context_roll = true;
3531 }
3532
3533 /**
3534 * Writing CONFIG or UCONFIG VGT registers requires a VGT_FLUSH event beforehand.
3535 */
3536 static void si_init_config_add_vgt_flush(struct si_context *sctx)
3537 {
3538 if (sctx->init_config_has_vgt_flush)
3539 return;
3540
3541 /* The Vulkan driver also emits this event before VGT_FLUSH. */
3542 si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
3543 si_pm4_cmd_add(sctx->init_config,
3544 EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
3545 si_pm4_cmd_end(sctx->init_config, false);
3546
3547 /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
3548 si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
3549 si_pm4_cmd_add(sctx->init_config, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
3550 si_pm4_cmd_end(sctx->init_config, false);
3551 sctx->init_config_has_vgt_flush = true;
3552 }
3553
3554 /* Initialize state related to ESGS / GSVS ring buffers */
3555 static bool si_update_gs_ring_buffers(struct si_context *sctx)
3556 {
3557 struct si_shader_selector *es =
3558 sctx->tes_shader.cso ? sctx->tes_shader.cso : sctx->vs_shader.cso;
3559 struct si_shader_selector *gs = sctx->gs_shader.cso;
3560 struct si_pm4_state *pm4;
3561
3562 /* Chip constants. */
3563 unsigned num_se = sctx->screen->info.max_se;
3564 unsigned wave_size = 64;
3565 unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
3566 /* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
3567 * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
3568 */
3569 unsigned gs_vertex_reuse = (sctx->chip_class >= GFX8 ? 32 : 16) * num_se;
3570 unsigned alignment = 256 * num_se;
3571 /* The maximum size is 63.999 MB per SE. */
3572 unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
3573
3574 /* Calculate the minimum size. */
3575 unsigned min_esgs_ring_size = align(es->esgs_itemsize * gs_vertex_reuse *
3576 wave_size, alignment);
3577
3578 /* These are recommended sizes, not minimum sizes. */
3579 unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
3580 es->esgs_itemsize * gs->gs_input_verts_per_prim;
3581 unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
3582 gs->max_gsvs_emit_size;
3583
3584 min_esgs_ring_size = align(min_esgs_ring_size, alignment);
3585 esgs_ring_size = align(esgs_ring_size, alignment);
3586 gsvs_ring_size = align(gsvs_ring_size, alignment);
3587
3588 esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
3589 gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
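/* Worked example with hypothetical numbers: on a GFX8 part with 4 SEs and
 * esgs_itemsize = 16 bytes:
 *   gs_vertex_reuse    = 32 * 4  = 128
 *   alignment          = 256 * 4 = 1024
 *   min_esgs_ring_size = align(16 * 128 * 64, 1024) = 131072 bytes
 * and with 2 GS input vertices per primitive, the recommended size is
 *   esgs_ring_size = (32 * 4) * 2 * 64 * 16 * 2 = 524288 bytes,
 * comfortably below max_size.
 */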
3590
3591 /* Some rings don't have to be allocated if shaders don't use them.
3592 * (e.g. no varyings between ES and GS or GS and VS)
3593 *
3594 * GFX9+ doesn't use a memory-based ESGS ring; ES outputs go through LDS.
3595 */
3596 bool update_esgs = sctx->chip_class <= GFX8 &&
3597 esgs_ring_size &&
3598 (!sctx->esgs_ring ||
3599 sctx->esgs_ring->width0 < esgs_ring_size);
3600 bool update_gsvs = gsvs_ring_size &&
3601 (!sctx->gsvs_ring ||
3602 sctx->gsvs_ring->width0 < gsvs_ring_size);
3603
3604 if (!update_esgs && !update_gsvs)
3605 return true;
3606
3607 if (update_esgs) {
3608 pipe_resource_reference(&sctx->esgs_ring, NULL);
3609 sctx->esgs_ring =
3610 pipe_aligned_buffer_create(sctx->b.screen,
3611 SI_RESOURCE_FLAG_UNMAPPABLE,
3612 PIPE_USAGE_DEFAULT,
3613 esgs_ring_size,
3614 sctx->screen->info.pte_fragment_size);
3615 if (!sctx->esgs_ring)
3616 return false;
3617 }
3618
3619 if (update_gsvs) {
3620 pipe_resource_reference(&sctx->gsvs_ring, NULL);
3621 sctx->gsvs_ring =
3622 pipe_aligned_buffer_create(sctx->b.screen,
3623 SI_RESOURCE_FLAG_UNMAPPABLE,
3624 PIPE_USAGE_DEFAULT,
3625 gsvs_ring_size,
3626 sctx->screen->info.pte_fragment_size);
3627 if (!sctx->gsvs_ring)
3628 return false;
3629 }
3630
3631 /* Create the "init_config_gs_rings" state. */
3632 pm4 = CALLOC_STRUCT(si_pm4_state);
3633 if (!pm4)
3634 return false;
3635
3636 if (sctx->chip_class >= GFX7) {
3637 if (sctx->esgs_ring) {
3638 assert(sctx->chip_class <= GFX8);
3639 si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
3640 sctx->esgs_ring->width0 / 256);
3641 }
3642 if (sctx->gsvs_ring)
3643 si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE,
3644 sctx->gsvs_ring->width0 / 256);
3645 } else {
3646 if (sctx->esgs_ring)
3647 si_pm4_set_reg(pm4, R_0088C8_VGT_ESGS_RING_SIZE,
3648 sctx->esgs_ring->width0 / 256);
3649 if (sctx->gsvs_ring)
3650 si_pm4_set_reg(pm4, R_0088CC_VGT_GSVS_RING_SIZE,
3651 sctx->gsvs_ring->width0 / 256);
3652 }
3653
3654 /* Set the state. */
3655 if (sctx->init_config_gs_rings)
3656 si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
3657 sctx->init_config_gs_rings = pm4;
3658
3659 if (!sctx->init_config_has_vgt_flush) {
3660 si_init_config_add_vgt_flush(sctx);
3661 si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
3662 }
3663
3664 /* Flush the context to re-emit both init_config states. */
3665 sctx->initial_gfx_cs_size = 0; /* force flush */
3666 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
3667
3668 /* Set ring bindings. */
3669 if (sctx->esgs_ring) {
3670 assert(sctx->chip_class <= GFX8);
3671 si_set_ring_buffer(sctx, SI_ES_RING_ESGS,
3672 sctx->esgs_ring, 0, sctx->esgs_ring->width0,
3673 true, true, 4, 64, 0);
3674 si_set_ring_buffer(sctx, SI_GS_RING_ESGS,
3675 sctx->esgs_ring, 0, sctx->esgs_ring->width0,
3676 false, false, 0, 0, 0);
3677 }
3678 if (sctx->gsvs_ring) {
3679 si_set_ring_buffer(sctx, SI_RING_GSVS,
3680 sctx->gsvs_ring, 0, sctx->gsvs_ring->width0,
3681 false, false, 0, 0, 0);
3682 }
3683
3684 return true;
3685 }
3686
3687 static void si_shader_lock(struct si_shader *shader)
3688 {
3689 simple_mtx_lock(&shader->selector->mutex);
3690 if (shader->previous_stage_sel) {
3691 assert(shader->previous_stage_sel != shader->selector);
3692 simple_mtx_lock(&shader->previous_stage_sel->mutex);
3693 }
3694 }
3695
3696 static void si_shader_unlock(struct si_shader *shader)
3697 {
3698 if (shader->previous_stage_sel)
3699 simple_mtx_unlock(&shader->previous_stage_sel->mutex);
3700 simple_mtx_unlock(&shader->selector->mutex);
3701 }
3702
3703 /**
3704 * @returns 1 if \p shader has been updated to use a new scratch buffer
3705 * 0 if not
3706 * < 0 if there was a failure
3707 */
3708 static int si_update_scratch_buffer(struct si_context *sctx,
3709 struct si_shader *shader)
3710 {
3711 uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
3712
3713 if (!shader)
3714 return 0;
3715
3716 /* This shader doesn't need a scratch buffer */
3717 if (shader->config.scratch_bytes_per_wave == 0)
3718 return 0;
3719
3720 /* Prevent race conditions when updating:
3721 * - si_shader::scratch_bo
3722 * - si_shader::binary::code
3723 * - si_shader::previous_stage::binary::code.
3724 */
3725 si_shader_lock(shader);
3726
3727 /* This shader is already configured to use the current
3728 * scratch buffer. */
3729 if (shader->scratch_bo == sctx->scratch_buffer) {
3730 si_shader_unlock(shader);
3731 return 0;
3732 }
3733
3734 assert(sctx->scratch_buffer);
3735
3736 /* Replace the shader bo with a new bo that has the relocs applied. */
3737 if (!si_shader_binary_upload(sctx->screen, shader, scratch_va)) {
3738 si_shader_unlock(shader);
3739 return -1;
3740 }
3741
3742 /* Update the shader state to use the new shader bo. */
3743 si_shader_init_pm4_state(sctx->screen, shader);
3744
3745 si_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
3746
3747 si_shader_unlock(shader);
3748 return 1;
3749 }
3750
3751 static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
3752 {
3753 return shader ? shader->config.scratch_bytes_per_wave : 0;
3754 }
3755
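/* Return the TCS that is currently in use: the application-supplied TCS if
 * one is bound, otherwise the internal fixed-function TCS that is generated
 * when tessellation is used without a user TCS.
 */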
3756 static struct si_shader *si_get_tcs_current(struct si_context *sctx)
3757 {
3758 if (!sctx->tes_shader.cso)
3759 return NULL; /* tessellation disabled */
3760
3761 return sctx->tcs_shader.cso ? sctx->tcs_shader.current :
3762 sctx->fixed_func_tcs_shader.current;
3763 }
3764
3765 static bool si_update_scratch_relocs(struct si_context *sctx)
3766 {
3767 struct si_shader *tcs = si_get_tcs_current(sctx);
3768 int r;
3769
3770 /* Update the shaders, so that they are using the latest scratch.
3771 * The scratch buffer may have been changed since these shaders were
3772 * last used, so we still need to try to update them, even if they
3773 * require scratch buffers smaller than the current size.
3774 */
3775 r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
3776 if (r < 0)
3777 return false;
3778 if (r == 1)
3779 si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);
3780
3781 r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
3782 if (r < 0)
3783 return false;
3784 if (r == 1)
3785 si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
3786
3787 r = si_update_scratch_buffer(sctx, tcs);
3788 if (r < 0)
3789 return false;
3790 if (r == 1)
3791 si_pm4_bind_state(sctx, hs, tcs->pm4);
3792
3793 /* VS can be bound as LS, ES, or VS. */
3794 r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
3795 if (r < 0)
3796 return false;
3797 if (r == 1) {
3798 if (sctx->vs_shader.current->key.as_ls)
3799 si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
3800 else if (sctx->vs_shader.current->key.as_es)
3801 si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
3802 else if (sctx->vs_shader.current->key.as_ngg)
3803 si_pm4_bind_state(sctx, gs, sctx->vs_shader.current->pm4);
3804 else
3805 si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
3806 }
3807
3808 /* TES can be bound as ES or VS. */
3809 r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
3810 if (r < 0)
3811 return false;
3812 if (r == 1) {
3813 if (sctx->tes_shader.current->key.as_es)
3814 si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
3815 else if (sctx->tes_shader.current->key.as_ngg)
3816 si_pm4_bind_state(sctx, gs, sctx->tes_shader.current->pm4);
3817 else
3818 si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
3819 }
3820
3821 return true;
3822 }
3823
3824 static bool si_update_spi_tmpring_size(struct si_context *sctx)
3825 {
3826 /* SPI_TMPRING_SIZE.WAVESIZE must be constant for each scratch buffer.
3827 * There are 2 cases to handle:
3828 *
3829 * - If the current needed size is less than the maximum seen size,
3830 * use the maximum seen size, so that WAVESIZE remains the same.
3831 *
3832 * - If the current needed size is greater than the maximum seen size,
3833 * the scratch buffer is reallocated, so we can increase WAVESIZE.
3834 *
3835 * Shaders that set SCRATCH_EN=0 don't allocate scratch space.
3836 * Otherwise, the number of waves that can use scratch is
3837 * SPI_TMPRING_SIZE.WAVES.
3838 */
3839 unsigned bytes = 0;
3840
3841 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
3842 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
3843 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
3844
3845 if (sctx->tes_shader.cso) {
3846 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
3847 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(si_get_tcs_current(sctx)));
3848 }
3849
3850 sctx->max_seen_scratch_bytes_per_wave =
3851 MAX2(sctx->max_seen_scratch_bytes_per_wave, bytes);
3852
3853 unsigned scratch_needed_size =
3854 sctx->max_seen_scratch_bytes_per_wave * sctx->scratch_waves;
3855 unsigned spi_tmpring_size;
3856
3857 if (scratch_needed_size > 0) {
3858 if (!sctx->scratch_buffer ||
3859 scratch_needed_size > sctx->scratch_buffer->b.b.width0) {
3860 /* Create a bigger scratch buffer */
3861 si_resource_reference(&sctx->scratch_buffer, NULL);
3862
3863 sctx->scratch_buffer =
3864 si_aligned_buffer_create(&sctx->screen->b,
3865 SI_RESOURCE_FLAG_UNMAPPABLE,
3866 PIPE_USAGE_DEFAULT,
3867 scratch_needed_size,
3868 sctx->screen->info.pte_fragment_size);
3869 if (!sctx->scratch_buffer)
3870 return false;
3871
3872 si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
3873 si_context_add_resource_size(sctx,
3874 &sctx->scratch_buffer->b.b);
3875 }
3876
3877 if (!si_update_scratch_relocs(sctx))
3878 return false;
3879 }
3880
3881 /* The LLVM shader backend should be reporting aligned scratch_sizes. */
3882 assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
3883 "scratch size should already be aligned correctly.");
3884
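/* Worked example with hypothetical numbers: with scratch_waves = 32 and a
 * maximum of 2048 scratch bytes seen per wave, this encodes WAVES = 32 and
 * WAVESIZE = 2048 >> 10 = 2; the field is in units of 1024 bytes, hence
 * the alignment asserted above.
 */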
3885 spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
3886 S_0286E8_WAVESIZE(sctx->max_seen_scratch_bytes_per_wave >> 10);
3887 if (spi_tmpring_size != sctx->spi_tmpring_size) {
3888 sctx->spi_tmpring_size = spi_tmpring_size;
3889 si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
3890 }
3891 return true;
3892 }
3893
3894 static void si_init_tess_factor_ring(struct si_context *sctx)
3895 {
3896 assert(!sctx->tess_rings);
3897 assert(((sctx->screen->tess_factor_ring_size / 4) & C_030938_SIZE) == 0);
3898
3899 /* The address must be aligned to 2^19, because the shader only
3900 * receives the high 13 bits.
3901 */
3902 sctx->tess_rings = pipe_aligned_buffer_create(sctx->b.screen,
3903 SI_RESOURCE_FLAG_32BIT,
3904 PIPE_USAGE_DEFAULT,
3905 sctx->screen->tess_offchip_ring_size +
3906 sctx->screen->tess_factor_ring_size,
3907 1 << 19);
3908 if (!sctx->tess_rings)
3909 return;
3910
3911 si_init_config_add_vgt_flush(sctx);
3912
3913 si_pm4_add_bo(sctx->init_config, si_resource(sctx->tess_rings),
3914 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
3915
3916 uint64_t factor_va = si_resource(sctx->tess_rings)->gpu_address +
3917 sctx->screen->tess_offchip_ring_size;
3918
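/* Illustrative address split (hypothetical VA): with a 2^19-aligned
 * factor_va = 0x010024680000, the registers below receive
 *   VGT_TF_MEMORY_BASE = (factor_va >> 8) & 0xffffffff = 0x00246800
 *   and the *_HI register = factor_va >> 40 = 0x01.
 */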
3919 /* Append these registers to the init config state. */
3920 if (sctx->chip_class >= GFX7) {
3921 si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
3922 S_030938_SIZE(sctx->screen->tess_factor_ring_size / 4));
3923 si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
3924 factor_va >> 8);
3925 if (sctx->chip_class >= GFX10)
3926 si_pm4_set_reg(sctx->init_config, R_030984_VGT_TF_MEMORY_BASE_HI_UMD,
3927 S_030984_BASE_HI(factor_va >> 40));
3928 else if (sctx->chip_class == GFX9)
3929 si_pm4_set_reg(sctx->init_config, R_030944_VGT_TF_MEMORY_BASE_HI,
3930 S_030944_BASE_HI(factor_va >> 40));
3931 si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
3932 sctx->screen->vgt_hs_offchip_param);
3933 } else {
3934 si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
3935 S_008988_SIZE(sctx->screen->tess_factor_ring_size / 4));
3936 si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
3937 factor_va >> 8);
3938 si_pm4_set_reg(sctx->init_config, R_0089B0_VGT_HS_OFFCHIP_PARAM,
3939 sctx->screen->vgt_hs_offchip_param);
3940 }
3941
3942 /* Flush the context to re-emit the init_config state.
3943 * This is done only once in the lifetime of a context.
3944 */
3945 si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
3946 sctx->initial_gfx_cs_size = 0; /* force flush */
3947 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
3948 }
3949
3950 static struct si_pm4_state *si_build_vgt_shader_config(struct si_screen *screen,
3951 union si_vgt_stages_key key)
3952 {
3953 struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
3954 uint32_t stages = 0;
3955
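/* For example (derived from the branches below), tessellation combined
 * with a legacy (non-NGG) GS programs:
 *   LS_EN = LS_STAGE_ON, HS_EN = 1,
 *   ES_EN = ES_STAGE_DS, GS_EN = 1,
 *   VS_EN = VS_STAGE_COPY_SHADER (the GS copy shader feeds the
 *   fixed-function pipeline).
 */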
3956 if (key.u.tess) {
3957 stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
3958 S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);
3959
3960 if (key.u.gs)
3961 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
3962 S_028B54_GS_EN(1);
3963 else if (key.u.ngg)
3964 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
3965 else
3966 stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
3967 } else if (key.u.gs) {
3968 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
3969 S_028B54_GS_EN(1);
3970 } else if (key.u.ngg) {
3971 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
3972 }
3973
3974 if (key.u.ngg) {
3975 stages |= S_028B54_PRIMGEN_EN(1) |
3976 S_028B54_GS_FAST_LAUNCH(key.u.ngg_gs_fast_launch) |
3977 S_028B54_NGG_WAVE_ID_EN(key.u.streamout) |
3978 S_028B54_PRIMGEN_PASSTHRU_EN(key.u.ngg_passthrough);
3979 } else if (key.u.gs)
3980 stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
3981
3982 if (screen->info.chip_class >= GFX9)
3983 stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
3984
3985 if (screen->info.chip_class >= GFX10 && screen->ge_wave_size == 32) {
3986 stages |= S_028B54_HS_W32_EN(1) |
3987 S_028B54_GS_W32_EN(key.u.ngg) | /* legacy GS only supports Wave64 */
3988 S_028B54_VS_W32_EN(1);
3989 }
3990
3991 si_pm4_set_reg(pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
3992 return pm4;
3993 }
3994
3995 static void si_update_vgt_shader_config(struct si_context *sctx,
3996 union si_vgt_stages_key key)
3997 {
3998 struct si_pm4_state **pm4 = &sctx->vgt_shader_config[key.index];
3999
4000 if (unlikely(!*pm4))
4001 *pm4 = si_build_vgt_shader_config(sctx->screen, key);
4002 si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
4003 }
4004
4005 bool si_update_shaders(struct si_context *sctx)
4006 {
4007 struct pipe_context *ctx = (struct pipe_context*)sctx;
4008 struct si_compiler_ctx_state compiler_state;
4009 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
4010 struct si_shader *old_vs = si_get_vs_state(sctx);
4011 bool old_clip_disable = old_vs ? old_vs->key.opt.clip_disable : false;
4012 struct si_shader *old_ps = sctx->ps_shader.current;
4013 union si_vgt_stages_key key;
4014 unsigned old_spi_shader_col_format =
4015 old_ps ? old_ps->key.part.ps.epilog.spi_shader_col_format : 0;
4016 int r;
4017
4018 if (!sctx->compiler.passes)
4019 si_init_compiler(sctx->screen, &sctx->compiler);
4020
4021 compiler_state.compiler = &sctx->compiler;
4022 compiler_state.debug = sctx->debug;
4023 compiler_state.is_debug_context = sctx->is_debug;
4024
4025 key.index = 0;
4026
4027 if (sctx->tes_shader.cso)
4028 key.u.tess = 1;
4029 if (sctx->gs_shader.cso)
4030 key.u.gs = 1;
4031
4032 if (sctx->ngg) {
4033 key.u.ngg = 1;
4034 key.u.streamout = !!si_get_vs(sctx)->cso->so.num_outputs;
4035 }
4036
4037 /* Update TCS and TES. */
4038 if (sctx->tes_shader.cso) {
4039 if (!sctx->tess_rings) {
4040 si_init_tess_factor_ring(sctx);
4041 if (!sctx->tess_rings)
4042 return false;
4043 }
4044
4045 if (sctx->tcs_shader.cso) {
4046 r = si_shader_select(ctx, &sctx->tcs_shader, key,
4047 &compiler_state);
4048 if (r)
4049 return false;
4050 si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
4051 } else {
4052 if (!sctx->fixed_func_tcs_shader.cso) {
4053 sctx->fixed_func_tcs_shader.cso =
4054 si_create_fixed_func_tcs(sctx);
4055 if (!sctx->fixed_func_tcs_shader.cso)
4056 return false;
4057 }
4058
4059 r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader,
4060 key, &compiler_state);
4061 if (r)
4062 return false;
4063 si_pm4_bind_state(sctx, hs,
4064 sctx->fixed_func_tcs_shader.current->pm4);
4065 }
4066
4067 if (!sctx->gs_shader.cso || sctx->chip_class <= GFX8) {
4068 r = si_shader_select(ctx, &sctx->tes_shader, key, &compiler_state);
4069 if (r)
4070 return false;
4071
4072 if (sctx->gs_shader.cso) {
4073 /* TES as ES */
4074 assert(sctx->chip_class <= GFX8);
4075 si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
4076 } else if (key.u.ngg) {
4077 si_pm4_bind_state(sctx, gs, sctx->tes_shader.current->pm4);
4078 } else {
4079 si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
4080 }
4081 }
4082 } else {
4083 if (sctx->chip_class <= GFX8)
4084 si_pm4_bind_state(sctx, ls, NULL);
4085 si_pm4_bind_state(sctx, hs, NULL);
4086 }
4087
4088 /* Update GS. */
4089 if (sctx->gs_shader.cso) {
4090 r = si_shader_select(ctx, &sctx->gs_shader, key, &compiler_state);
4091 if (r)
4092 return false;
4093 si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
4094 if (!key.u.ngg) {
4095 si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4);
4096
4097 if (!si_update_gs_ring_buffers(sctx))
4098 return false;
4099 } else {
4100 si_pm4_bind_state(sctx, vs, NULL);
4101 }
4102 } else {
4103 if (!key.u.ngg) {
4104 si_pm4_bind_state(sctx, gs, NULL);
4105 if (sctx->chip_class <= GFX8)
4106 si_pm4_bind_state(sctx, es, NULL);
4107 }
4108 }
4109
4110 /* Update VS. */
4111 if ((!key.u.tess && !key.u.gs) || sctx->chip_class <= GFX8) {
4112 r = si_shader_select(ctx, &sctx->vs_shader, key, &compiler_state);
4113 if (r)
4114 return false;
4115
4116 if (!key.u.tess && !key.u.gs) {
4117 if (key.u.ngg) {
4118 si_pm4_bind_state(sctx, gs, sctx->vs_shader.current->pm4);
4119 si_pm4_bind_state(sctx, vs, NULL);
4120 } else {
4121 si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
4122 }
4123 } else if (sctx->tes_shader.cso) {
4124 si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
4125 } else {
4126 assert(sctx->gs_shader.cso);
4127 si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
4128 }
4129 }
4130
4131 /* This must be done after the shader variant is selected. */
4132 if (sctx->ngg) {
4133 struct si_shader *vs = si_get_vs(sctx)->current;
4134
4135 key.u.ngg_passthrough = gfx10_is_ngg_passthrough(vs);
4136 key.u.ngg_gs_fast_launch = !!(vs->key.opt.ngg_culling &
4137 SI_NGG_CULL_GS_FAST_LAUNCH_ALL);
4138 }
4139
4140 si_update_vgt_shader_config(sctx, key);
4141
4142 if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable)
4143 si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
4144
4145 if (sctx->ps_shader.cso) {
4146 unsigned db_shader_control;
4147
4148 r = si_shader_select(ctx, &sctx->ps_shader, key, &compiler_state);
4149 if (r)
4150 return false;
4151 si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);
4152
4153 db_shader_control =
4154 sctx->ps_shader.cso->db_shader_control |
4155 S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS);
4156
4157 if (si_pm4_state_changed(sctx, ps) ||
4158 si_pm4_state_changed(sctx, vs) ||
4159 (key.u.ngg && si_pm4_state_changed(sctx, gs)) ||
4160 sctx->sprite_coord_enable != rs->sprite_coord_enable ||
4161 sctx->flatshade != rs->flatshade) {
4162 sctx->sprite_coord_enable = rs->sprite_coord_enable;
4163 sctx->flatshade = rs->flatshade;
4164 si_mark_atom_dirty(sctx, &sctx->atoms.s.spi_map);
4165 }
4166
4167 if (sctx->screen->info.rbplus_allowed &&
4168 si_pm4_state_changed(sctx, ps) &&
4169 (!old_ps ||
4170 old_spi_shader_col_format !=
4171 sctx->ps_shader.current->key.part.ps.epilog.spi_shader_col_format))
4172 si_mark_atom_dirty(sctx, &sctx->atoms.s.cb_render_state);
4173
4174 if (sctx->ps_db_shader_control != db_shader_control) {
4175 sctx->ps_db_shader_control = db_shader_control;
4176 si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
4177 if (sctx->screen->dpbb_allowed)
4178 si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
4179 }
4180
4181 if (sctx->smoothing_enabled != sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing) {
4182 sctx->smoothing_enabled = sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing;
4183 si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
4184
4185 if (sctx->chip_class == GFX6)
4186 si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
4187
4188 if (sctx->framebuffer.nr_samples <= 1)
4189 si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_sample_locs);
4190 }
4191 }
4192
4193 if (si_pm4_state_enabled_and_changed(sctx, ls) ||
4194 si_pm4_state_enabled_and_changed(sctx, hs) ||
4195 si_pm4_state_enabled_and_changed(sctx, es) ||
4196 si_pm4_state_enabled_and_changed(sctx, gs) ||
4197 si_pm4_state_enabled_and_changed(sctx, vs) ||
4198 si_pm4_state_enabled_and_changed(sctx, ps)) {
4199 if (!si_update_spi_tmpring_size(sctx))
4200 return false;
4201 }
4202
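/* Mark newly bound shader states for L2 prefetch and clear the bit when a
 * stage is unbound, so the prefetch logic skips missing stages.
 */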
4203 if (sctx->chip_class >= GFX7) {
4204 if (si_pm4_state_enabled_and_changed(sctx, ls))
4205 sctx->prefetch_L2_mask |= SI_PREFETCH_LS;
4206 else if (!sctx->queued.named.ls)
4207 sctx->prefetch_L2_mask &= ~SI_PREFETCH_LS;
4208
4209 if (si_pm4_state_enabled_and_changed(sctx, hs))
4210 sctx->prefetch_L2_mask |= SI_PREFETCH_HS;
4211 else if (!sctx->queued.named.hs)
4212 sctx->prefetch_L2_mask &= ~SI_PREFETCH_HS;
4213
4214 if (si_pm4_state_enabled_and_changed(sctx, es))
4215 sctx->prefetch_L2_mask |= SI_PREFETCH_ES;
4216 else if (!sctx->queued.named.es)
4217 sctx->prefetch_L2_mask &= ~SI_PREFETCH_ES;
4218
4219 if (si_pm4_state_enabled_and_changed(sctx, gs))
4220 sctx->prefetch_L2_mask |= SI_PREFETCH_GS;
4221 else if (!sctx->queued.named.gs)
4222 sctx->prefetch_L2_mask &= ~SI_PREFETCH_GS;
4223
4224 if (si_pm4_state_enabled_and_changed(sctx, vs))
4225 sctx->prefetch_L2_mask |= SI_PREFETCH_VS;
4226 else if (!sctx->queued.named.vs)
4227 sctx->prefetch_L2_mask &= ~SI_PREFETCH_VS;
4228
4229 if (si_pm4_state_enabled_and_changed(sctx, ps))
4230 sctx->prefetch_L2_mask |= SI_PREFETCH_PS;
4231 else if (!sctx->queued.named.ps)
4232 sctx->prefetch_L2_mask &= ~SI_PREFETCH_PS;
4233 }
4234
4235 sctx->do_update_shaders = false;
4236 return true;
4237 }
4238
4239 static void si_emit_scratch_state(struct si_context *sctx)
4240 {
4241 struct radeon_cmdbuf *cs = sctx->gfx_cs;
4242
4243 radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
4244 sctx->spi_tmpring_size);
4245
4246 if (sctx->scratch_buffer) {
4247 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
4248 sctx->scratch_buffer, RADEON_USAGE_READWRITE,
4249 RADEON_PRIO_SCRATCH_BUFFER);
4250 }
4251 }
4252
4253 void si_init_screen_live_shader_cache(struct si_screen *sscreen)
4254 {
4255 util_live_shader_cache_init(&sscreen->live_shader_cache,
4256 si_create_shader_selector,
4257 si_destroy_shader_selector);
4258 }
4259
4260 void si_init_shader_functions(struct si_context *sctx)
4261 {
4262 sctx->atoms.s.spi_map.emit = si_emit_spi_map;
4263 sctx->atoms.s.scratch_state.emit = si_emit_scratch_state;
4264
4265 sctx->b.create_vs_state = si_create_shader;
4266 sctx->b.create_tcs_state = si_create_shader;
4267 sctx->b.create_tes_state = si_create_shader;
4268 sctx->b.create_gs_state = si_create_shader;
4269 sctx->b.create_fs_state = si_create_shader;
4270
4271 sctx->b.bind_vs_state = si_bind_vs_shader;
4272 sctx->b.bind_tcs_state = si_bind_tcs_shader;
4273 sctx->b.bind_tes_state = si_bind_tes_shader;
4274 sctx->b.bind_gs_state = si_bind_gs_shader;
4275 sctx->b.bind_fs_state = si_bind_ps_shader;
4276
4277 sctx->b.delete_vs_state = si_delete_shader_selector;
4278 sctx->b.delete_tcs_state = si_delete_shader_selector;
4279 sctx->b.delete_tes_state = si_delete_shader_selector;
4280 sctx->b.delete_gs_state = si_delete_shader_selector;
4281 sctx->b.delete_fs_state = si_delete_shader_selector;
4282 }