radeonsi: use R600_RESOURCE_FLAG_UNMAPPABLE where it's desirable
[mesa.git] / src / gallium / drivers / radeonsi / si_state_shaders.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Christian König <christian.koenig@amd.com>
25 * Marek Olšák <maraeo@gmail.com>
26 */
27
28 #include "si_pipe.h"
29 #include "sid.h"
30 #include "radeon/r600_cs.h"
31
32 #include "tgsi/tgsi_parse.h"
33 #include "tgsi/tgsi_ureg.h"
34 #include "util/hash_table.h"
35 #include "util/crc32.h"
36 #include "util/u_memory.h"
37 #include "util/u_prim.h"
38
39 /* SHADER_CACHE */
40
41 /**
42 * Return the TGSI binary in a buffer. The first 4 bytes contain its size as
43 * integer.
44 */
45 static void *si_get_tgsi_binary(struct si_shader_selector *sel)
46 {
47 unsigned tgsi_size = tgsi_num_tokens(sel->tokens) *
48 sizeof(struct tgsi_token);
49 unsigned size = 4 + tgsi_size + sizeof(sel->so);
50 char *result = (char*)MALLOC(size);
51
52 if (!result)
53 return NULL;
54
55 *((uint32_t*)result) = size;
56 memcpy(result + 4, sel->tokens, tgsi_size);
57 memcpy(result + 4 + tgsi_size, &sel->so, sizeof(sel->so));
58 return result;
59 }
60
61 /** Copy "data" to "ptr" and return the next dword following copied data. */
62 static uint32_t *write_data(uint32_t *ptr, const void *data, unsigned size)
63 {
64 /* data may be NULL if size == 0 */
65 if (size)
66 memcpy(ptr, data, size);
67 ptr += DIV_ROUND_UP(size, 4);
68 return ptr;
69 }
70
71 /** Read data from "ptr". Return the next dword following the data. */
72 static uint32_t *read_data(uint32_t *ptr, void *data, unsigned size)
73 {
74 memcpy(data, ptr, size);
75 ptr += DIV_ROUND_UP(size, 4);
76 return ptr;
77 }
78
79 /**
80 * Write the size as uint followed by the data. Return the next dword
81 * following the copied data.
82 */
83 static uint32_t *write_chunk(uint32_t *ptr, const void *data, unsigned size)
84 {
85 *ptr++ = size;
86 return write_data(ptr, data, size);
87 }
88
89 /**
90 * Read the size as uint followed by the data. Return both via parameters.
91 * Return the next dword following the data.
92 */
93 static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size)
94 {
95 *size = *ptr++;
96 assert(*data == NULL);
97 if (!*size)
98 return ptr;
99 *data = malloc(*size);
100 return read_data(ptr, *data, *size);
101 }
102
103 /**
104 * Return the shader binary in a buffer. The first 4 bytes contain its size
105 * as integer.
106 */
107 static void *si_get_shader_binary(struct si_shader *shader)
108 {
109 /* There is always a size of data followed by the data itself. */
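	/* Layout: total size, CRC32, then config and info stored raw,
	 * followed by dword-aligned, size-prefixed chunks for the code,
	 * rodata, relocs, disassembly and LLVM IR strings. */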
110 unsigned relocs_size = shader->binary.reloc_count *
111 sizeof(shader->binary.relocs[0]);
112 unsigned disasm_size = strlen(shader->binary.disasm_string) + 1;
113 unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
114 strlen(shader->binary.llvm_ir_string) + 1 : 0;
115 unsigned size =
116 4 + /* total size */
117 4 + /* CRC32 of the data below */
118 align(sizeof(shader->config), 4) +
119 align(sizeof(shader->info), 4) +
120 4 + align(shader->binary.code_size, 4) +
121 4 + align(shader->binary.rodata_size, 4) +
122 4 + align(relocs_size, 4) +
123 4 + align(disasm_size, 4) +
124 4 + align(llvm_ir_size, 4);
125 void *buffer = CALLOC(1, size);
126 uint32_t *ptr = (uint32_t*)buffer;
127
128 if (!buffer)
129 return NULL;
130
131 *ptr++ = size;
132 ptr++; /* CRC32 is calculated at the end. */
133
134 ptr = write_data(ptr, &shader->config, sizeof(shader->config));
135 ptr = write_data(ptr, &shader->info, sizeof(shader->info));
136 ptr = write_chunk(ptr, shader->binary.code, shader->binary.code_size);
137 ptr = write_chunk(ptr, shader->binary.rodata, shader->binary.rodata_size);
138 ptr = write_chunk(ptr, shader->binary.relocs, relocs_size);
139 ptr = write_chunk(ptr, shader->binary.disasm_string, disasm_size);
140 ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size);
141 assert((char *)ptr - (char *)buffer == size);
142
143 /* Compute CRC32. */
144 ptr = (uint32_t*)buffer;
145 ptr++;
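	/* The CRC covers everything after the first two dwords
	 * (the total size and the CRC itself), hence size - 8. */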
146 *ptr = util_hash_crc32(ptr + 1, size - 8);
147
148 return buffer;
149 }
150
151 static bool si_load_shader_binary(struct si_shader *shader, void *binary)
152 {
153 uint32_t *ptr = (uint32_t*)binary;
154 uint32_t size = *ptr++;
155 uint32_t crc32 = *ptr++;
156 unsigned chunk_size;
157
158 if (util_hash_crc32(ptr, size - 8) != crc32) {
159 fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n");
160 return false;
161 }
162
163 ptr = read_data(ptr, &shader->config, sizeof(shader->config));
164 ptr = read_data(ptr, &shader->info, sizeof(shader->info));
165 ptr = read_chunk(ptr, (void**)&shader->binary.code,
166 &shader->binary.code_size);
167 ptr = read_chunk(ptr, (void**)&shader->binary.rodata,
168 &shader->binary.rodata_size);
169 ptr = read_chunk(ptr, (void**)&shader->binary.relocs, &chunk_size);
170 shader->binary.reloc_count = chunk_size / sizeof(shader->binary.relocs[0]);
171 ptr = read_chunk(ptr, (void**)&shader->binary.disasm_string, &chunk_size);
172 ptr = read_chunk(ptr, (void**)&shader->binary.llvm_ir_string, &chunk_size);
173
174 return true;
175 }
176
177 /**
178 * Insert a shader into the cache. It's assumed the shader is not in the cache.
179 * Use si_shader_cache_load_shader before calling this.
180 *
181 * Returns false on failure, in which case the tgsi_binary should be freed.
182 */
183 static bool si_shader_cache_insert_shader(struct si_screen *sscreen,
184 void *tgsi_binary,
185 struct si_shader *shader)
186 {
187 void *hw_binary;
188 struct hash_entry *entry;
189
190 entry = _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
191 if (entry)
192 return false; /* already added */
193
194 hw_binary = si_get_shader_binary(shader);
195 if (!hw_binary)
196 return false;
197
198 if (_mesa_hash_table_insert(sscreen->shader_cache, tgsi_binary,
199 hw_binary) == NULL) {
200 FREE(hw_binary);
201 return false;
202 }
203
204 return true;
205 }
206
207 static bool si_shader_cache_load_shader(struct si_screen *sscreen,
208 void *tgsi_binary,
209 struct si_shader *shader)
210 {
211 struct hash_entry *entry =
212 _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
213 if (!entry)
214 return false;
215
216 if (!si_load_shader_binary(shader, entry->data))
217 return false;
218
219 p_atomic_inc(&sscreen->b.num_shader_cache_hits);
220 return true;
221 }
222
223 static uint32_t si_shader_cache_key_hash(const void *key)
224 {
225 /* The first dword is the key size. */
226 return util_hash_crc32(key, *(uint32_t*)key);
227 }
228
229 static bool si_shader_cache_key_equals(const void *a, const void *b)
230 {
231 uint32_t *keya = (uint32_t*)a;
232 uint32_t *keyb = (uint32_t*)b;
233
234 /* The first dword is the key size. */
235 if (*keya != *keyb)
236 return false;
237
238 return memcmp(keya, keyb, *keya) == 0;
239 }
240
241 static void si_destroy_shader_cache_entry(struct hash_entry *entry)
242 {
243 FREE((void*)entry->key);
244 FREE(entry->data);
245 }
246
247 bool si_init_shader_cache(struct si_screen *sscreen)
248 {
249 pipe_mutex_init(sscreen->shader_cache_mutex);
250 sscreen->shader_cache =
251 _mesa_hash_table_create(NULL,
252 si_shader_cache_key_hash,
253 si_shader_cache_key_equals);
254 return sscreen->shader_cache != NULL;
255 }
256
257 void si_destroy_shader_cache(struct si_screen *sscreen)
258 {
259 if (sscreen->shader_cache)
260 _mesa_hash_table_destroy(sscreen->shader_cache,
261 si_destroy_shader_cache_entry);
262 pipe_mutex_destroy(sscreen->shader_cache_mutex);
263 }
264
265 /* SHADER STATES */
266
267 static void si_set_tesseval_regs(struct si_screen *sscreen,
268 struct si_shader *shader,
269 struct si_pm4_state *pm4)
270 {
271 struct tgsi_shader_info *info = &shader->selector->info;
272 unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
273 unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
274 bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
275 bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
276 unsigned type, partitioning, topology, distribution_mode;
277
278 switch (tes_prim_mode) {
279 case PIPE_PRIM_LINES:
280 type = V_028B6C_TESS_ISOLINE;
281 break;
282 case PIPE_PRIM_TRIANGLES:
283 type = V_028B6C_TESS_TRIANGLE;
284 break;
285 case PIPE_PRIM_QUADS:
286 type = V_028B6C_TESS_QUAD;
287 break;
288 default:
289 assert(0);
290 return;
291 }
292
293 switch (tes_spacing) {
294 case PIPE_TESS_SPACING_FRACTIONAL_ODD:
295 partitioning = V_028B6C_PART_FRAC_ODD;
296 break;
297 case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
298 partitioning = V_028B6C_PART_FRAC_EVEN;
299 break;
300 case PIPE_TESS_SPACING_EQUAL:
301 partitioning = V_028B6C_PART_INTEGER;
302 break;
303 default:
304 assert(0);
305 return;
306 }
307
308 if (tes_point_mode)
309 topology = V_028B6C_OUTPUT_POINT;
310 else if (tes_prim_mode == PIPE_PRIM_LINES)
311 topology = V_028B6C_OUTPUT_LINE;
312 else if (tes_vertex_order_cw)
313 /* for some reason, this must be the other way around */
314 topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
315 else
316 topology = V_028B6C_OUTPUT_TRIANGLE_CW;
317
318 if (sscreen->has_distributed_tess) {
319 if (sscreen->b.family == CHIP_FIJI ||
320 sscreen->b.family >= CHIP_POLARIS10)
321 distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
322 else
323 distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
324 } else
325 distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;
326
327 si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
328 S_028B6C_TYPE(type) |
329 S_028B6C_PARTITIONING(partitioning) |
330 S_028B6C_TOPOLOGY(topology) |
331 S_028B6C_DISTRIBUTION_MODE(distribution_mode));
332 }
333
334 /* Polaris needs different VTX_REUSE_DEPTH settings depending on
335 * whether the "fractional odd" tessellation spacing is used.
336 *
337 * Possible VGT configurations and which state should set the register:
338 *
339 * Reg set in | VGT shader configuration | Value
340 * ------------------------------------------------------
341 * VS as VS | VS | 30
342 * VS as ES | ES -> GS -> VS | 30
343 * TES as VS | LS -> HS -> VS | 14 or 30
344 * TES as ES | LS -> HS -> ES -> GS -> VS | 14 or 30
345 */
346 static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen,
347 struct si_shader *shader,
348 struct si_pm4_state *pm4)
349 {
350 unsigned type = shader->selector->type;
351
352 if (sscreen->b.family < CHIP_POLARIS10)
353 return;
354
355 /* VS as VS, or VS as ES: */
356 if ((type == PIPE_SHADER_VERTEX &&
357 !shader->key.as_ls &&
358 !shader->is_gs_copy_shader) ||
359 /* TES as VS, or TES as ES: */
360 type == PIPE_SHADER_TESS_EVAL) {
361 unsigned vtx_reuse_depth = 30;
362
363 if (type == PIPE_SHADER_TESS_EVAL &&
364 shader->selector->info.properties[TGSI_PROPERTY_TES_SPACING] ==
365 PIPE_TESS_SPACING_FRACTIONAL_ODD)
366 vtx_reuse_depth = 14;
367
368 si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
369 vtx_reuse_depth);
370 }
371 }
372
373 static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
374 {
375 if (shader->pm4)
376 si_pm4_clear_state(shader->pm4);
377 else
378 shader->pm4 = CALLOC_STRUCT(si_pm4_state);
379
380 return shader->pm4;
381 }
382
383 static void si_shader_ls(struct si_shader *shader)
384 {
385 struct si_pm4_state *pm4;
386 unsigned vgpr_comp_cnt;
387 uint64_t va;
388
389 pm4 = si_get_shader_pm4_state(shader);
390 if (!pm4)
391 return;
392
393 va = shader->bo->gpu_address;
394 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
395
396 /* We need at least 2 components for LS.
397 * VGPR0-3: (VertexID, RelAutoindex, ???, InstanceID). */
398 vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 1;
399
400 si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
401 si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);
402
403 shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
404 S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
405 S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
406 S_00B528_DX10_CLAMP(1) |
407 S_00B528_FLOAT_MODE(shader->config.float_mode);
408 shader->config.rsrc2 = S_00B52C_USER_SGPR(SI_LS_NUM_USER_SGPR) |
409 S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
410 }
411
412 static void si_shader_hs(struct si_shader *shader)
413 {
414 struct si_pm4_state *pm4;
415 uint64_t va;
416
417 pm4 = si_get_shader_pm4_state(shader);
418 if (!pm4)
419 return;
420
421 va = shader->bo->gpu_address;
422 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
423
424 si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
425 si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
426 si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
427 S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) |
428 S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) |
429 S_00B428_DX10_CLAMP(1) |
430 S_00B428_FLOAT_MODE(shader->config.float_mode));
431 si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
432 S_00B42C_USER_SGPR(SI_TCS_NUM_USER_SGPR) |
433 S_00B42C_OC_LDS_EN(1) |
434 S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
435 }
436
437 static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
438 {
439 struct si_pm4_state *pm4;
440 unsigned num_user_sgprs;
441 unsigned vgpr_comp_cnt;
442 uint64_t va;
443 unsigned oc_lds_en;
444
445 pm4 = si_get_shader_pm4_state(shader);
446 if (!pm4)
447 return;
448
449 va = shader->bo->gpu_address;
450 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
451
452 if (shader->selector->type == PIPE_SHADER_VERTEX) {
453 vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 0;
454 num_user_sgprs = SI_ES_NUM_USER_SGPR;
455 } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
456 vgpr_comp_cnt = 3; /* all components are needed for TES */
457 num_user_sgprs = SI_TES_NUM_USER_SGPR;
458 } else
459 unreachable("invalid shader selector type");
460
461 oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;
462
463 si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
464 shader->selector->esgs_itemsize / 4);
465 si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
466 si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
467 si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
468 S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
469 S_00B328_SGPRS((shader->config.num_sgprs - 1) / 8) |
470 S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
471 S_00B328_DX10_CLAMP(1) |
472 S_00B328_FLOAT_MODE(shader->config.float_mode));
473 si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
474 S_00B32C_USER_SGPR(num_user_sgprs) |
475 S_00B32C_OC_LDS_EN(oc_lds_en) |
476 S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
477
478 if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
479 si_set_tesseval_regs(sscreen, shader, pm4);
480
481 polaris_set_vgt_vertex_reuse(sscreen, shader, pm4);
482 }
483
484 /**
485 * Calculate the appropriate setting of VGT_GS_MODE when \p shader is a
486 * geometry shader.
487 */
488 static uint32_t si_vgt_gs_mode(struct si_shader_selector *sel)
489 {
490 unsigned gs_max_vert_out = sel->gs_max_out_vertices;
491 unsigned cut_mode;
492
493 if (gs_max_vert_out <= 128) {
494 cut_mode = V_028A40_GS_CUT_128;
495 } else if (gs_max_vert_out <= 256) {
496 cut_mode = V_028A40_GS_CUT_256;
497 } else if (gs_max_vert_out <= 512) {
498 cut_mode = V_028A40_GS_CUT_512;
499 } else {
500 assert(gs_max_vert_out <= 1024);
501 cut_mode = V_028A40_GS_CUT_1024;
502 }
503
504 return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
505 S_028A40_CUT_MODE(cut_mode)|
506 S_028A40_ES_WRITE_OPTIMIZE(1) |
507 S_028A40_GS_WRITE_OPTIMIZE(1);
508 }
509
510 static void si_shader_gs(struct si_shader *shader)
511 {
512 struct si_shader_selector *sel = shader->selector;
513 const ubyte *num_components = sel->info.num_stream_output_components;
514 unsigned gs_num_invocations = sel->gs_num_invocations;
515 struct si_pm4_state *pm4;
516 uint64_t va;
517 unsigned max_stream = sel->max_gs_stream;
518 unsigned offset;
519
520 pm4 = si_get_shader_pm4_state(shader);
521 if (!pm4)
522 return;
523
524 si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(shader->selector));
525
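	/* GSVS ring offsets are cumulative: OFFSET_n marks where stream n's
	 * vertices begin, and the running total becomes the ring item size. */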
526 offset = num_components[0] * sel->gs_max_out_vertices;
527 si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, offset);
528 if (max_stream >= 1)
529 offset += num_components[1] * sel->gs_max_out_vertices;
530 si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, offset);
531 if (max_stream >= 2)
532 offset += num_components[2] * sel->gs_max_out_vertices;
533 si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, offset);
534 if (max_stream >= 3)
535 offset += num_components[3] * sel->gs_max_out_vertices;
536 si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);
537
538 /* The GSVS_RING_ITEMSIZE register takes 15 bits */
539 assert(offset < (1 << 15));
540
541 si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, shader->selector->gs_max_out_vertices);
542
543 si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, num_components[0]);
544 si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? num_components[1] : 0);
545 si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? num_components[2] : 0);
546 si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? num_components[3] : 0);
547
548 si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
549 S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
550 S_028B90_ENABLE(gs_num_invocations > 0));
551
552 va = shader->bo->gpu_address;
553 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
554 si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
555 si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);
556
557 si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
558 S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
559 S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
560 S_00B228_DX10_CLAMP(1) |
561 S_00B228_FLOAT_MODE(shader->config.float_mode));
562 si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
563 S_00B22C_USER_SGPR(SI_GS_NUM_USER_SGPR) |
564 S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
565 }
566
567 /**
568 * Compute the state for \p shader, which will run as a vertex shader on the
569 * hardware.
570 *
571 * If \p gs is non-NULL, it points to the geometry shader for which this shader
572 * is the copy shader.
573 */
574 static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
575 struct si_shader_selector *gs)
576 {
577 struct si_pm4_state *pm4;
578 unsigned num_user_sgprs;
579 unsigned nparams, vgpr_comp_cnt;
580 uint64_t va;
581 unsigned oc_lds_en;
582 unsigned window_space =
583 shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
584 bool enable_prim_id = si_vs_exports_prim_id(shader);
585
586 pm4 = si_get_shader_pm4_state(shader);
587 if (!pm4)
588 return;
589
590 /* We always write VGT_GS_MODE in the VS state, because every switch
591 * between different shader pipelines involving a different GS or no
592 * GS at all involves a switch of the VS (different GS use different
593 * copy shaders). On the other hand, when the API switches from a GS to
594 * no GS and then back to the same GS used originally, the GS state is
595 * not sent again.
596 */
597 if (!gs) {
598 si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
599 S_028A40_MODE(enable_prim_id ? V_028A40_GS_SCENARIO_A : 0));
600 si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
601 } else {
602 si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(gs));
603 si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
604 }
605
606 va = shader->bo->gpu_address;
607 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
608
609 if (gs) {
610 vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
611 num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
612 } else if (shader->selector->type == PIPE_SHADER_VERTEX) {
613 vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
614 num_user_sgprs = SI_VS_NUM_USER_SGPR;
615 } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
616 vgpr_comp_cnt = 3; /* all components are needed for TES */
617 num_user_sgprs = SI_TES_NUM_USER_SGPR;
618 } else
619 unreachable("invalid shader selector type");
620
621 /* VS is required to export at least one param. */
622 nparams = MAX2(shader->info.nr_param_exports, 1);
623 si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
624 S_0286C4_VS_EXPORT_COUNT(nparams - 1));
625
626 si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
627 S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
628 S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
629 V_02870C_SPI_SHADER_4COMP :
630 V_02870C_SPI_SHADER_NONE) |
631 S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
632 V_02870C_SPI_SHADER_4COMP :
633 V_02870C_SPI_SHADER_NONE) |
634 S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
635 V_02870C_SPI_SHADER_4COMP :
636 V_02870C_SPI_SHADER_NONE));
637
638 oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;
639
640 si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
641 si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
642 si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
643 S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) |
644 S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8) |
645 S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
646 S_00B128_DX10_CLAMP(1) |
647 S_00B128_FLOAT_MODE(shader->config.float_mode));
648 si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
649 S_00B12C_USER_SGPR(num_user_sgprs) |
650 S_00B12C_OC_LDS_EN(oc_lds_en) |
651 S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
652 S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
653 S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
654 S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
655 S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
656 S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
657 if (window_space)
658 si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
659 S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
660 else
661 si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
662 S_028818_VTX_W0_FMT(1) |
663 S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
664 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
665 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
666
667 if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
668 si_set_tesseval_regs(sscreen, shader, pm4);
669
670 polaris_set_vgt_vertex_reuse(sscreen, shader, pm4);
671 }
672
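/* Count the PS input interpolants, including the extra back-face colors
 * added by the two-side color prolog. */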
673 static unsigned si_get_ps_num_interp(struct si_shader *ps)
674 {
675 struct tgsi_shader_info *info = &ps->selector->info;
676 unsigned num_colors = !!(info->colors_read & 0x0f) +
677 !!(info->colors_read & 0xf0);
678 unsigned num_interp = ps->selector->info.num_inputs +
679 (ps->key.part.ps.prolog.color_two_side ? num_colors : 0);
680
681 assert(num_interp <= 32);
682 return MIN2(num_interp, 32);
683 }
684
685 static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
686 {
687 unsigned value = shader->key.part.ps.epilog.spi_shader_col_format;
688 unsigned i, num_targets = (util_last_bit(value) + 3) / 4;
689
690 /* If the i-th target format is set, all previous target formats must
691 * be non-zero to avoid hangs.
692 */
693 for (i = 0; i < num_targets; i++)
694 if (!(value & (0xf << (i * 4))))
695 value |= V_028714_SPI_SHADER_32_R << (i * 4);
696
697 return value;
698 }
699
700 static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
701 {
702 unsigned i, cb_shader_mask = 0;
703
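	/* Derive a per-target 4-bit channel-enable mask from each export
	 * format (R only, RG, AR, or all four channels). */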
704 for (i = 0; i < 8; i++) {
705 switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
706 case V_028714_SPI_SHADER_ZERO:
707 break;
708 case V_028714_SPI_SHADER_32_R:
709 cb_shader_mask |= 0x1 << (i * 4);
710 break;
711 case V_028714_SPI_SHADER_32_GR:
712 cb_shader_mask |= 0x3 << (i * 4);
713 break;
714 case V_028714_SPI_SHADER_32_AR:
715 cb_shader_mask |= 0x9 << (i * 4);
716 break;
717 case V_028714_SPI_SHADER_FP16_ABGR:
718 case V_028714_SPI_SHADER_UNORM16_ABGR:
719 case V_028714_SPI_SHADER_SNORM16_ABGR:
720 case V_028714_SPI_SHADER_UINT16_ABGR:
721 case V_028714_SPI_SHADER_SINT16_ABGR:
722 case V_028714_SPI_SHADER_32_ABGR:
723 cb_shader_mask |= 0xf << (i * 4);
724 break;
725 default:
726 assert(0);
727 }
728 }
729 return cb_shader_mask;
730 }
731
732 static void si_shader_ps(struct si_shader *shader)
733 {
734 struct tgsi_shader_info *info = &shader->selector->info;
735 struct si_pm4_state *pm4;
736 unsigned spi_ps_in_control, spi_shader_col_format, cb_shader_mask;
737 unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
738 uint64_t va;
739 unsigned input_ena = shader->config.spi_ps_input_ena;
740
741 /* we need to enable at least one of them, otherwise we hang the GPU */
742 assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
743 G_0286CC_PERSP_CENTER_ENA(input_ena) ||
744 G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
745 G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
746 G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
747 G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
748 G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
749 G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));
750 /* POS_W_FLOAT_ENA requires one of the perspective weights. */
751 assert(!G_0286CC_POS_W_FLOAT_ENA(input_ena) ||
752 G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
753 G_0286CC_PERSP_CENTER_ENA(input_ena) ||
754 G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
755 G_0286CC_PERSP_PULL_MODEL_ENA(input_ena));
756
757 /* Validate interpolation optimization flags (read as implications). */
758 assert(!shader->key.part.ps.prolog.bc_optimize_for_persp ||
759 (G_0286CC_PERSP_CENTER_ENA(input_ena) &&
760 G_0286CC_PERSP_CENTROID_ENA(input_ena)));
761 assert(!shader->key.part.ps.prolog.bc_optimize_for_linear ||
762 (G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
763 G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
764 assert(!shader->key.part.ps.prolog.force_persp_center_interp ||
765 (!G_0286CC_PERSP_SAMPLE_ENA(input_ena) &&
766 !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
767 assert(!shader->key.part.ps.prolog.force_linear_center_interp ||
768 (!G_0286CC_LINEAR_SAMPLE_ENA(input_ena) &&
769 !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
770 assert(!shader->key.part.ps.prolog.force_persp_sample_interp ||
771 (!G_0286CC_PERSP_CENTER_ENA(input_ena) &&
772 !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
773 assert(!shader->key.part.ps.prolog.force_linear_sample_interp ||
774 (!G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
775 !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
776
777 /* Validate cases when the optimizations are off (read as implications). */
778 assert(shader->key.part.ps.prolog.bc_optimize_for_persp ||
779 !G_0286CC_PERSP_CENTER_ENA(input_ena) ||
780 !G_0286CC_PERSP_CENTROID_ENA(input_ena));
781 assert(shader->key.part.ps.prolog.bc_optimize_for_linear ||
782 !G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
783 !G_0286CC_LINEAR_CENTROID_ENA(input_ena));
784
785 pm4 = si_get_shader_pm4_state(shader);
786 if (!pm4)
787 return;
788
789 /* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
790 * Possible values:
791 * 0 -> Position = pixel center
792 * 1 -> Position = pixel centroid
793 * 2 -> Position = at sample position
794 *
795 * From GLSL 4.5 specification, section 7.1:
796 * "The variable gl_FragCoord is available as an input variable from
797 * within fragment shaders and it holds the window relative coordinates
798 * (x, y, z, 1/w) values for the fragment. If multi-sampling, this
799 * value can be for any location within the pixel, or one of the
800 * fragment samples. The use of centroid does not further restrict
801 * this value to be inside the current primitive."
802 *
803 * Meaning that centroid has no effect and we can return anything within
804 * the pixel. Thus, return the value at sample position, because that's
805 * the most accurate one shaders can get.
806 */
807 spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
808
809 if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
810 TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
811 spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
812
813 spi_shader_col_format = si_get_spi_shader_col_format(shader);
814 cb_shader_mask = si_get_cb_shader_mask(spi_shader_col_format);
815
816 /* Ensure that some export memory is always allocated, for two reasons:
817 *
818 * 1) Correctness: The hardware ignores the EXEC mask if no export
819 * memory is allocated, so KILL and alpha test do not work correctly
820 * without this.
821 * 2) Performance: Every shader needs at least a NULL export, even when
822 * it writes no color/depth output. The NULL export instruction
823 * stalls without this setting.
824 *
825 * Don't add this to CB_SHADER_MASK.
826 */
827 if (!spi_shader_col_format &&
828 !info->writes_z && !info->writes_stencil && !info->writes_samplemask)
829 spi_shader_col_format = V_028714_SPI_SHADER_32_R;
830
831 si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, input_ena);
832 si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR,
833 shader->config.spi_ps_input_addr);
834
835 /* Set interpolation controls. */
836 spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader));
837
838 /* Set registers. */
839 si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
840 si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
841
842 si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
843 si_get_spi_shader_z_format(info->writes_z,
844 info->writes_stencil,
845 info->writes_samplemask));
846
847 si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT, spi_shader_col_format);
848 si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, cb_shader_mask);
849
850 va = shader->bo->gpu_address;
851 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
852 si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
853 si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);
854
855 si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
856 S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) |
857 S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8) |
858 S_00B028_DX10_CLAMP(1) |
859 S_00B028_FLOAT_MODE(shader->config.float_mode));
860 si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
861 S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
862 S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
863 S_00B02C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
864 }
865
866 static void si_shader_init_pm4_state(struct si_screen *sscreen,
867 struct si_shader *shader)
868 {
869 switch (shader->selector->type) {
870 case PIPE_SHADER_VERTEX:
871 if (shader->key.as_ls)
872 si_shader_ls(shader);
873 else if (shader->key.as_es)
874 si_shader_es(sscreen, shader);
875 else
876 si_shader_vs(sscreen, shader, NULL);
877 break;
878 case PIPE_SHADER_TESS_CTRL:
879 si_shader_hs(shader);
880 break;
881 case PIPE_SHADER_TESS_EVAL:
882 if (shader->key.as_es)
883 si_shader_es(sscreen, shader);
884 else
885 si_shader_vs(sscreen, shader, NULL);
886 break;
887 case PIPE_SHADER_GEOMETRY:
888 si_shader_gs(shader);
889 break;
890 case PIPE_SHADER_FRAGMENT:
891 si_shader_ps(shader);
892 break;
893 default:
894 assert(0);
895 }
896 }
897
898 static unsigned si_get_alpha_test_func(struct si_context *sctx)
899 {
900 /* Alpha-test should be disabled if colorbuffer 0 is integer. */
901 if (sctx->queued.named.dsa)
902 return sctx->queued.named.dsa->alpha_func;
903
904 return PIPE_FUNC_ALWAYS;
905 }
906
907 static void si_shader_selector_key_hw_vs(struct si_context *sctx,
908 struct si_shader_selector *vs,
909 struct si_shader_key *key)
910 {
911 struct si_shader_selector *ps = sctx->ps_shader.cso;
912
913 key->opt.hw_vs.clip_disable =
914 sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
915 (vs->info.clipdist_writemask ||
916 vs->info.writes_clipvertex) &&
917 !vs->info.culldist_writemask;
918
919 /* Find out if PS is disabled. */
920 bool ps_disabled = true;
921 if (ps) {
922 bool ps_modifies_zs = ps->info.uses_kill ||
923 ps->info.writes_z ||
924 ps->info.writes_stencil ||
925 ps->info.writes_samplemask ||
926 si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS;
927
928 unsigned ps_colormask = sctx->framebuffer.colorbuf_enabled_4bit &
929 sctx->queued.named.blend->cb_target_mask;
930 if (!ps->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
931 ps_colormask &= ps->colors_written_4bit;
932
933 ps_disabled = sctx->queued.named.rasterizer->rasterizer_discard ||
934 (!ps_colormask &&
935 !ps_modifies_zs &&
936 !ps->info.writes_memory);
937 }
938
939 /* Find out which VS outputs aren't used by the PS. */
940 uint64_t outputs_written = vs->outputs_written;
941 uint32_t outputs_written2 = vs->outputs_written2;
942 uint64_t inputs_read = 0;
943 uint32_t inputs_read2 = 0;
944
945 outputs_written &= ~0x3; /* ignore POSITION, PSIZE */
946
947 if (!ps_disabled) {
948 inputs_read = ps->inputs_read;
949 inputs_read2 = ps->inputs_read2;
950 }
951
952 uint64_t linked = outputs_written & inputs_read;
953 uint32_t linked2 = outputs_written2 & inputs_read2;
954
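	/* Kill the outputs that the VS writes but the PS never reads. */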
955 key->opt.hw_vs.kill_outputs = ~linked & outputs_written;
956 key->opt.hw_vs.kill_outputs2 = ~linked2 & outputs_written2;
957 }
958
959 /* Compute the key for the hw shader variant */
960 static inline void si_shader_selector_key(struct pipe_context *ctx,
961 struct si_shader_selector *sel,
962 struct si_shader_key *key)
963 {
964 struct si_context *sctx = (struct si_context *)ctx;
965 unsigned i;
966
967 memset(key, 0, sizeof(*key));
968
969 switch (sel->type) {
970 case PIPE_SHADER_VERTEX:
971 if (sctx->vertex_elements) {
972 unsigned count = MIN2(sel->info.num_inputs,
973 sctx->vertex_elements->count);
974 for (i = 0; i < count; ++i)
975 key->part.vs.prolog.instance_divisors[i] =
976 sctx->vertex_elements->elements[i].instance_divisor;
977
978 memcpy(key->mono.vs.fix_fetch,
979 sctx->vertex_elements->fix_fetch, count);
980 }
981 if (sctx->tes_shader.cso)
982 key->as_ls = 1;
983 else if (sctx->gs_shader.cso)
984 key->as_es = 1;
985 else {
986 si_shader_selector_key_hw_vs(sctx, sel, key);
987
988 if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
989 key->part.vs.epilog.export_prim_id = 1;
990 }
991 break;
992 case PIPE_SHADER_TESS_CTRL:
993 key->part.tcs.epilog.prim_mode =
994 sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
995
996 if (sel == sctx->fixed_func_tcs_shader.cso)
997 key->mono.tcs.inputs_to_copy = sctx->vs_shader.cso->outputs_written;
998 break;
999 case PIPE_SHADER_TESS_EVAL:
1000 if (sctx->gs_shader.cso)
1001 key->as_es = 1;
1002 else {
1003 si_shader_selector_key_hw_vs(sctx, sel, key);
1004
1005 if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
1006 key->part.tes.epilog.export_prim_id = 1;
1007 }
1008 break;
1009 case PIPE_SHADER_GEOMETRY:
1010 key->part.gs.prolog.tri_strip_adj_fix = sctx->gs_tri_strip_adj_fix;
1011 break;
1012 case PIPE_SHADER_FRAGMENT: {
1013 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
1014 struct si_state_blend *blend = sctx->queued.named.blend;
1015
1016 if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
1017 sel->info.colors_written == 0x1)
1018 key->part.ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
1019
1020 if (blend) {
1021 /* Select the shader color format based on whether
1022 * blending or alpha are needed.
1023 */
1024 key->part.ps.epilog.spi_shader_col_format =
1025 (blend->blend_enable_4bit & blend->need_src_alpha_4bit &
1026 sctx->framebuffer.spi_shader_col_format_blend_alpha) |
1027 (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
1028 sctx->framebuffer.spi_shader_col_format_blend) |
1029 (~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
1030 sctx->framebuffer.spi_shader_col_format_alpha) |
1031 (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
1032 sctx->framebuffer.spi_shader_col_format);
1033
1034 /* The output for dual source blending should have
1035 * the same format as the first output.
1036 */
1037 if (blend->dual_src_blend)
1038 key->part.ps.epilog.spi_shader_col_format |=
1039 (key->part.ps.epilog.spi_shader_col_format & 0xf) << 4;
1040 } else
1041 key->part.ps.epilog.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format;
1042
1043 /* If alpha-to-coverage is enabled, we have to export alpha
1044 * even if there is no color buffer.
1045 */
1046 if (!(key->part.ps.epilog.spi_shader_col_format & 0xf) &&
1047 blend && blend->alpha_to_coverage)
1048 key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;
1049
1050 /* On SI and CIK except Hawaii, the CB doesn't clamp outputs
1051 * to the range supported by the type if a channel has less
1052 * than 16 bits and the export format is 16_ABGR.
1053 */
1054 if (sctx->b.chip_class <= CIK && sctx->b.family != CHIP_HAWAII)
1055 key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
1056
1057 /* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
1058 if (!key->part.ps.epilog.last_cbuf) {
1059 key->part.ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
1060 key->part.ps.epilog.color_is_int8 &= sel->info.colors_written;
1061 }
1062
1063 if (rs) {
1064 bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
1065 sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
1066 sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
1067 bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;
1068
1069 key->part.ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
1070 key->part.ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;
1071
1072 if (sctx->queued.named.blend) {
1073 key->part.ps.epilog.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
1074 rs->multisample_enable;
1075 }
1076
1077 key->part.ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
1078 key->part.ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
1079 (is_line && rs->line_smooth)) &&
1080 sctx->framebuffer.nr_samples <= 1;
1081 key->part.ps.epilog.clamp_color = rs->clamp_fragment_color;
1082
1083 if (rs->force_persample_interp &&
1084 rs->multisample_enable &&
1085 sctx->framebuffer.nr_samples > 1 &&
1086 sctx->ps_iter_samples > 1) {
1087 key->part.ps.prolog.force_persp_sample_interp =
1088 sel->info.uses_persp_center ||
1089 sel->info.uses_persp_centroid;
1090
1091 key->part.ps.prolog.force_linear_sample_interp =
1092 sel->info.uses_linear_center ||
1093 sel->info.uses_linear_centroid;
1094 } else if (rs->multisample_enable &&
1095 sctx->framebuffer.nr_samples > 1) {
1096 key->part.ps.prolog.bc_optimize_for_persp =
1097 sel->info.uses_persp_center &&
1098 sel->info.uses_persp_centroid;
1099 key->part.ps.prolog.bc_optimize_for_linear =
1100 sel->info.uses_linear_center &&
1101 sel->info.uses_linear_centroid;
1102 } else {
1103 /* Make sure SPI doesn't compute more than 1 pair
1104 * of (i,j), which is the optimization here. */
1105 key->part.ps.prolog.force_persp_center_interp =
1106 sel->info.uses_persp_center +
1107 sel->info.uses_persp_centroid +
1108 sel->info.uses_persp_sample > 1;
1109
1110 key->part.ps.prolog.force_linear_center_interp =
1111 sel->info.uses_linear_center +
1112 sel->info.uses_linear_centroid +
1113 sel->info.uses_linear_sample > 1;
1114 }
1115 }
1116
1117 key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
1118 break;
1119 }
1120 default:
1121 assert(0);
1122 }
1123 }
1124
1125 static void si_build_shader_variant(void *job, int thread_index)
1126 {
1127 struct si_shader *shader = (struct si_shader *)job;
1128 struct si_shader_selector *sel = shader->selector;
1129 struct si_screen *sscreen = sel->screen;
1130 LLVMTargetMachineRef tm;
1131 struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug;
1132 int r;
1133
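	/* A non-negative thread_index means we are running on a compiler-queue
	 * thread and must use its per-thread LLVM TargetMachine; otherwise use
	 * the one from the caller's compiler context state. */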
1134 if (thread_index >= 0) {
1135 assert(thread_index < ARRAY_SIZE(sscreen->tm));
1136 tm = sscreen->tm[thread_index];
1137 if (!debug->async)
1138 debug = NULL;
1139 } else {
1140 tm = shader->compiler_ctx_state.tm;
1141 }
1142
1143 r = si_shader_create(sscreen, tm, shader, debug);
1144 if (unlikely(r)) {
1145 R600_ERR("Failed to build shader variant (type=%u) %d\n",
1146 sel->type, r);
1147 shader->compilation_failed = true;
1148 return;
1149 }
1150
1151 if (shader->compiler_ctx_state.is_debug_context) {
1152 FILE *f = open_memstream(&shader->shader_log,
1153 &shader->shader_log_size);
1154 if (f) {
1155 si_shader_dump(sscreen, shader, NULL, sel->type, f, false);
1156 fclose(f);
1157 }
1158 }
1159
1160 si_shader_init_pm4_state(sscreen, shader);
1161 }
1162
1163 /* Select the hw shader variant depending on the current state. */
1164 static int si_shader_select_with_key(struct si_screen *sscreen,
1165 struct si_shader_ctx_state *state,
1166 struct si_compiler_ctx_state *compiler_state,
1167 struct si_shader_key *key,
1168 int thread_index)
1169 {
1170 static const struct si_shader_key zeroed;
1171 struct si_shader_selector *sel = state->cso;
1172 struct si_shader *current = state->current;
1173 struct si_shader *iter, *shader = NULL;
1174
1175 if (unlikely(sscreen->b.debug_flags & DBG_NO_OPT_VARIANT)) {
1176 memset(&key->opt, 0, sizeof(key->opt));
1177 }
1178
1179 again:
1180 /* Check if we don't need to change anything.
1181 * This path is also used for most shaders that don't need multiple
1182 * variants; it costs just a computation of the key and this
1183 * test. */
1184 if (likely(current &&
1185 memcmp(&current->key, key, sizeof(*key)) == 0 &&
1186 (!current->is_optimized ||
1187 util_queue_fence_is_signalled(&current->optimized_ready))))
1188 return 0;
1189
1190 /* This must be done before the mutex is locked, because async GS
1191 * compilation calls this function too, and therefore must enter
1192 * the mutex first.
1193 *
1194 * Only wait if we are in a draw call. Don't wait if we are
1195 * in a compiler thread.
1196 */
1197 if (thread_index < 0)
1198 util_queue_job_wait(&sel->ready);
1199
1200 pipe_mutex_lock(sel->mutex);
1201
1202 /* Find the shader variant. */
1203 for (iter = sel->first_variant; iter; iter = iter->next_variant) {
1204 /* Don't check the "current" shader. We checked it above. */
1205 if (current != iter &&
1206 memcmp(&iter->key, key, sizeof(*key)) == 0) {
1207 /* If it's an optimized shader and its compilation has
1208 * been started but isn't done, use the unoptimized
1209 * shader so as not to cause a stall due to compilation.
1210 */
1211 if (iter->is_optimized &&
1212 !util_queue_fence_is_signalled(&iter->optimized_ready)) {
1213 memset(&key->opt, 0, sizeof(key->opt));
1214 pipe_mutex_unlock(sel->mutex);
1215 goto again;
1216 }
1217
1218 if (iter->compilation_failed) {
1219 pipe_mutex_unlock(sel->mutex);
1220 return -1; /* skip the draw call */
1221 }
1222
1223 state->current = iter;
1224 pipe_mutex_unlock(sel->mutex);
1225 return 0;
1226 }
1227 }
1228
1229 /* Build a new shader. */
1230 shader = CALLOC_STRUCT(si_shader);
1231 if (!shader) {
1232 pipe_mutex_unlock(sel->mutex);
1233 return -ENOMEM;
1234 }
1235 shader->selector = sel;
1236 shader->key = *key;
1237 shader->compiler_ctx_state = *compiler_state;
1238
1239 /* Compile the main shader part if it doesn't exist. This can happen
1240 * if the initial guess was wrong. */
1241 struct si_shader **mainp = si_get_main_shader_part(sel, key);
1242 bool is_pure_monolithic =
1243 memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;
1244
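	/* A non-zero mono key makes the variant purely monolithic, so no
	 * shared main part is needed in that case. */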
1245 if (!*mainp && !is_pure_monolithic) {
1246 struct si_shader *main_part = CALLOC_STRUCT(si_shader);
1247
1248 if (!main_part) {
1249 FREE(shader);
1250 pipe_mutex_unlock(sel->mutex);
1251 return -ENOMEM; /* skip the draw call */
1252 }
1253
1254 main_part->selector = sel;
1255 main_part->key.as_es = key->as_es;
1256 main_part->key.as_ls = key->as_ls;
1257
1258 if (si_compile_tgsi_shader(sscreen, compiler_state->tm,
1259 main_part, false,
1260 &compiler_state->debug) != 0) {
1261 FREE(main_part);
1262 FREE(shader);
1263 pipe_mutex_unlock(sel->mutex);
1264 return -ENOMEM; /* skip the draw call */
1265 }
1266 *mainp = main_part;
1267 }
1268
1269 /* Monolithic-only shaders don't make a distinction between optimized
1270 * and unoptimized. */
1271 shader->is_monolithic =
1272 is_pure_monolithic ||
1273 memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
1274
1275 shader->is_optimized =
1276 !sscreen->use_monolithic_shaders &&
1277 memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
1278 if (shader->is_optimized)
1279 util_queue_fence_init(&shader->optimized_ready);
1280
1281 if (!sel->last_variant) {
1282 sel->first_variant = shader;
1283 sel->last_variant = shader;
1284 } else {
1285 sel->last_variant->next_variant = shader;
1286 sel->last_variant = shader;
1287 }
1288
1289 /* If it's an optimized shader, compile it asynchronously. */
1290 if (shader->is_optimized &&
1291 !is_pure_monolithic &&
1292 thread_index < 0) {
1293 /* Compile it asynchronously. */
1294 util_queue_add_job(&sscreen->shader_compiler_queue,
1295 shader, &shader->optimized_ready,
1296 si_build_shader_variant, NULL);
1297
1298 /* Use the default (unoptimized) shader for now. */
1299 memset(&key->opt, 0, sizeof(key->opt));
1300 pipe_mutex_unlock(sel->mutex);
1301 goto again;
1302 }
1303
1304 assert(!shader->is_optimized);
1305 si_build_shader_variant(shader, thread_index);
1306
1307 if (!shader->compilation_failed)
1308 state->current = shader;
1309
1310 pipe_mutex_unlock(sel->mutex);
1311 return shader->compilation_failed ? -1 : 0;
1312 }
1313
1314 static int si_shader_select(struct pipe_context *ctx,
1315 struct si_shader_ctx_state *state,
1316 struct si_compiler_ctx_state *compiler_state)
1317 {
1318 struct si_context *sctx = (struct si_context *)ctx;
1319 struct si_shader_key key;
1320
1321 si_shader_selector_key(ctx, state->cso, &key);
1322 return si_shader_select_with_key(sctx->screen, state, compiler_state,
1323 &key, -1);
1324 }
1325
1326 static void si_parse_next_shader_property(const struct tgsi_shader_info *info,
1327 struct si_shader_key *key)
1328 {
1329 unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];
1330
1331 switch (info->processor) {
1332 case PIPE_SHADER_VERTEX:
1333 switch (next_shader) {
1334 case PIPE_SHADER_GEOMETRY:
1335 key->as_es = 1;
1336 break;
1337 case PIPE_SHADER_TESS_CTRL:
1338 case PIPE_SHADER_TESS_EVAL:
1339 key->as_ls = 1;
1340 break;
1341 default:
1342 /* If POSITION isn't written, it can't be a HW VS.
1343 * Assume that it's a HW LS. (the next shader is TCS)
1344 * This heuristic is needed for separate shader objects.
1345 */
1346 if (!info->writes_position)
1347 key->as_ls = 1;
1348 }
1349 break;
1350
1351 case PIPE_SHADER_TESS_EVAL:
1352 if (next_shader == PIPE_SHADER_GEOMETRY ||
1353 !info->writes_position)
1354 key->as_es = 1;
1355 break;
1356 }
1357 }
1358
1359 /**
1360 * Compile the main shader part or the monolithic shader as part of
1361 * si_shader_selector initialization. Since it can be done asynchronously,
1362 * there is no way to report compile failures to applications.
1363 */
1364 void si_init_shader_selector_async(void *job, int thread_index)
1365 {
1366 struct si_shader_selector *sel = (struct si_shader_selector *)job;
1367 struct si_screen *sscreen = sel->screen;
1368 LLVMTargetMachineRef tm;
1369 struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
1370 unsigned i;
1371
1372 if (thread_index >= 0) {
1373 assert(thread_index < ARRAY_SIZE(sscreen->tm));
1374 tm = sscreen->tm[thread_index];
1375 if (!debug->async)
1376 debug = NULL;
1377 } else {
1378 tm = sel->compiler_ctx_state.tm;
1379 }
1380
1381 /* Compile the main shader part for use with a prolog and/or epilog.
1382 * If this fails, the driver will try to compile a monolithic shader
1383 * on demand.
1384 */
1385 if (!sscreen->use_monolithic_shaders) {
1386 struct si_shader *shader = CALLOC_STRUCT(si_shader);
1387 void *tgsi_binary;
1388
1389 if (!shader) {
1390 fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
1391 return;
1392 }
1393
1394 shader->selector = sel;
1395 si_parse_next_shader_property(&sel->info, &shader->key);
1396
1397 tgsi_binary = si_get_tgsi_binary(sel);
1398
1399 /* Try to load the shader from the shader cache. */
1400 pipe_mutex_lock(sscreen->shader_cache_mutex);
1401
1402 if (tgsi_binary &&
1403 si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
1404 FREE(tgsi_binary);
1405 pipe_mutex_unlock(sscreen->shader_cache_mutex);
1406 } else {
1407 pipe_mutex_unlock(sscreen->shader_cache_mutex);
1408
1409 /* Compile the shader if it hasn't been loaded from the cache. */
1410 if (si_compile_tgsi_shader(sscreen, tm, shader, false,
1411 debug) != 0) {
1412 FREE(shader);
1413 FREE(tgsi_binary);
1414 fprintf(stderr, "radeonsi: can't compile a main shader part\n");
1415 return;
1416 }
1417
1418 if (tgsi_binary) {
1419 pipe_mutex_lock(sscreen->shader_cache_mutex);
1420 if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader))
1421 FREE(tgsi_binary);
1422 pipe_mutex_unlock(sscreen->shader_cache_mutex);
1423 }
1424 }
1425
1426 *si_get_main_shader_part(sel, &shader->key) = shader;
1427
1428 /* Unset "outputs_written" flags for outputs converted to
1429 * DEFAULT_VAL, so that later inter-shader optimizations don't
1430 * try to eliminate outputs that don't exist in the final
1431 * shader.
1432 *
1433 * This is only done if non-monolithic shaders are enabled.
1434 */
1435 if ((sel->type == PIPE_SHADER_VERTEX ||
1436 sel->type == PIPE_SHADER_TESS_EVAL) &&
1437 !shader->key.as_ls &&
1438 !shader->key.as_es) {
1439 unsigned i;
1440
1441 for (i = 0; i < sel->info.num_outputs; i++) {
1442 unsigned offset = shader->info.vs_output_param_offset[i];
1443
1444 if (offset <= EXP_PARAM_OFFSET_31)
1445 continue;
1446
1447 unsigned name = sel->info.output_semantic_name[i];
1448 unsigned index = sel->info.output_semantic_index[i];
1449 unsigned id;
1450
1451 switch (name) {
1452 case TGSI_SEMANTIC_GENERIC:
1453 /* don't process indices the function can't handle */
1454 if (index >= 60)
1455 break;
1456 /* fall through */
1457 case TGSI_SEMANTIC_CLIPDIST:
1458 id = si_shader_io_get_unique_index(name, index);
1459 sel->outputs_written &= ~(1ull << id);
1460 break;
1461 case TGSI_SEMANTIC_POSITION: /* ignore these */
1462 case TGSI_SEMANTIC_PSIZE:
1463 case TGSI_SEMANTIC_CLIPVERTEX:
1464 case TGSI_SEMANTIC_EDGEFLAG:
1465 break;
1466 default:
1467 id = si_shader_io_get_unique_index2(name, index);
1468 sel->outputs_written2 &= ~(1u << id);
1469 }
1470 }
1471 }
1472 }
1473
1474 /* Pre-compilation. */
1475 if (sscreen->b.debug_flags & DBG_PRECOMPILE) {
1476 struct si_shader_ctx_state state = {sel};
1477 struct si_shader_key key;
1478
1479 memset(&key, 0, sizeof(key));
1480 si_parse_next_shader_property(&sel->info, &key);
1481
1482 /* Set reasonable defaults, so that the shader key doesn't
1483 * cause any code to be eliminated.
1484 */
1485 switch (sel->type) {
1486 case PIPE_SHADER_TESS_CTRL:
1487 key.part.tcs.epilog.prim_mode = PIPE_PRIM_TRIANGLES;
1488 break;
1489 case PIPE_SHADER_FRAGMENT:
1490 key.part.ps.prolog.bc_optimize_for_persp =
1491 sel->info.uses_persp_center &&
1492 sel->info.uses_persp_centroid;
1493 key.part.ps.prolog.bc_optimize_for_linear =
1494 sel->info.uses_linear_center &&
1495 sel->info.uses_linear_centroid;
1496 key.part.ps.epilog.alpha_func = PIPE_FUNC_ALWAYS;
1497 for (i = 0; i < 8; i++)
1498 if (sel->info.colors_written & (1 << i))
1499 key.part.ps.epilog.spi_shader_col_format |=
1500 V_028710_SPI_SHADER_FP16_ABGR << (i * 4);
1501 break;
1502 }
1503
1504 if (si_shader_select_with_key(sscreen, &state,
1505 &sel->compiler_ctx_state, &key,
1506 thread_index))
1507 fprintf(stderr, "radeonsi: can't create a monolithic shader\n");
1508 }
1509
1510 /* The GS copy shader is always pre-compiled. */
1511 if (sel->type == PIPE_SHADER_GEOMETRY) {
1512 sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, tm, sel, debug);
1513 if (!sel->gs_copy_shader) {
1514 fprintf(stderr, "radeonsi: can't create GS copy shader\n");
1515 return;
1516 }
1517
1518 si_shader_vs(sscreen, sel->gs_copy_shader, sel);
1519 }
1520 }
1521
1522 static void *si_create_shader_selector(struct pipe_context *ctx,
1523 const struct pipe_shader_state *state)
1524 {
1525 struct si_screen *sscreen = (struct si_screen *)ctx->screen;
1526 struct si_context *sctx = (struct si_context*)ctx;
1527 struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
1528 int i;
1529
1530 if (!sel)
1531 return NULL;
1532
1533 sel->screen = sscreen;
1534 sel->compiler_ctx_state.tm = sctx->tm;
1535 sel->compiler_ctx_state.debug = sctx->b.debug;
1536 sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
1537 sel->tokens = tgsi_dup_tokens(state->tokens);
1538 if (!sel->tokens) {
1539 FREE(sel);
1540 return NULL;
1541 }
1542
1543 sel->so = state->stream_output;
1544 tgsi_scan_shader(state->tokens, &sel->info);
1545 sel->type = sel->info.processor;
1546 p_atomic_inc(&sscreen->b.num_shaders_created);
1547
1548 /* Set which opcode uses which (i,j) pair. */
1549 if (sel->info.uses_persp_opcode_interp_centroid)
1550 sel->info.uses_persp_centroid = true;
1551
1552 if (sel->info.uses_linear_opcode_interp_centroid)
1553 sel->info.uses_linear_centroid = true;
1554
1555 if (sel->info.uses_persp_opcode_interp_offset ||
1556 sel->info.uses_persp_opcode_interp_sample)
1557 sel->info.uses_persp_center = true;
1558
1559 if (sel->info.uses_linear_opcode_interp_offset ||
1560 sel->info.uses_linear_opcode_interp_sample)
1561 sel->info.uses_linear_center = true;
1562
1563 switch (sel->type) {
1564 case PIPE_SHADER_GEOMETRY:
1565 sel->gs_output_prim =
1566 sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
1567 sel->gs_max_out_vertices =
1568 sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
1569 sel->gs_num_invocations =
1570 sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
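		/* Each GS output occupies one vec4 (16 bytes) per emitted
		 * vertex in the GSVS ring. */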
1571 sel->gsvs_vertex_size = sel->info.num_outputs * 16;
1572 sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
1573 sel->gs_max_out_vertices;
1574
1575 sel->max_gs_stream = 0;
1576 for (i = 0; i < sel->so.num_outputs; i++)
1577 sel->max_gs_stream = MAX2(sel->max_gs_stream,
1578 sel->so.output[i].stream);
1579
1580 sel->gs_input_verts_per_prim =
1581 u_vertices_per_prim(sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]);
1582 break;
1583
1584 case PIPE_SHADER_TESS_CTRL:
1585 /* Always reserve space for these. */
1586 sel->patch_outputs_written |=
1587 (1llu << si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSINNER, 0)) |
1588 (1llu << si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSOUTER, 0));
1589 /* fall through */
1590 case PIPE_SHADER_VERTEX:
1591 case PIPE_SHADER_TESS_EVAL:
1592 for (i = 0; i < sel->info.num_outputs; i++) {
1593 unsigned name = sel->info.output_semantic_name[i];
1594 unsigned index = sel->info.output_semantic_index[i];
1595
1596 switch (name) {
1597 case TGSI_SEMANTIC_TESSINNER:
1598 case TGSI_SEMANTIC_TESSOUTER:
1599 case TGSI_SEMANTIC_PATCH:
1600 sel->patch_outputs_written |=
1601 1llu << si_shader_io_get_unique_index(name, index);
1602 break;
1603
1604 case TGSI_SEMANTIC_GENERIC:
1605 /* don't process indices the function can't handle */
1606 if (index >= 60)
1607 break;
1608 /* fall through */
1609 case TGSI_SEMANTIC_POSITION:
1610 case TGSI_SEMANTIC_PSIZE:
1611 case TGSI_SEMANTIC_CLIPDIST:
1612 sel->outputs_written |=
1613 1llu << si_shader_io_get_unique_index(name, index);
1614 break;
1615 case TGSI_SEMANTIC_CLIPVERTEX: /* ignore these */
1616 case TGSI_SEMANTIC_EDGEFLAG:
1617 break;
1618 default:
1619 sel->outputs_written2 |=
1620 1u << si_shader_io_get_unique_index2(name, index);
1621 }
1622 }
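		/* The ES->GS ring item size reserves one vec4 per output slot
		 * up to the highest slot written. */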
1623 sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;
1624 break;
1625
1626 case PIPE_SHADER_FRAGMENT:
1627 for (i = 0; i < sel->info.num_inputs; i++) {
1628 unsigned name = sel->info.input_semantic_name[i];
1629 unsigned index = sel->info.input_semantic_index[i];
1630
1631 switch (name) {
1632 case TGSI_SEMANTIC_CLIPDIST:
1633 case TGSI_SEMANTIC_GENERIC:
1634 sel->inputs_read |=
1635 1llu << si_shader_io_get_unique_index(name, index);
1636 break;
1637 case TGSI_SEMANTIC_PCOORD: /* ignore this */
1638 break;
1639 default:
1640 sel->inputs_read2 |=
1641 1u << si_shader_io_get_unique_index2(name, index);
1642 }
1643 }
1644
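/* Expand colors_written to a 4-bit (RGBA) mask per color output. */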
1645 for (i = 0; i < 8; i++)
1646 if (sel->info.colors_written & (1 << i))
1647 sel->colors_written_4bit |= 0xf << (4 * i);
1648
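/* Record which PS input slot each COLOR semantic index uses. */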
1649 for (i = 0; i < sel->info.num_inputs; i++) {
1650 if (sel->info.input_semantic_name[i] == TGSI_SEMANTIC_COLOR) {
1651 int index = sel->info.input_semantic_index[i];
1652 sel->color_attr_index[index] = i;
1653 }
1654 }
1655 break;
1656 }
1657
1658 /* DB_SHADER_CONTROL */
1659 sel->db_shader_control =
1660 S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
1661 S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(sel->info.writes_stencil) |
1662 S_02880C_MASK_EXPORT_ENABLE(sel->info.writes_samplemask) |
1663 S_02880C_KILL_ENABLE(sel->info.uses_kill);
1664
1665 switch (sel->info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT]) {
1666 case TGSI_FS_DEPTH_LAYOUT_GREATER:
1667 sel->db_shader_control |=
1668 S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
1669 break;
1670 case TGSI_FS_DEPTH_LAYOUT_LESS:
1671 sel->db_shader_control |=
1672 S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
1673 break;
1674 }
1675
1676 /* Z_ORDER, EXEC_ON_HIER_FAIL and EXEC_ON_NOOP should be set as follows:
1677 *
1678 * | early Z/S | writes_mem | allow_ReZ? | Z_ORDER | EXEC_ON_HIER_FAIL | EXEC_ON_NOOP
1679 * --|-----------|------------|------------|--------------------|-------------------|-------------
1680 * 1a| false | false | true | EarlyZ_Then_ReZ | 0 | 0
1681 * 1b| false | false | false | EarlyZ_Then_LateZ | 0 | 0
1682 * 2 | false | true | n/a | LateZ | 1 | 0
1683 * 3 | true | false | n/a | EarlyZ_Then_LateZ | 0 | 0
1684 * 4 | true | true | n/a | EarlyZ_Then_LateZ | 0 | 1
1685 *
1686 * In cases 3 and 4, HW will force Z_ORDER to EarlyZ regardless of what's set in the register.
1687 * In case 2, NOOP_CULL is a don't-care field. In cases 2, 3 and 4, ReZ doesn't make sense.
1688 *
1689 * Don't use ReZ without profiling!!!
1690 *
1691 * ReZ decreases performance by 15% in DiRT: Showdown on Ultra settings, which has pretty complex
1692 * shaders.
1693 */
1694 if (sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]) {
1695 /* Cases 3, 4. */
1696 sel->db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) |
1697 S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
1698 S_02880C_EXEC_ON_NOOP(sel->info.writes_memory);
1699 } else if (sel->info.writes_memory) {
1700 /* Case 2. */
1701 sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z) |
1702 S_02880C_EXEC_ON_HIER_FAIL(1);
1703 } else {
1704 /* Case 1. */
1705 sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
1706 }
1707
1708 pipe_mutex_init(sel->mutex);
1709 util_queue_fence_init(&sel->ready);
1710
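/* Compile the main shader part synchronously when debugging or when there
 * is no compiler queue; otherwise queue the compilation asynchronously. */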
1711 if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
1712 sctx->is_debug ||
1713 r600_can_dump_shader(&sscreen->b, sel->info.processor) ||
1714 !util_queue_is_initialized(&sscreen->shader_compiler_queue))
1715 si_init_shader_selector_async(sel, -1);
1716 else
1717 util_queue_add_job(&sscreen->shader_compiler_queue, sel,
1718 &sel->ready, si_init_shader_selector_async,
1719 NULL);
1720
1721 return sel;
1722 }
1723
1724 static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
1725 {
1726 struct si_context *sctx = (struct si_context *)ctx;
1727 struct si_shader_selector *sel = state;
1728
1729 if (sctx->vs_shader.cso == sel)
1730 return;
1731
1732 sctx->vs_shader.cso = sel;
1733 sctx->vs_shader.current = sel ? sel->first_variant : NULL;
1734 sctx->do_update_shaders = true;
1735 si_mark_atom_dirty(sctx, &sctx->clip_regs);
1736 r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
1737 }
1738
1739 static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
1740 {
1741 struct si_context *sctx = (struct si_context *)ctx;
1742 struct si_shader_selector *sel = state;
1743 bool enable_changed = !!sctx->gs_shader.cso != !!sel;
1744
1745 if (sctx->gs_shader.cso == sel)
1746 return;
1747
1748 sctx->gs_shader.cso = sel;
1749 sctx->gs_shader.current = sel ? sel->first_variant : NULL;
1750 sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;
1751 sctx->do_update_shaders = true;
1752 si_mark_atom_dirty(sctx, &sctx->clip_regs);
1753 sctx->last_rast_prim = -1; /* reset this so that it gets updated */
1754
1755 if (enable_changed)
1756 si_shader_change_notify(sctx);
1757 r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
1758 }
1759
1760 static void si_update_tcs_tes_uses_prim_id(struct si_context *sctx)
1761 {
1762 sctx->ia_multi_vgt_param_key.u.tcs_tes_uses_prim_id =
1763 (sctx->tes_shader.cso &&
1764 sctx->tes_shader.cso->info.uses_primid) ||
1765 (sctx->tcs_shader.cso &&
1766 sctx->tcs_shader.cso->info.uses_primid);
1767 }
1768
1769 static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
1770 {
1771 struct si_context *sctx = (struct si_context *)ctx;
1772 struct si_shader_selector *sel = state;
1773 bool enable_changed = !!sctx->tcs_shader.cso != !!sel;
1774
1775 if (sctx->tcs_shader.cso == sel)
1776 return;
1777
1778 sctx->tcs_shader.cso = sel;
1779 sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
1780 si_update_tcs_tes_uses_prim_id(sctx);
1781 sctx->do_update_shaders = true;
1782
1783 if (enable_changed)
1784 sctx->last_tcs = NULL; /* invalidate derived tess state */
1785 }
1786
1787 static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
1788 {
1789 struct si_context *sctx = (struct si_context *)ctx;
1790 struct si_shader_selector *sel = state;
1791 bool enable_changed = !!sctx->tes_shader.cso != !!sel;
1792
1793 if (sctx->tes_shader.cso == sel)
1794 return;
1795
1796 sctx->tes_shader.cso = sel;
1797 sctx->tes_shader.current = sel ? sel->first_variant : NULL;
1798 sctx->ia_multi_vgt_param_key.u.uses_tess = sel != NULL;
1799 si_update_tcs_tes_uses_prim_id(sctx);
1800 sctx->do_update_shaders = true;
1801 si_mark_atom_dirty(sctx, &sctx->clip_regs);
1802 sctx->last_rast_prim = -1; /* reset this so that it gets updated */
1803
1804 if (enable_changed) {
1805 si_shader_change_notify(sctx);
1806 sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
1807 }
1808 r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
1809 }
1810
1811 static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
1812 {
1813 struct si_context *sctx = (struct si_context *)ctx;
1814 struct si_shader_selector *sel = state;
1815
1816 /* Skip if the supplied shader is the one already in use. */
1817 if (sctx->ps_shader.cso == sel)
1818 return;
1819
1820 sctx->ps_shader.cso = sel;
1821 sctx->ps_shader.current = sel ? sel->first_variant : NULL;
1822 sctx->do_update_shaders = true;
1823 si_mark_atom_dirty(sctx, &sctx->cb_render_state);
1824 }
1825
1826 static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
1827 {
1828 if (shader->is_optimized) {
1829 util_queue_job_wait(&shader->optimized_ready);
1830 util_queue_fence_destroy(&shader->optimized_ready);
1831 }
1832
1833 if (shader->pm4) {
1834 switch (shader->selector->type) {
1835 case PIPE_SHADER_VERTEX:
1836 if (shader->key.as_ls)
1837 si_pm4_delete_state(sctx, ls, shader->pm4);
1838 else if (shader->key.as_es)
1839 si_pm4_delete_state(sctx, es, shader->pm4);
1840 else
1841 si_pm4_delete_state(sctx, vs, shader->pm4);
1842 break;
1843 case PIPE_SHADER_TESS_CTRL:
1844 si_pm4_delete_state(sctx, hs, shader->pm4);
1845 break;
1846 case PIPE_SHADER_TESS_EVAL:
1847 if (shader->key.as_es)
1848 si_pm4_delete_state(sctx, es, shader->pm4);
1849 else
1850 si_pm4_delete_state(sctx, vs, shader->pm4);
1851 break;
1852 case PIPE_SHADER_GEOMETRY:
1853 if (shader->is_gs_copy_shader)
1854 si_pm4_delete_state(sctx, vs, shader->pm4);
1855 else
1856 si_pm4_delete_state(sctx, gs, shader->pm4);
1857 break;
1858 case PIPE_SHADER_FRAGMENT:
1859 si_pm4_delete_state(sctx, ps, shader->pm4);
1860 break;
1861 }
1862 }
1863
1864 si_shader_destroy(shader);
1865 free(shader);
1866 }
1867
1868 static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
1869 {
1870 struct si_context *sctx = (struct si_context *)ctx;
1871 struct si_shader_selector *sel = (struct si_shader_selector *)state;
1872 struct si_shader *p = sel->first_variant, *c;
1873 struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
1874 [PIPE_SHADER_VERTEX] = &sctx->vs_shader,
1875 [PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
1876 [PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
1877 [PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
1878 [PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
1879 };
1880
1881 util_queue_job_wait(&sel->ready);
1882
1883 if (current_shader[sel->type]->cso == sel) {
1884 current_shader[sel->type]->cso = NULL;
1885 current_shader[sel->type]->current = NULL;
1886 }
1887
1888 while (p) {
1889 c = p->next_variant;
1890 si_delete_shader(sctx, p);
1891 p = c;
1892 }
1893
1894 if (sel->main_shader_part)
1895 si_delete_shader(sctx, sel->main_shader_part);
1896 if (sel->main_shader_part_ls)
1897 si_delete_shader(sctx, sel->main_shader_part_ls);
1898 if (sel->main_shader_part_es)
1899 si_delete_shader(sctx, sel->main_shader_part_es);
1900 if (sel->gs_copy_shader)
1901 si_delete_shader(sctx, sel->gs_copy_shader);
1902
1903 util_queue_fence_destroy(&sel->ready);
1904 pipe_mutex_destroy(sel->mutex);
1905 free(sel->tokens);
1906 free(sel);
1907 }
1908
1909 static unsigned si_get_ps_input_cntl(struct si_context *sctx,
1910 struct si_shader *vs, unsigned name,
1911 unsigned index, unsigned interpolate)
1912 {
1913 struct tgsi_shader_info *vsinfo = &vs->selector->info;
1914 unsigned j, offset, ps_input_cntl = 0;
1915
1916 if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
1917 (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
1918 ps_input_cntl |= S_028644_FLAT_SHADE(1);
1919
1920 if (name == TGSI_SEMANTIC_PCOORD ||
1921 (name == TGSI_SEMANTIC_TEXCOORD &&
1922 sctx->sprite_coord_enable & (1 << index))) {
1923 ps_input_cntl |= S_028644_PT_SPRITE_TEX(1);
1924 }
1925
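/* Find the matching VS output and route the PS input to its parameter export. */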
1926 for (j = 0; j < vsinfo->num_outputs; j++) {
1927 if (name == vsinfo->output_semantic_name[j] &&
1928 index == vsinfo->output_semantic_index[j]) {
1929 offset = vs->info.vs_output_param_offset[j];
1930
1931 if (offset <= EXP_PARAM_OFFSET_31) {
1932 /* The input is loaded from parameter memory. */
1933 ps_input_cntl |= S_028644_OFFSET(offset);
1934 } else if (!G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
1935 if (offset == EXP_PARAM_UNDEFINED) {
1936 /* This can happen with depth-only rendering. */
1937 offset = 0;
1938 } else {
1939 /* The input is a DEFAULT_VAL constant. */
1940 assert(offset >= EXP_PARAM_DEFAULT_VAL_0000 &&
1941 offset <= EXP_PARAM_DEFAULT_VAL_1111);
1942 offset -= EXP_PARAM_DEFAULT_VAL_0000;
1943 }
1944
1945 ps_input_cntl = S_028644_OFFSET(0x20) |
1946 S_028644_DEFAULT_VAL(offset);
1947 }
1948 break;
1949 }
1950 }
1951
1952 if (name == TGSI_SEMANTIC_PRIMID)
1953 /* PrimID is written after the last output. */
1954 ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[vsinfo->num_outputs]);
1955 else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
1956 /* No corresponding output was found; load defaults into the input.
1957 * Don't set any other bits.
1958 * (FLAT_SHADE=1 completely changes behavior.) */
1959 ps_input_cntl = S_028644_OFFSET(0x20);
1960 /* D3D9 behaviour; GL leaves this undefined. */
1961 if (name == TGSI_SEMANTIC_COLOR && index == 0)
1962 ps_input_cntl |= S_028644_DEFAULT_VAL(3);
1963 }
1964 return ps_input_cntl;
1965 }
1966
1967 static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
1968 {
1969 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1970 struct si_shader *ps = sctx->ps_shader.current;
1971 struct si_shader *vs = si_get_vs_state(sctx);
1972 struct tgsi_shader_info *psinfo = ps ? &ps->selector->info : NULL;
1973 unsigned i, num_interp, num_written = 0, bcol_interp[2];
1974
1975 if (!ps || !ps->selector->info.num_inputs)
1976 return;
1977
1978 num_interp = si_get_ps_num_interp(ps);
1979 assert(num_interp > 0);
1980 radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, num_interp);
1981
1982 for (i = 0; i < psinfo->num_inputs; i++) {
1983 unsigned name = psinfo->input_semantic_name[i];
1984 unsigned index = psinfo->input_semantic_index[i];
1985 unsigned interpolate = psinfo->input_interpolate[i];
1986
1987 radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, name, index,
1988 interpolate));
1989 num_written++;
1990
1991 if (name == TGSI_SEMANTIC_COLOR) {
1992 assert(index < ARRAY_SIZE(bcol_interp));
1993 bcol_interp[index] = interpolate;
1994 }
1995 }
1996
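/* Back-facing colors reuse the interpolation modes recorded for the
 * front-facing colors above. */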
1997 if (ps->key.part.ps.prolog.color_two_side) {
1998 unsigned bcol = TGSI_SEMANTIC_BCOLOR;
1999
2000 for (i = 0; i < 2; i++) {
2001 if (!(psinfo->colors_read & (0xf << (i * 4))))
2002 continue;
2003
2004 radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, bcol,
2005 i, bcol_interp[i]));
2006 num_written++;
2007 }
2008 }
2009 assert(num_interp == num_written);
2010 }
2011
2012 /**
2013 * Writing CONFIG or UCONFIG VGT registers requires a VGT_FLUSH beforehand.
2014 */
2015 static void si_init_config_add_vgt_flush(struct si_context *sctx)
2016 {
2017 if (sctx->init_config_has_vgt_flush)
2018 return;
2019
2020 /* This is also done by the Vulkan driver before VGT_FLUSH. */
2021 si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
2022 si_pm4_cmd_add(sctx->init_config,
2023 EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
2024 si_pm4_cmd_end(sctx->init_config, false);
2025
2026 /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
2027 si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
2028 si_pm4_cmd_add(sctx->init_config, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
2029 si_pm4_cmd_end(sctx->init_config, false);
2030 sctx->init_config_has_vgt_flush = true;
2031 }
2032
2033 /* Initialize state related to ESGS / GSVS ring buffers */
2034 static bool si_update_gs_ring_buffers(struct si_context *sctx)
2035 {
2036 struct si_shader_selector *es =
2037 sctx->tes_shader.cso ? sctx->tes_shader.cso : sctx->vs_shader.cso;
2038 struct si_shader_selector *gs = sctx->gs_shader.cso;
2039 struct si_pm4_state *pm4;
2040
2041 /* Chip constants. */
2042 unsigned num_se = sctx->screen->b.info.max_se;
2043 unsigned wave_size = 64;
2044 unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
2045 unsigned gs_vertex_reuse = 16 * num_se; /* GS_VERTEX_REUSE register (per SE) */
2046 unsigned alignment = 256 * num_se;
2047 /* The maximum size is 63.999 MB per SE. */
2048 unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
2049
2050 /* Calculate the minimum size. */
2051 unsigned min_esgs_ring_size = align(es->esgs_itemsize * gs_vertex_reuse *
2052 wave_size, alignment);
2053
2054 /* These are recommended sizes, not minimum sizes. */
2055 unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
2056 es->esgs_itemsize * gs->gs_input_verts_per_prim;
2057 unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
2058 gs->max_gsvs_emit_size;
2059
2060 min_esgs_ring_size = align(min_esgs_ring_size, alignment);
2061 esgs_ring_size = align(esgs_ring_size, alignment);
2062 gsvs_ring_size = align(gsvs_ring_size, alignment);
2063
2064 esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
2065 gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
2066
2067 /* Some rings don't have to be allocated if shaders don't use them.
2068 * (e.g. no varyings between ES and GS or GS and VS)
2069 */
2070 bool update_esgs = esgs_ring_size &&
2071 (!sctx->esgs_ring ||
2072 sctx->esgs_ring->width0 < esgs_ring_size);
2073 bool update_gsvs = gsvs_ring_size &&
2074 (!sctx->gsvs_ring ||
2075 sctx->gsvs_ring->width0 < gsvs_ring_size);
2076
2077 if (!update_esgs && !update_gsvs)
2078 return true;
2079
2080 if (update_esgs) {
2081 pipe_resource_reference(&sctx->esgs_ring, NULL);
2082 sctx->esgs_ring =
2083 r600_aligned_buffer_create(sctx->b.b.screen,
2084 R600_RESOURCE_FLAG_UNMAPPABLE,
2085 PIPE_USAGE_DEFAULT,
2086 esgs_ring_size, alignment);
2087 if (!sctx->esgs_ring)
2088 return false;
2089 }
2090
2091 if (update_gsvs) {
2092 pipe_resource_reference(&sctx->gsvs_ring, NULL);
2093 sctx->gsvs_ring =
2094 r600_aligned_buffer_create(sctx->b.b.screen,
2095 R600_RESOURCE_FLAG_UNMAPPABLE,
2096 PIPE_USAGE_DEFAULT,
2097 gsvs_ring_size, alignment);
2098 if (!sctx->gsvs_ring)
2099 return false;
2100 }
2101
2102 /* Create the "init_config_gs_rings" state. */
2103 pm4 = CALLOC_STRUCT(si_pm4_state);
2104 if (!pm4)
2105 return false;
2106
2107 if (sctx->b.chip_class >= CIK) {
2108 if (sctx->esgs_ring)
2109 si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
2110 sctx->esgs_ring->width0 / 256);
2111 if (sctx->gsvs_ring)
2112 si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE,
2113 sctx->gsvs_ring->width0 / 256);
2114 } else {
2115 if (sctx->esgs_ring)
2116 si_pm4_set_reg(pm4, R_0088C8_VGT_ESGS_RING_SIZE,
2117 sctx->esgs_ring->width0 / 256);
2118 if (sctx->gsvs_ring)
2119 si_pm4_set_reg(pm4, R_0088CC_VGT_GSVS_RING_SIZE,
2120 sctx->gsvs_ring->width0 / 256);
2121 }
2122
2123 /* Set the state. */
2124 if (sctx->init_config_gs_rings)
2125 si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
2126 sctx->init_config_gs_rings = pm4;
2127
2128 if (!sctx->init_config_has_vgt_flush) {
2129 si_init_config_add_vgt_flush(sctx);
2130 si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
2131 }
2132
2133 /* Flush the context to re-emit both init_config states. */
2134 sctx->b.initial_gfx_cs_size = 0; /* force flush */
2135 si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
2136
2137 /* Set ring bindings. */
2138 if (sctx->esgs_ring) {
2139 si_set_ring_buffer(&sctx->b.b, SI_ES_RING_ESGS,
2140 sctx->esgs_ring, 0, sctx->esgs_ring->width0,
2141 true, true, 4, 64, 0);
2142 si_set_ring_buffer(&sctx->b.b, SI_GS_RING_ESGS,
2143 sctx->esgs_ring, 0, sctx->esgs_ring->width0,
2144 false, false, 0, 0, 0);
2145 }
2146 if (sctx->gsvs_ring) {
2147 si_set_ring_buffer(&sctx->b.b, SI_RING_GSVS,
2148 sctx->gsvs_ring, 0, sctx->gsvs_ring->width0,
2149 false, false, 0, 0, 0);
2150 }
2151
2152 return true;
2153 }
2154
2155 /**
2156 * @returns 1 if \p shader has been updated to use a new scratch buffer
2157 * 0 if not
2158 * < 0 if there was a failure
2159 */
2160 static int si_update_scratch_buffer(struct si_context *sctx,
2161 struct si_shader *shader)
2162 {
2163 uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
2164 int r;
2165
2166 if (!shader)
2167 return 0;
2168
2169 /* This shader doesn't need a scratch buffer */
2170 if (shader->config.scratch_bytes_per_wave == 0)
2171 return 0;
2172
2173 /* This shader is already configured to use the current
2174 * scratch buffer. */
2175 if (shader->scratch_bo == sctx->scratch_buffer)
2176 return 0;
2177
2178 assert(sctx->scratch_buffer);
2179
2180 si_shader_apply_scratch_relocs(sctx, shader, &shader->config, scratch_va);
2181
2182 /* Replace the shader bo with a new bo that has the relocs applied. */
2183 r = si_shader_binary_upload(sctx->screen, shader);
2184 if (r)
2185 return r;
2186
2187 /* Update the shader state to use the new shader bo. */
2188 si_shader_init_pm4_state(sctx->screen, shader);
2189
2190 r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
2191
2192 return 1;
2193 }
2194
2195 static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
2196 {
2197 return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
2198 }
2199
2200 static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
2201 {
2202 return shader ? shader->config.scratch_bytes_per_wave : 0;
2203 }
2204
2205 static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
2206 {
2207 unsigned bytes = 0;
2208
2209 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
2210 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
2211 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
2212 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tcs_shader.current));
2213 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
2214 return bytes;
2215 }
2216
2217 static bool si_update_spi_tmpring_size(struct si_context *sctx)
2218 {
2219 unsigned current_scratch_buffer_size =
2220 si_get_current_scratch_buffer_size(sctx);
2221 unsigned scratch_bytes_per_wave =
2222 si_get_max_scratch_bytes_per_wave(sctx);
2223 unsigned scratch_needed_size = scratch_bytes_per_wave *
2224 sctx->scratch_waves;
2225 unsigned spi_tmpring_size;
2226 int r;
2227
2228 if (scratch_needed_size > 0) {
2229 if (scratch_needed_size > current_scratch_buffer_size) {
2230 /* Create a bigger scratch buffer */
2231 r600_resource_reference(&sctx->scratch_buffer, NULL);
2232
2233 sctx->scratch_buffer = (struct r600_resource*)
2234 r600_aligned_buffer_create(&sctx->screen->b.b,
2235 R600_RESOURCE_FLAG_UNMAPPABLE,
2236 PIPE_USAGE_DEFAULT,
2237 scratch_needed_size, 256);
2238 if (!sctx->scratch_buffer)
2239 return false;
2240
2241 si_mark_atom_dirty(sctx, &sctx->scratch_state);
2242 r600_context_add_resource_size(&sctx->b.b,
2243 &sctx->scratch_buffer->b.b);
2244 }
2245
2246 /* Update the shaders so that they use the latest scratch buffer. The
2247 * scratch buffer may have been changed since these shaders were
2248 * last used, so we still need to try to update them, even if
2249 * they require scratch buffers smaller than the current size.
2250 */
2251 r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
2252 if (r < 0)
2253 return false;
2254 if (r == 1)
2255 si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);
2256
2257 r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
2258 if (r < 0)
2259 return false;
2260 if (r == 1)
2261 si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
2262
2263 r = si_update_scratch_buffer(sctx, sctx->tcs_shader.current);
2264 if (r < 0)
2265 return false;
2266 if (r == 1)
2267 si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
2268
2269 /* VS can be bound as LS, ES, or VS. */
2270 r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
2271 if (r < 0)
2272 return false;
2273 if (r == 1) {
2274 if (sctx->tes_shader.current)
2275 si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
2276 else if (sctx->gs_shader.current)
2277 si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
2278 else
2279 si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
2280 }
2281
2282 /* TES can be bound as ES or VS. */
2283 r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
2284 if (r < 0)
2285 return false;
2286 if (r == 1) {
2287 if (sctx->gs_shader.current)
2288 si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
2289 else
2290 si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
2291 }
2292 }
2293
2294 /* The LLVM shader backend should be reporting aligned scratch_sizes. */
2295 assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
2296 "scratch size should already be aligned correctly.");
2297
2298 spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
2299 S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
2300 if (spi_tmpring_size != sctx->spi_tmpring_size) {
2301 sctx->spi_tmpring_size = spi_tmpring_size;
2302 si_mark_atom_dirty(sctx, &sctx->scratch_state);
2303 }
2304 return true;
2305 }
2306
2307 static void si_init_tess_factor_ring(struct si_context *sctx)
2308 {
2309 bool double_offchip_buffers = sctx->b.chip_class >= CIK;
2310 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
2311 unsigned max_offchip_buffers = max_offchip_buffers_per_se *
2312 sctx->screen->b.info.max_se;
2313 unsigned offchip_granularity;
2314
2315 switch (sctx->screen->tess_offchip_block_dw_size) {
2316 default:
2317 assert(0);
2318 /* fall through */
2319 case 8192:
2320 offchip_granularity = V_03093C_X_8K_DWORDS;
2321 break;
2322 case 4096:
2323 offchip_granularity = V_03093C_X_4K_DWORDS;
2324 break;
2325 }
2326
2327 switch (sctx->b.chip_class) {
2328 case SI:
2329 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
2330 break;
2331 case CIK:
2332 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
2333 break;
2334 case VI:
2335 default:
2336 max_offchip_buffers = MIN2(max_offchip_buffers, 512);
2337 break;
2338 }
2339
2340 assert(!sctx->tf_ring);
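/* The tessellation factor ring is 32K bytes per SE. */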
2341 sctx->tf_ring = r600_aligned_buffer_create(sctx->b.b.screen,
2342 R600_RESOURCE_FLAG_UNMAPPABLE,
2343 PIPE_USAGE_DEFAULT,
2344 32768 * sctx->screen->b.info.max_se,
2345 256);
2346 if (!sctx->tf_ring)
2347 return;
2348
2349 assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);
2350
2351 sctx->tess_offchip_ring =
2352 r600_aligned_buffer_create(sctx->b.b.screen,
2353 R600_RESOURCE_FLAG_UNMAPPABLE,
2354 PIPE_USAGE_DEFAULT,
2355 max_offchip_buffers *
2356 sctx->screen->tess_offchip_block_dw_size * 4,
2357 256);
2358 if (!sctx->tess_offchip_ring)
2359 return;
2360
2361 si_init_config_add_vgt_flush(sctx);
2362
2363 /* Append these registers to the init config state. */
2364 if (sctx->b.chip_class >= CIK) {
2365 if (sctx->b.chip_class >= VI)
2366 --max_offchip_buffers;
2367
2368 si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
2369 S_030938_SIZE(sctx->tf_ring->width0 / 4));
2370 si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
2371 r600_resource(sctx->tf_ring)->gpu_address >> 8);
2372 si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
2373 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
2374 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity));
2375 } else {
2376 assert(offchip_granularity == V_03093C_X_8K_DWORDS);
2377 si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
2378 S_008988_SIZE(sctx->tf_ring->width0 / 4));
2379 si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
2380 r600_resource(sctx->tf_ring)->gpu_address >> 8);
2381 si_pm4_set_reg(sctx->init_config, R_0089B0_VGT_HS_OFFCHIP_PARAM,
2382 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers));
2383 }
2384
2385 /* Flush the context to re-emit the init_config state.
2386 * This is done only once in the lifetime of a context.
2387 */
2388 si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
2389 sctx->b.initial_gfx_cs_size = 0; /* force flush */
2390 si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
2391
2392 si_set_ring_buffer(&sctx->b.b, SI_HS_RING_TESS_FACTOR, sctx->tf_ring,
2393 0, sctx->tf_ring->width0, false, false, 0, 0, 0);
2394
2395 si_set_ring_buffer(&sctx->b.b, SI_HS_RING_TESS_OFFCHIP,
2396 sctx->tess_offchip_ring, 0,
2397 sctx->tess_offchip_ring->width0, false, false, 0, 0, 0);
2398 }
2399
2400 /**
2401 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
2402 * VS passes its outputs to TES directly, so the fixed-function shader only
2403 * has to write TESSOUTER and TESSINNER.
2404 */
2405 static void si_generate_fixed_func_tcs(struct si_context *sctx)
2406 {
2407 struct ureg_src outer, inner;
2408 struct ureg_dst tessouter, tessinner;
2409 struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);
2410
2411 if (!ureg)
2412 return; /* if we get here, we're screwed */
2413
2414 assert(!sctx->fixed_func_tcs_shader.cso);
2415
2416 outer = ureg_DECL_system_value(ureg,
2417 TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI, 0);
2418 inner = ureg_DECL_system_value(ureg,
2419 TGSI_SEMANTIC_DEFAULT_TESSINNER_SI, 0);
2420
2421 tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
2422 tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);
2423
2424 ureg_MOV(ureg, tessouter, outer);
2425 ureg_MOV(ureg, tessinner, inner);
2426 ureg_END(ureg);
2427
2428 sctx->fixed_func_tcs_shader.cso =
2429 ureg_create_shader_and_destroy(ureg, &sctx->b.b);
2430 }
2431
2432 static void si_update_vgt_shader_config(struct si_context *sctx)
2433 {
2434 /* Calculate the index of the config.
2435 * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
2436 unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso;
2437 struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];
2438
2439 if (!*pm4) {
2440 uint32_t stages = 0;
2441
2442 *pm4 = CALLOC_STRUCT(si_pm4_state);
2443
2444 if (sctx->tes_shader.cso) {
2445 stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
2446 S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);
2447
2448 if (sctx->gs_shader.cso)
2449 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
2450 S_028B54_GS_EN(1) |
2451 S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
2452 else
2453 stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
2454 } else if (sctx->gs_shader.cso) {
2455 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
2456 S_028B54_GS_EN(1) |
2457 S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
2458 }
2459
2460 si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
2461 }
2462 si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
2463 }
2464
2465 static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
2466 {
2467 struct pipe_stream_output_info *so = &shader->so;
2468 uint32_t enabled_stream_buffers_mask = 0;
2469 int i;
2470
2471 for (i = 0; i < so->num_outputs; i++)
2472 enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << (so->output[i].stream * 4);
2473 sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
2474 sctx->b.streamout.stride_in_dw = shader->so.stride;
2475 }
2476
2477 bool si_update_shaders(struct si_context *sctx)
2478 {
2479 struct pipe_context *ctx = (struct pipe_context*)sctx;
2480 struct si_compiler_ctx_state compiler_state;
2481 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
2482 struct si_shader *old_vs = si_get_vs_state(sctx);
2483 bool old_clip_disable = old_vs ? old_vs->key.opt.hw_vs.clip_disable : false;
2484 int r;
2485
2486 compiler_state.tm = sctx->tm;
2487 compiler_state.debug = sctx->b.debug;
2488 compiler_state.is_debug_context = sctx->is_debug;
2489
2490 /* Update stages before GS. */
2491 if (sctx->tes_shader.cso) {
2492 if (!sctx->tf_ring) {
2493 si_init_tess_factor_ring(sctx);
2494 if (!sctx->tf_ring)
2495 return false;
2496 }
2497
2498 /* VS as LS */
2499 r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
2500 if (r)
2501 return false;
2502 si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
2503
2504 if (sctx->tcs_shader.cso) {
2505 r = si_shader_select(ctx, &sctx->tcs_shader,
2506 &compiler_state);
2507 if (r)
2508 return false;
2509 si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
2510 } else {
2511 if (!sctx->fixed_func_tcs_shader.cso) {
2512 si_generate_fixed_func_tcs(sctx);
2513 if (!sctx->fixed_func_tcs_shader.cso)
2514 return false;
2515 }
2516
2517 r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader,
2518 &compiler_state);
2519 if (r)
2520 return false;
2521 si_pm4_bind_state(sctx, hs,
2522 sctx->fixed_func_tcs_shader.current->pm4);
2523 }
2524
2525 r = si_shader_select(ctx, &sctx->tes_shader, &compiler_state);
2526 if (r)
2527 return false;
2528
2529 if (sctx->gs_shader.cso) {
2530 /* TES as ES */
2531 si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
2532 } else {
2533 /* TES as VS */
2534 si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
2535 si_update_so(sctx, sctx->tes_shader.cso);
2536 }
2537 } else if (sctx->gs_shader.cso) {
2538 /* VS as ES */
2539 r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
2540 if (r)
2541 return false;
2542 si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
2543
2544 si_pm4_bind_state(sctx, ls, NULL);
2545 si_pm4_bind_state(sctx, hs, NULL);
2546 } else {
2547 /* VS as VS */
2548 r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
2549 if (r)
2550 return false;
2551 si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
2552 si_update_so(sctx, sctx->vs_shader.cso);
2553
2554 si_pm4_bind_state(sctx, ls, NULL);
2555 si_pm4_bind_state(sctx, hs, NULL);
2556 }
2557
2558 /* Update GS. */
2559 if (sctx->gs_shader.cso) {
2560 r = si_shader_select(ctx, &sctx->gs_shader, &compiler_state);
2561 if (r)
2562 return false;
2563 si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
2564 si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4);
2565 si_update_so(sctx, sctx->gs_shader.cso);
2566
2567 if (!si_update_gs_ring_buffers(sctx))
2568 return false;
2569 } else {
2570 si_pm4_bind_state(sctx, gs, NULL);
2571 si_pm4_bind_state(sctx, es, NULL);
2572 }
2573
2574 si_update_vgt_shader_config(sctx);
2575
2576 if (old_clip_disable != si_get_vs_state(sctx)->key.opt.hw_vs.clip_disable)
2577 si_mark_atom_dirty(sctx, &sctx->clip_regs);
2578
2579 if (sctx->ps_shader.cso) {
2580 unsigned db_shader_control;
2581
2582 r = si_shader_select(ctx, &sctx->ps_shader, &compiler_state);
2583 if (r)
2584 return false;
2585 si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);
2586
2587 db_shader_control =
2588 sctx->ps_shader.cso->db_shader_control |
2589 S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS);
2590
2591 if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
2592 sctx->sprite_coord_enable != rs->sprite_coord_enable ||
2593 sctx->flatshade != rs->flatshade) {
2594 sctx->sprite_coord_enable = rs->sprite_coord_enable;
2595 sctx->flatshade = rs->flatshade;
2596 si_mark_atom_dirty(sctx, &sctx->spi_map);
2597 }
2598
2599 if (sctx->b.family == CHIP_STONEY && si_pm4_state_changed(sctx, ps))
2600 si_mark_atom_dirty(sctx, &sctx->cb_render_state);
2601
2602 if (sctx->ps_db_shader_control != db_shader_control) {
2603 sctx->ps_db_shader_control = db_shader_control;
2604 si_mark_atom_dirty(sctx, &sctx->db_render_state);
2605 }
2606
2607 if (sctx->smoothing_enabled != sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing) {
2608 sctx->smoothing_enabled = sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing;
2609 si_mark_atom_dirty(sctx, &sctx->msaa_config);
2610
2611 if (sctx->b.chip_class == SI)
2612 si_mark_atom_dirty(sctx, &sctx->db_render_state);
2613
2614 if (sctx->framebuffer.nr_samples <= 1)
2615 si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
2616 }
2617 }
2618
2619 if (si_pm4_state_changed(sctx, ls) ||
2620 si_pm4_state_changed(sctx, hs) ||
2621 si_pm4_state_changed(sctx, es) ||
2622 si_pm4_state_changed(sctx, gs) ||
2623 si_pm4_state_changed(sctx, vs) ||
2624 si_pm4_state_changed(sctx, ps)) {
2625 if (!si_update_spi_tmpring_size(sctx))
2626 return false;
2627 }
2628
2629 if (sctx->b.chip_class >= CIK)
2630 si_mark_atom_dirty(sctx, &sctx->prefetch_L2);
2631
2632 sctx->do_update_shaders = false;
2633 return true;
2634 }
2635
2636 static void si_emit_scratch_state(struct si_context *sctx,
2637 struct r600_atom *atom)
2638 {
2639 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
2640
2641 radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
2642 sctx->spi_tmpring_size);
2643
2644 if (sctx->scratch_buffer) {
2645 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
2646 sctx->scratch_buffer, RADEON_USAGE_READWRITE,
2647 RADEON_PRIO_SCRATCH_BUFFER);
2648 }
2649 }
2650
2651 void si_init_shader_functions(struct si_context *sctx)
2652 {
2653 si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);
2654 si_init_atom(sctx, &sctx->scratch_state, &sctx->atoms.s.scratch_state,
2655 si_emit_scratch_state);
2656
2657 sctx->b.b.create_vs_state = si_create_shader_selector;
2658 sctx->b.b.create_tcs_state = si_create_shader_selector;
2659 sctx->b.b.create_tes_state = si_create_shader_selector;
2660 sctx->b.b.create_gs_state = si_create_shader_selector;
2661 sctx->b.b.create_fs_state = si_create_shader_selector;
2662
2663 sctx->b.b.bind_vs_state = si_bind_vs_shader;
2664 sctx->b.b.bind_tcs_state = si_bind_tcs_shader;
2665 sctx->b.b.bind_tes_state = si_bind_tes_shader;
2666 sctx->b.b.bind_gs_state = si_bind_gs_shader;
2667 sctx->b.b.bind_fs_state = si_bind_ps_shader;
2668
2669 sctx->b.b.delete_vs_state = si_delete_shader_selector;
2670 sctx->b.b.delete_tcs_state = si_delete_shader_selector;
2671 sctx->b.b.delete_tes_state = si_delete_shader_selector;
2672 sctx->b.b.delete_gs_state = si_delete_shader_selector;
2673 sctx->b.b.delete_fs_state = si_delete_shader_selector;
2674 }