radeonsi: fix R600_DEBUG=precompile for shader-db
[mesa.git] / src / gallium / drivers / radeonsi / si_state_shaders.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Christian König <christian.koenig@amd.com>
25 * Marek Olšák <maraeo@gmail.com>
26 */
27
28 #include "si_pipe.h"
29 #include "sid.h"
30 #include "radeon/r600_cs.h"
31
32 #include "tgsi/tgsi_parse.h"
33 #include "tgsi/tgsi_ureg.h"
34 #include "util/hash_table.h"
35 #include "util/u_hash.h"
36 #include "util/u_memory.h"
37 #include "util/u_prim.h"
38
39 /* SHADER_CACHE */
40
41 /**
42 * Return the TGSI binary in a buffer. The first 4 bytes contain its size as
43 * integer.
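 *
 * Buffer layout: [u32 total size][TGSI tokens][stream-output info (sel->so)]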
44 */
45 static void *si_get_tgsi_binary(struct si_shader_selector *sel)
46 {
47 unsigned tgsi_size = tgsi_num_tokens(sel->tokens) *
48 sizeof(struct tgsi_token);
49 unsigned size = 4 + tgsi_size + sizeof(sel->so);
50 char *result = (char*)MALLOC(size);
51
52 if (!result)
53 return NULL;
54
55 *((uint32_t*)result) = size;
56 memcpy(result + 4, sel->tokens, tgsi_size);
57 memcpy(result + 4 + tgsi_size, &sel->so, sizeof(sel->so));
58 return result;
59 }
60
61 /** Copy "data" to "ptr" and return the next dword following copied data. */
62 static uint32_t *write_data(uint32_t *ptr, const void *data, unsigned size)
63 {
64 /* data may be NULL if size == 0 */
65 if (size)
66 memcpy(ptr, data, size);
67 ptr += DIV_ROUND_UP(size, 4);
68 return ptr;
69 }
70
71 /** Read data from "ptr". Return the next dword following the data. */
72 static uint32_t *read_data(uint32_t *ptr, void *data, unsigned size)
73 {
74 memcpy(data, ptr, size);
75 ptr += DIV_ROUND_UP(size, 4);
76 return ptr;
77 }
78
79 /**
80 * Write the size as uint followed by the data. Return the next dword
81 * following the copied data.
82 */
83 static uint32_t *write_chunk(uint32_t *ptr, const void *data, unsigned size)
84 {
85 *ptr++ = size;
86 return write_data(ptr, data, size);
87 }
88
89 /**
90 * Read the size as uint followed by the data. Return both via parameters.
91 * Return the next dword following the data.
92 */
93 static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size)
94 {
95 *size = *ptr++;
96 assert(*data == NULL);
97 if (!*size)
98 return ptr;
99 *data = malloc(*size);
100 return read_data(ptr, *data, *size);
101 }
102
103 /**
104 * Return the shader binary in a buffer. The first 4 bytes contain its size
105 * as integer.
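 *
 * Buffer layout (each chunk is padded to a dword boundary):
 *   [u32 total size][u32 CRC32 of everything below]
 *   [config][info]
 *   [u32 size][code][u32 size][rodata][u32 size][relocs]
 *   [u32 size][disasm string][u32 size][LLVM IR string]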
106 */
107 static void *si_get_shader_binary(struct si_shader *shader)
108 {
109 /* There is always a size of data followed by the data itself. */
110 unsigned relocs_size = shader->binary.reloc_count *
111 sizeof(shader->binary.relocs[0]);
112 unsigned disasm_size = strlen(shader->binary.disasm_string) + 1;
113 unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
114 strlen(shader->binary.llvm_ir_string) + 1 : 0;
115 unsigned size =
116 4 + /* total size */
117 4 + /* CRC32 of the data below */
118 align(sizeof(shader->config), 4) +
119 align(sizeof(shader->info), 4) +
120 4 + align(shader->binary.code_size, 4) +
121 4 + align(shader->binary.rodata_size, 4) +
122 4 + align(relocs_size, 4) +
123 4 + align(disasm_size, 4) +
124 4 + align(llvm_ir_size, 4);
125 void *buffer = CALLOC(1, size);
126 uint32_t *ptr = (uint32_t*)buffer;
127
128 if (!buffer)
129 return NULL;
130
131 *ptr++ = size;
132 ptr++; /* CRC32 is calculated at the end. */
133
134 ptr = write_data(ptr, &shader->config, sizeof(shader->config));
135 ptr = write_data(ptr, &shader->info, sizeof(shader->info));
136 ptr = write_chunk(ptr, shader->binary.code, shader->binary.code_size);
137 ptr = write_chunk(ptr, shader->binary.rodata, shader->binary.rodata_size);
138 ptr = write_chunk(ptr, shader->binary.relocs, relocs_size);
139 ptr = write_chunk(ptr, shader->binary.disasm_string, disasm_size);
140 ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size);
141 assert((char *)ptr - (char *)buffer == size);
142
143 /* Compute CRC32. */
144 ptr = (uint32_t*)buffer;
145 ptr++;
146 *ptr = util_hash_crc32(ptr + 1, size - 8);
147
148 return buffer;
149 }
150
151 static bool si_load_shader_binary(struct si_shader *shader, void *binary)
152 {
153 uint32_t *ptr = (uint32_t*)binary;
154 uint32_t size = *ptr++;
155 uint32_t crc32 = *ptr++;
156 unsigned chunk_size;
157
158 if (util_hash_crc32(ptr, size - 8) != crc32) {
159 fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n");
160 return false;
161 }
162
163 ptr = read_data(ptr, &shader->config, sizeof(shader->config));
164 ptr = read_data(ptr, &shader->info, sizeof(shader->info));
165 ptr = read_chunk(ptr, (void**)&shader->binary.code,
166 &shader->binary.code_size);
167 ptr = read_chunk(ptr, (void**)&shader->binary.rodata,
168 &shader->binary.rodata_size);
169 ptr = read_chunk(ptr, (void**)&shader->binary.relocs, &chunk_size);
170 shader->binary.reloc_count = chunk_size / sizeof(shader->binary.relocs[0]);
171 ptr = read_chunk(ptr, (void**)&shader->binary.disasm_string, &chunk_size);
172 ptr = read_chunk(ptr, (void**)&shader->binary.llvm_ir_string, &chunk_size);
173
174 return true;
175 }
176
177 /**
178 * Insert a shader into the cache. It's assumed the shader is not in the cache.
179 * Use si_shader_cache_load_shader before calling this.
180 *
181 * Returns false on failure, in which case the tgsi_binary should be freed.
182 */
183 static bool si_shader_cache_insert_shader(struct si_screen *sscreen,
184 void *tgsi_binary,
185 struct si_shader *shader)
186 {
187 void *hw_binary;
188 struct hash_entry *entry;
189
190 entry = _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
191 if (entry)
192 return false; /* already added */
193
194 hw_binary = si_get_shader_binary(shader);
195 if (!hw_binary)
196 return false;
197
198 if (_mesa_hash_table_insert(sscreen->shader_cache, tgsi_binary,
199 hw_binary) == NULL) {
200 FREE(hw_binary);
201 return false;
202 }
203
204 return true;
205 }
206
207 static bool si_shader_cache_load_shader(struct si_screen *sscreen,
208 void *tgsi_binary,
209 struct si_shader *shader)
210 {
211 struct hash_entry *entry =
212 _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
213 if (!entry)
214 return false;
215
216 return si_load_shader_binary(shader, entry->data);
217 }
218
219 static uint32_t si_shader_cache_key_hash(const void *key)
220 {
221 /* The first dword is the key size. */
222 return util_hash_crc32(key, *(uint32_t*)key);
223 }
224
225 static bool si_shader_cache_key_equals(const void *a, const void *b)
226 {
227 uint32_t *keya = (uint32_t*)a;
228 uint32_t *keyb = (uint32_t*)b;
229
230 /* The first dword is the key size. */
231 if (*keya != *keyb)
232 return false;
233
234 return memcmp(keya, keyb, *keya) == 0;
235 }
236
237 static void si_destroy_shader_cache_entry(struct hash_entry *entry)
238 {
239 FREE((void*)entry->key);
240 FREE(entry->data);
241 }
242
243 bool si_init_shader_cache(struct si_screen *sscreen)
244 {
245 pipe_mutex_init(sscreen->shader_cache_mutex);
246 sscreen->shader_cache =
247 _mesa_hash_table_create(NULL,
248 si_shader_cache_key_hash,
249 si_shader_cache_key_equals);
250 return sscreen->shader_cache != NULL;
251 }
252
253 void si_destroy_shader_cache(struct si_screen *sscreen)
254 {
255 if (sscreen->shader_cache)
256 _mesa_hash_table_destroy(sscreen->shader_cache,
257 si_destroy_shader_cache_entry);
258 pipe_mutex_destroy(sscreen->shader_cache_mutex);
259 }
260
261 /* SHADER STATES */
262
263 static void si_set_tesseval_regs(struct si_screen *sscreen,
264 struct si_shader *shader,
265 struct si_pm4_state *pm4)
266 {
267 struct tgsi_shader_info *info = &shader->selector->info;
268 unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
269 unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
270 bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
271 bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
272 unsigned type, partitioning, topology, distribution_mode;
273
274 switch (tes_prim_mode) {
275 case PIPE_PRIM_LINES:
276 type = V_028B6C_TESS_ISOLINE;
277 break;
278 case PIPE_PRIM_TRIANGLES:
279 type = V_028B6C_TESS_TRIANGLE;
280 break;
281 case PIPE_PRIM_QUADS:
282 type = V_028B6C_TESS_QUAD;
283 break;
284 default:
285 assert(0);
286 return;
287 }
288
289 switch (tes_spacing) {
290 case PIPE_TESS_SPACING_FRACTIONAL_ODD:
291 partitioning = V_028B6C_PART_FRAC_ODD;
292 break;
293 case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
294 partitioning = V_028B6C_PART_FRAC_EVEN;
295 break;
296 case PIPE_TESS_SPACING_EQUAL:
297 partitioning = V_028B6C_PART_INTEGER;
298 break;
299 default:
300 assert(0);
301 return;
302 }
303
304 if (tes_point_mode)
305 topology = V_028B6C_OUTPUT_POINT;
306 else if (tes_prim_mode == PIPE_PRIM_LINES)
307 topology = V_028B6C_OUTPUT_LINE;
308 else if (tes_vertex_order_cw)
309 /* for some reason, this must be the other way around */
310 topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
311 else
312 topology = V_028B6C_OUTPUT_TRIANGLE_CW;
313
314 if (sscreen->has_distributed_tess) {
315 if (sscreen->b.family == CHIP_FIJI ||
316 sscreen->b.family >= CHIP_POLARIS10)
317 distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
318 else
319 distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
320 } else
321 distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;
322
323 si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
324 S_028B6C_TYPE(type) |
325 S_028B6C_PARTITIONING(partitioning) |
326 S_028B6C_TOPOLOGY(topology) |
327 S_028B6C_DISTRIBUTION_MODE(distribution_mode));
328 }
329
330 static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
331 {
332 if (shader->pm4)
333 si_pm4_clear_state(shader->pm4);
334 else
335 shader->pm4 = CALLOC_STRUCT(si_pm4_state);
336
337 return shader->pm4;
338 }
339
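/* Generate the pm4 state for the LS stage: the vertex shader running before
 * tessellation, which writes its outputs to LDS for the HS to read. */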
340 static void si_shader_ls(struct si_shader *shader)
341 {
342 struct si_pm4_state *pm4;
343 unsigned vgpr_comp_cnt;
344 uint64_t va;
345
346 pm4 = si_get_shader_pm4_state(shader);
347 if (!pm4)
348 return;
349
350 va = shader->bo->gpu_address;
351 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
352
353 /* We need at least 2 components for LS.
354 * VGPR0-3: (VertexID, RelAutoindex, ???, InstanceID). */
355 vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 1;
356
357 si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
358 si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);
359
360 shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
361 S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
362 S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
363 S_00B528_DX10_CLAMP(1) |
364 S_00B528_FLOAT_MODE(shader->config.float_mode);
365 shader->config.rsrc2 = S_00B52C_USER_SGPR(SI_LS_NUM_USER_SGPR) |
366 S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
367 }
368
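/* Generate the pm4 state for the HS stage (tessellation control shader). */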
369 static void si_shader_hs(struct si_shader *shader)
370 {
371 struct si_pm4_state *pm4;
372 uint64_t va;
373
374 pm4 = si_get_shader_pm4_state(shader);
375 if (!pm4)
376 return;
377
378 va = shader->bo->gpu_address;
379 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
380
381 si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
382 si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
383 si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
384 S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) |
385 S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) |
386 S_00B428_DX10_CLAMP(1) |
387 S_00B428_FLOAT_MODE(shader->config.float_mode));
388 si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
389 S_00B42C_USER_SGPR(SI_TCS_NUM_USER_SGPR) |
390 S_00B42C_OC_LDS_EN(1) |
391 S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
392 }
393
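/* Generate the pm4 state for the ES stage: a vertex or tessellation
 * evaluation shader that feeds the geometry shader through the ESGS ring. */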
394 static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
395 {
396 struct si_pm4_state *pm4;
397 unsigned num_user_sgprs;
398 unsigned vgpr_comp_cnt;
399 uint64_t va;
400 unsigned oc_lds_en;
401
402 pm4 = si_get_shader_pm4_state(shader);
403 if (!pm4)
404 return;
405
406 va = shader->bo->gpu_address;
407 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
408
409 if (shader->selector->type == PIPE_SHADER_VERTEX) {
410 vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 0;
411 num_user_sgprs = SI_ES_NUM_USER_SGPR;
412 } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
413 vgpr_comp_cnt = 3; /* all components are needed for TES */
414 num_user_sgprs = SI_TES_NUM_USER_SGPR;
415 } else
416 unreachable("invalid shader selector type");
417
418 oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;
419
420 si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
421 shader->selector->esgs_itemsize / 4);
422 si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
423 si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
424 si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
425 S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
426 S_00B328_SGPRS((shader->config.num_sgprs - 1) / 8) |
427 S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
428 S_00B328_DX10_CLAMP(1) |
429 S_00B328_FLOAT_MODE(shader->config.float_mode));
430 si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
431 S_00B32C_USER_SGPR(num_user_sgprs) |
432 S_00B32C_OC_LDS_EN(oc_lds_en) |
433 S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
434
435 if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
436 si_set_tesseval_regs(sscreen, shader, pm4);
437 }
438
439 /**
440 * Calculate the appropriate setting of VGT_GS_MODE when \p shader is a
441 * geometry shader.
442 */
443 static uint32_t si_vgt_gs_mode(struct si_shader *shader)
444 {
445 unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
446 unsigned cut_mode;
447
448 if (gs_max_vert_out <= 128) {
449 cut_mode = V_028A40_GS_CUT_128;
450 } else if (gs_max_vert_out <= 256) {
451 cut_mode = V_028A40_GS_CUT_256;
452 } else if (gs_max_vert_out <= 512) {
453 cut_mode = V_028A40_GS_CUT_512;
454 } else {
455 assert(gs_max_vert_out <= 1024);
456 cut_mode = V_028A40_GS_CUT_1024;
457 }
458
459 return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
460 S_028A40_CUT_MODE(cut_mode)|
461 S_028A40_ES_WRITE_OPTIMIZE(1) |
462 S_028A40_GS_WRITE_OPTIMIZE(1);
463 }
464
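/* Generate the pm4 state for the GS stage (geometry shader). */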
465 static void si_shader_gs(struct si_shader *shader)
466 {
467 unsigned gs_vert_itemsize = shader->selector->gsvs_vertex_size;
468 unsigned gsvs_itemsize = shader->selector->max_gsvs_emit_size >> 2;
469 unsigned gs_num_invocations = shader->selector->gs_num_invocations;
470 struct si_pm4_state *pm4;
471 uint64_t va;
472 unsigned max_stream = shader->selector->max_gs_stream;
473
474 /* The GSVS_RING_ITEMSIZE register takes 15 bits */
475 assert(gsvs_itemsize < (1 << 15));
476
477 pm4 = si_get_shader_pm4_state(shader);
478 if (!pm4)
479 return;
480
481 si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(shader));
482
483 si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
484 si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize * ((max_stream >= 2) ? 2 : 1));
485 si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize * ((max_stream >= 3) ? 3 : 1));
486
487 si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize * (max_stream + 1));
488
489 si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, shader->selector->gs_max_out_vertices);
490
491 si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize >> 2);
492 si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? gs_vert_itemsize >> 2 : 0);
493 si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? gs_vert_itemsize >> 2 : 0);
494 si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? gs_vert_itemsize >> 2 : 0);
495
496 si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
497 S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
498 S_028B90_ENABLE(gs_num_invocations > 0));
499
500 va = shader->bo->gpu_address;
501 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
502 si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
503 si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);
504
505 si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
506 S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
507 S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
508 S_00B228_DX10_CLAMP(1) |
509 S_00B228_FLOAT_MODE(shader->config.float_mode));
510 si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
511 S_00B22C_USER_SGPR(SI_GS_NUM_USER_SGPR) |
512 S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
513 }
514
515 /**
516 * Compute the state for \p shader, which will run as a vertex shader on the
517 * hardware.
518 *
519 * If \p gs is non-NULL, it points to the geometry shader for which this shader
520 * is the copy shader.
521 */
522 static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
523 struct si_shader *gs)
524 {
525 struct si_pm4_state *pm4;
526 unsigned num_user_sgprs;
527 unsigned nparams, vgpr_comp_cnt;
528 uint64_t va;
529 unsigned oc_lds_en;
530 unsigned window_space =
531 shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
532 bool enable_prim_id = si_vs_exports_prim_id(shader);
533
534 pm4 = si_get_shader_pm4_state(shader);
535 if (!pm4)
536 return;
537
538 /* We always write VGT_GS_MODE in the VS state, because every switch
539 * between different shader pipelines involving a different GS or no
540 * GS at all involves a switch of the VS (different GS use different
541 * copy shaders). On the other hand, when the API switches from a GS to
542 * no GS and then back to the same GS used originally, the GS state is
543 * not sent again.
544 */
545 if (!gs) {
546 si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
547 S_028A40_MODE(enable_prim_id ? V_028A40_GS_SCENARIO_A : 0));
548 si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
549 } else {
550 si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(gs));
551 si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
552 }
553
554 va = shader->bo->gpu_address;
555 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
556
557 if (gs) {
558 vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
559 num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
560 } else if (shader->selector->type == PIPE_SHADER_VERTEX) {
561 vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
562 num_user_sgprs = SI_VS_NUM_USER_SGPR;
563 } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
564 vgpr_comp_cnt = 3; /* all components are needed for TES */
565 num_user_sgprs = SI_TES_NUM_USER_SGPR;
566 } else
567 unreachable("invalid shader selector type");
568
569 /* VS is required to export at least one param. */
570 nparams = MAX2(shader->info.nr_param_exports, 1);
571 si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
572 S_0286C4_VS_EXPORT_COUNT(nparams - 1));
573
574 si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
575 S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
576 S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
577 V_02870C_SPI_SHADER_4COMP :
578 V_02870C_SPI_SHADER_NONE) |
579 S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
580 V_02870C_SPI_SHADER_4COMP :
581 V_02870C_SPI_SHADER_NONE) |
582 S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
583 V_02870C_SPI_SHADER_4COMP :
584 V_02870C_SPI_SHADER_NONE));
585
586 oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;
587
588 si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
589 si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
590 si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
591 S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) |
592 S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8) |
593 S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
594 S_00B128_DX10_CLAMP(1) |
595 S_00B128_FLOAT_MODE(shader->config.float_mode));
596 si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
597 S_00B12C_USER_SGPR(num_user_sgprs) |
598 S_00B12C_OC_LDS_EN(oc_lds_en) |
599 S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
600 S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
601 S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
602 S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
603 S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
604 S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
605 if (window_space)
606 si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
607 S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
608 else
609 si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
610 S_028818_VTX_W0_FMT(1) |
611 S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
612 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
613 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
614
615 if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
616 si_set_tesseval_regs(sscreen, shader, pm4);
617 }
618
619 static unsigned si_get_ps_num_interp(struct si_shader *ps)
620 {
621 struct tgsi_shader_info *info = &ps->selector->info;
622 unsigned num_colors = !!(info->colors_read & 0x0f) +
623 !!(info->colors_read & 0xf0);
624 unsigned num_interp = ps->selector->info.num_inputs +
625 (ps->key.ps.prolog.color_two_side ? num_colors : 0);
626
627 assert(num_interp <= 32);
628 return MIN2(num_interp, 32);
629 }
630
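/**
 * Return SPI_SHADER_COL_FORMAT from the shader key, with any disabled target
 * that precedes an enabled one switched to 32_R, because a zero format
 * followed by a non-zero one can hang the hardware.
 */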
631 static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
632 {
633 unsigned value = shader->key.ps.epilog.spi_shader_col_format;
634 unsigned i, num_targets = (util_last_bit(value) + 3) / 4;
635
636 /* If the i-th target format is set, all previous target formats must
637 * be non-zero to avoid hangs.
638 */
639 for (i = 0; i < num_targets; i++)
640 if (!(value & (0xf << (i * 4))))
641 value |= V_028714_SPI_SHADER_32_R << (i * 4);
642
643 return value;
644 }
645
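/**
 * Derive CB_SHADER_MASK from SPI_SHADER_COL_FORMAT: 4 write-enable bits
 * (one per channel) for each color target, matching the channels that the
 * target's export format actually writes.
 */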
646 static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
647 {
648 unsigned i, cb_shader_mask = 0;
649
650 for (i = 0; i < 8; i++) {
651 switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
652 case V_028714_SPI_SHADER_ZERO:
653 break;
654 case V_028714_SPI_SHADER_32_R:
655 cb_shader_mask |= 0x1 << (i * 4);
656 break;
657 case V_028714_SPI_SHADER_32_GR:
658 cb_shader_mask |= 0x3 << (i * 4);
659 break;
660 case V_028714_SPI_SHADER_32_AR:
661 cb_shader_mask |= 0x9 << (i * 4);
662 break;
663 case V_028714_SPI_SHADER_FP16_ABGR:
664 case V_028714_SPI_SHADER_UNORM16_ABGR:
665 case V_028714_SPI_SHADER_SNORM16_ABGR:
666 case V_028714_SPI_SHADER_UINT16_ABGR:
667 case V_028714_SPI_SHADER_SINT16_ABGR:
668 case V_028714_SPI_SHADER_32_ABGR:
669 cb_shader_mask |= 0xf << (i * 4);
670 break;
671 default:
672 assert(0);
673 }
674 }
675 return cb_shader_mask;
676 }
677
678 static void si_shader_ps(struct si_shader *shader)
679 {
680 struct tgsi_shader_info *info = &shader->selector->info;
681 struct si_pm4_state *pm4;
682 unsigned spi_ps_in_control, spi_shader_col_format, cb_shader_mask;
683 unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
684 uint64_t va;
685 unsigned input_ena = shader->config.spi_ps_input_ena;
686
687 /* we need to enable at least one of them, otherwise we hang the GPU */
688 assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
689 G_0286CC_PERSP_CENTER_ENA(input_ena) ||
690 G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
691 G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
692 G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
693 G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
694 G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
695 G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));
696 /* POS_W_FLOAT_ENA requires one of the perspective weights. */
697 assert(!G_0286CC_POS_W_FLOAT_ENA(input_ena) ||
698 G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
699 G_0286CC_PERSP_CENTER_ENA(input_ena) ||
700 G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
701 G_0286CC_PERSP_PULL_MODEL_ENA(input_ena));
702
703 /* Validate interpolation optimization flags (read as implications). */
704 assert(!shader->key.ps.prolog.bc_optimize_for_persp ||
705 (G_0286CC_PERSP_CENTER_ENA(input_ena) &&
706 G_0286CC_PERSP_CENTROID_ENA(input_ena)));
707 assert(!shader->key.ps.prolog.bc_optimize_for_linear ||
708 (G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
709 G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
710 assert(!shader->key.ps.prolog.force_persp_center_interp ||
711 (!G_0286CC_PERSP_SAMPLE_ENA(input_ena) &&
712 !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
713 assert(!shader->key.ps.prolog.force_linear_center_interp ||
714 (!G_0286CC_LINEAR_SAMPLE_ENA(input_ena) &&
715 !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
716 assert(!shader->key.ps.prolog.force_persp_sample_interp ||
717 (!G_0286CC_PERSP_CENTER_ENA(input_ena) &&
718 !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
719 assert(!shader->key.ps.prolog.force_linear_sample_interp ||
720 (!G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
721 !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
722
723 /* Validate cases when the optimizations are off (read as implications). */
724 assert(shader->key.ps.prolog.bc_optimize_for_persp ||
725 !G_0286CC_PERSP_CENTER_ENA(input_ena) ||
726 !G_0286CC_PERSP_CENTROID_ENA(input_ena));
727 assert(shader->key.ps.prolog.bc_optimize_for_linear ||
728 !G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
729 !G_0286CC_LINEAR_CENTROID_ENA(input_ena));
730
731 pm4 = si_get_shader_pm4_state(shader);
732 if (!pm4)
733 return;
734
735 /* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
736 * Possible values:
737 * 0 -> Position = pixel center
738 * 1 -> Position = pixel centroid
739 * 2 -> Position = at sample position
740 *
741 * From GLSL 4.5 specification, section 7.1:
742 * "The variable gl_FragCoord is available as an input variable from
743 * within fragment shaders and it holds the window relative coordinates
744 * (x, y, z, 1/w) values for the fragment. If multi-sampling, this
745 * value can be for any location within the pixel, or one of the
746 * fragment samples. The use of centroid does not further restrict
747 * this value to be inside the current primitive."
748 *
749 * Meaning that centroid has no effect and we can return anything within
750 * the pixel. Thus, return the value at sample position, because that's
751 * the most accurate one shaders can get.
752 */
753 spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
754
755 if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
756 TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
757 spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
758
759 spi_shader_col_format = si_get_spi_shader_col_format(shader);
760 cb_shader_mask = si_get_cb_shader_mask(spi_shader_col_format);
761
762 /* Ensure that some export memory is always allocated, for two reasons:
763 *
764 * 1) Correctness: The hardware ignores the EXEC mask if no export
765 * memory is allocated, so KILL and alpha test do not work correctly
766 * without this.
767 * 2) Performance: Every shader needs at least a NULL export, even when
768 * it writes no color/depth output. The NULL export instruction
769 * stalls without this setting.
770 *
771 * Don't add this to CB_SHADER_MASK.
772 */
773 if (!spi_shader_col_format &&
774 !info->writes_z && !info->writes_stencil && !info->writes_samplemask)
775 spi_shader_col_format = V_028714_SPI_SHADER_32_R;
776
777 si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, input_ena);
778 si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR,
779 shader->config.spi_ps_input_addr);
780
781 /* Set interpolation controls. */
782 spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader));
783
784 /* Set registers. */
785 si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
786 si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
787
788 si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
789 si_get_spi_shader_z_format(info->writes_z,
790 info->writes_stencil,
791 info->writes_samplemask));
792
793 si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT, spi_shader_col_format);
794 si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, cb_shader_mask);
795
796 va = shader->bo->gpu_address;
797 si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
798 si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
799 si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);
800
801 si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
802 S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) |
803 S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8) |
804 S_00B028_DX10_CLAMP(1) |
805 S_00B028_FLOAT_MODE(shader->config.float_mode));
806 si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
807 S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
808 S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
809 S_00B02C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
810
811 /* Prefer RE_Z if the shader is complex enough. The requirement is either:
812 * - the shader uses at least 2 VMEM instructions, or
813 * - the code size is at least 50 2-dword instructions or 100 1-dword
814 * instructions.
815 *
816 * Shaders with side effects that must execute independently of the
817 * depth test require LATE_Z.
818 */
819 if (info->writes_memory &&
820 !info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL])
821 shader->z_order = V_02880C_LATE_Z;
822 else if (info->num_memory_instructions >= 2 ||
823 shader->binary.code_size > 100*4)
824 shader->z_order = V_02880C_EARLY_Z_THEN_RE_Z;
825 else
826 shader->z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
827 }
828
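/* Build the pm4 state for the hardware stage (LS, HS, ES, GS, VS, or PS)
 * that this shader variant runs as. For a GS, also build the state for its
 * VS copy shader. */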
829 static void si_shader_init_pm4_state(struct si_screen *sscreen,
830 struct si_shader *shader)
831 {
832 switch (shader->selector->type) {
833 case PIPE_SHADER_VERTEX:
834 if (shader->key.vs.as_ls)
835 si_shader_ls(shader);
836 else if (shader->key.vs.as_es)
837 si_shader_es(sscreen, shader);
838 else
839 si_shader_vs(sscreen, shader, NULL);
840 break;
841 case PIPE_SHADER_TESS_CTRL:
842 si_shader_hs(shader);
843 break;
844 case PIPE_SHADER_TESS_EVAL:
845 if (shader->key.tes.as_es)
846 si_shader_es(sscreen, shader);
847 else
848 si_shader_vs(sscreen, shader, NULL);
849 break;
850 case PIPE_SHADER_GEOMETRY:
851 si_shader_gs(shader);
852 si_shader_vs(sscreen, shader->gs_copy_shader, shader);
853 break;
854 case PIPE_SHADER_FRAGMENT:
855 si_shader_ps(shader);
856 break;
857 default:
858 assert(0);
859 }
860 }
861
862 static unsigned si_get_alpha_test_func(struct si_context *sctx)
863 {
864 /* Alpha-test should be disabled if colorbuffer 0 is integer. */
865 if (sctx->queued.named.dsa &&
866 !sctx->framebuffer.cb0_is_integer)
867 return sctx->queued.named.dsa->alpha_func;
868
869 return PIPE_FUNC_ALWAYS;
870 }
871
872 /* Compute the key for the hw shader variant */
873 static inline void si_shader_selector_key(struct pipe_context *ctx,
874 struct si_shader_selector *sel,
875 union si_shader_key *key)
876 {
877 struct si_context *sctx = (struct si_context *)ctx;
878 unsigned i;
879
880 memset(key, 0, sizeof(*key));
881
882 switch (sel->type) {
883 case PIPE_SHADER_VERTEX:
884 if (sctx->vertex_elements) {
885 unsigned count = MIN2(sel->info.num_inputs,
886 sctx->vertex_elements->count);
887 for (i = 0; i < count; ++i)
888 key->vs.prolog.instance_divisors[i] =
889 sctx->vertex_elements->elements[i].instance_divisor;
890 }
891 if (sctx->tes_shader.cso)
892 key->vs.as_ls = 1;
893 else if (sctx->gs_shader.cso)
894 key->vs.as_es = 1;
895
896 if (!sctx->gs_shader.cso && sctx->ps_shader.cso &&
897 sctx->ps_shader.cso->info.uses_primid)
898 key->vs.epilog.export_prim_id = 1;
899 break;
900 case PIPE_SHADER_TESS_CTRL:
901 key->tcs.epilog.prim_mode =
902 sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
903
904 if (sel == sctx->fixed_func_tcs_shader.cso)
905 key->tcs.epilog.inputs_to_copy = sctx->vs_shader.cso->outputs_written;
906 break;
907 case PIPE_SHADER_TESS_EVAL:
908 if (sctx->gs_shader.cso)
909 key->tes.as_es = 1;
910 else if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
911 key->tes.epilog.export_prim_id = 1;
912 break;
913 case PIPE_SHADER_GEOMETRY:
914 break;
915 case PIPE_SHADER_FRAGMENT: {
916 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
917 struct si_state_blend *blend = sctx->queued.named.blend;
918
919 if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
920 sel->info.colors_written == 0x1)
921 key->ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
922
923 if (blend) {
924 /* Select the shader color format based on whether
925 * blending or alpha are needed.
926 */
927 key->ps.epilog.spi_shader_col_format =
928 (blend->blend_enable_4bit & blend->need_src_alpha_4bit &
929 sctx->framebuffer.spi_shader_col_format_blend_alpha) |
930 (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
931 sctx->framebuffer.spi_shader_col_format_blend) |
932 (~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
933 sctx->framebuffer.spi_shader_col_format_alpha) |
934 (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
935 sctx->framebuffer.spi_shader_col_format);
936
937 /* The output for dual source blending should have
938 * the same format as the first output.
939 */
940 if (blend->dual_src_blend)
941 key->ps.epilog.spi_shader_col_format |=
942 (key->ps.epilog.spi_shader_col_format & 0xf) << 4;
943 } else
944 key->ps.epilog.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format;
945
946 /* If alpha-to-coverage is enabled, we have to export alpha
947 * even if there is no color buffer.
948 */
949 if (!(key->ps.epilog.spi_shader_col_format & 0xf) &&
950 blend && blend->alpha_to_coverage)
951 key->ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;
952
953 /* On SI and CIK except Hawaii, the CB doesn't clamp outputs
954 * to the range supported by the type if a channel has less
955 * than 16 bits and the export format is 16_ABGR.
956 */
957 if (sctx->b.chip_class <= CIK && sctx->b.family != CHIP_HAWAII)
958 key->ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
959
960 /* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
961 if (!key->ps.epilog.last_cbuf) {
962 key->ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
963 key->ps.epilog.color_is_int8 &= sel->info.colors_written;
964 }
965
966 if (rs) {
967 bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
968 sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
969 sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
970 bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;
971
972 key->ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
973 key->ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;
974
975 if (sctx->queued.named.blend) {
976 key->ps.epilog.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
977 rs->multisample_enable &&
978 !sctx->framebuffer.cb0_is_integer;
979 }
980
981 key->ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
982 key->ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
983 (is_line && rs->line_smooth)) &&
984 sctx->framebuffer.nr_samples <= 1;
985 key->ps.epilog.clamp_color = rs->clamp_fragment_color;
986
987 if (rs->force_persample_interp &&
988 rs->multisample_enable &&
989 sctx->framebuffer.nr_samples > 1 &&
990 sctx->ps_iter_samples > 1) {
991 key->ps.prolog.force_persp_sample_interp =
992 sel->info.uses_persp_center ||
993 sel->info.uses_persp_centroid;
994
995 key->ps.prolog.force_linear_sample_interp =
996 sel->info.uses_linear_center ||
997 sel->info.uses_linear_centroid;
998 } else if (rs->multisample_enable &&
999 sctx->framebuffer.nr_samples > 1) {
1000 key->ps.prolog.bc_optimize_for_persp =
1001 sel->info.uses_persp_center &&
1002 sel->info.uses_persp_centroid;
1003 key->ps.prolog.bc_optimize_for_linear =
1004 sel->info.uses_linear_center &&
1005 sel->info.uses_linear_centroid;
1006 } else {
1007 /* Make sure SPI doesn't compute more than 1 pair
1008 * of (i,j), which is the optimization here. */
1009 key->ps.prolog.force_persp_center_interp =
1010 sel->info.uses_persp_center +
1011 sel->info.uses_persp_centroid +
1012 sel->info.uses_persp_sample > 1;
1013
1014 key->ps.prolog.force_linear_center_interp =
1015 sel->info.uses_linear_center +
1016 sel->info.uses_linear_centroid +
1017 sel->info.uses_linear_sample > 1;
1018 }
1019 }
1020
1021 key->ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
1022 break;
1023 }
1024 default:
1025 assert(0);
1026 }
1027 }
1028
1029 /* Select the hw shader variant depending on the current state. */
1030 static int si_shader_select_with_key(struct si_screen *sscreen,
1031 struct si_shader_ctx_state *state,
1032 union si_shader_key *key,
1033 LLVMTargetMachineRef tm,
1034 struct pipe_debug_callback *debug,
1035 bool wait,
1036 bool is_debug_context)
1037 {
1038 struct si_shader_selector *sel = state->cso;
1039 struct si_shader *current = state->current;
1040 struct si_shader *iter, *shader = NULL;
1041 int r;
1042
1043 /* Check if we don't need to change anything.
1044 * This path is also used for most shaders that don't need multiple
1045 * variants; it costs just a computation of the key and this
1046 * test. */
1047 if (likely(current && memcmp(&current->key, key, sizeof(*key)) == 0))
1048 return 0;
1049
1050 /* This must be done before the mutex is locked, because async GS
1051 * compilation calls this function too, and therefore must enter
1052 * the mutex first.
1053 */
1054 if (wait)
1055 util_queue_job_wait(&sel->ready);
1056
1057 pipe_mutex_lock(sel->mutex);
1058
1059 /* Find the shader variant. */
1060 for (iter = sel->first_variant; iter; iter = iter->next_variant) {
1061 /* Don't check the "current" shader. We checked it above. */
1062 if (current != iter &&
1063 memcmp(&iter->key, key, sizeof(*key)) == 0) {
1064 state->current = iter;
1065 pipe_mutex_unlock(sel->mutex);
1066 return 0;
1067 }
1068 }
1069
1070 /* Build a new shader. */
1071 shader = CALLOC_STRUCT(si_shader);
1072 if (!shader) {
1073 pipe_mutex_unlock(sel->mutex);
1074 return -ENOMEM;
1075 }
1076 shader->selector = sel;
1077 shader->key = *key;
1078
1079 r = si_shader_create(sscreen, tm, shader, debug);
1080 if (unlikely(r)) {
1081 R600_ERR("Failed to build shader variant (type=%u) %d\n",
1082 sel->type, r);
1083 FREE(shader);
1084 pipe_mutex_unlock(sel->mutex);
1085 return r;
1086 }
1087
1088 if (is_debug_context) {
1089 FILE *f = open_memstream(&shader->shader_log,
1090 &shader->shader_log_size);
1091 if (f) {
1092 si_shader_dump(sscreen, shader, NULL, sel->type, f);
1093 fclose(f);
1094 }
1095 }
1096
1097 si_shader_init_pm4_state(sscreen, shader);
1098
1099 if (!sel->last_variant) {
1100 sel->first_variant = shader;
1101 sel->last_variant = shader;
1102 } else {
1103 sel->last_variant->next_variant = shader;
1104 sel->last_variant = shader;
1105 }
1106 state->current = shader;
1107 pipe_mutex_unlock(sel->mutex);
1108 return 0;
1109 }
1110
1111 static int si_shader_select(struct pipe_context *ctx,
1112 struct si_shader_ctx_state *state)
1113 {
1114 struct si_context *sctx = (struct si_context *)ctx;
1115 union si_shader_key key;
1116
1117 si_shader_selector_key(ctx, state->cso, &key);
1118 return si_shader_select_with_key(sctx->screen, state, &key,
1119 sctx->tm, &sctx->b.debug, true,
1120 sctx->is_debug);
1121 }
1122
1123 static void si_parse_next_shader_property(const struct tgsi_shader_info *info,
1124 union si_shader_key *key)
1125 {
1126 unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];
1127
1128 switch (info->processor) {
1129 case PIPE_SHADER_VERTEX:
1130 switch (next_shader) {
1131 case PIPE_SHADER_GEOMETRY:
1132 key->vs.as_es = 1;
1133 break;
1134 case PIPE_SHADER_TESS_CTRL:
1135 case PIPE_SHADER_TESS_EVAL:
1136 key->vs.as_ls = 1;
1137 break;
1138 }
1139 break;
1140
1141 case PIPE_SHADER_TESS_EVAL:
1142 if (next_shader == PIPE_SHADER_GEOMETRY)
1143 key->tes.as_es = 1;
1144 break;
1145 }
1146 }
1147
1148 /**
1149 * Compile the main shader part or the monolithic shader as part of
1150 * si_shader_selector initialization. Since it can be done asynchronously,
1151 * there is no way to report compile failures to applications.
1152 */
1153 void si_init_shader_selector_async(void *job, int thread_index)
1154 {
1155 struct si_shader_selector *sel = (struct si_shader_selector *)job;
1156 struct si_screen *sscreen = sel->screen;
1157 LLVMTargetMachineRef tm;
1158 struct pipe_debug_callback *debug = &sel->debug;
1159 unsigned i;
1160
1161 if (thread_index >= 0) {
1162 assert(thread_index < ARRAY_SIZE(sscreen->tm));
1163 tm = sscreen->tm[thread_index];
1164 if (!debug->async)
1165 debug = NULL;
1166 } else {
1167 tm = sel->tm;
1168 }
1169
1170 /* Compile the main shader part for use with a prolog and/or epilog.
1171 * If this fails, the driver will try to compile a monolithic shader
1172 * on demand.
1173 */
1174 if (sel->type != PIPE_SHADER_GEOMETRY &&
1175 !sscreen->use_monolithic_shaders) {
1176 struct si_shader *shader = CALLOC_STRUCT(si_shader);
1177 void *tgsi_binary;
1178
1179 if (!shader) {
1180 fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
1181 return;
1182 }
1183
1184 shader->selector = sel;
1185 si_parse_next_shader_property(&sel->info, &shader->key);
1186
1187 tgsi_binary = si_get_tgsi_binary(sel);
1188
1189 /* Try to load the shader from the shader cache. */
1190 pipe_mutex_lock(sscreen->shader_cache_mutex);
1191
1192 if (tgsi_binary &&
1193 si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
1194 FREE(tgsi_binary);
1195 pipe_mutex_unlock(sscreen->shader_cache_mutex);
1196 } else {
1197 pipe_mutex_unlock(sscreen->shader_cache_mutex);
1198
1199 /* Compile the shader if it hasn't been loaded from the cache. */
1200 if (si_compile_tgsi_shader(sscreen, tm, shader, false,
1201 debug) != 0) {
1202 FREE(shader);
1203 FREE(tgsi_binary);
1204 fprintf(stderr, "radeonsi: can't compile a main shader part\n");
1205 return;
1206 }
1207
1208 if (tgsi_binary) {
1209 pipe_mutex_lock(sscreen->shader_cache_mutex);
1210 if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader))
1211 FREE(tgsi_binary);
1212 pipe_mutex_unlock(sscreen->shader_cache_mutex);
1213 }
1214 }
1215
1216 sel->main_shader_part = shader;
1217 }
1218
1219 /* Pre-compilation. */
1220 if (sel->type == PIPE_SHADER_GEOMETRY ||
1221 sscreen->b.debug_flags & DBG_PRECOMPILE) {
1222 struct si_shader_ctx_state state = {sel};
1223 union si_shader_key key;
1224
1225 memset(&key, 0, sizeof(key));
1226 si_parse_next_shader_property(&sel->info, &key);
1227
1228 /* Set reasonable defaults, so that the shader key doesn't
1229 * cause any code to be eliminated.
1230 */
1231 switch (sel->type) {
1232 case PIPE_SHADER_TESS_CTRL:
1233 key.tcs.epilog.prim_mode = PIPE_PRIM_TRIANGLES;
1234 break;
1235 case PIPE_SHADER_FRAGMENT:
1236 key.ps.prolog.bc_optimize_for_persp =
1237 sel->info.uses_persp_center &&
1238 sel->info.uses_persp_centroid;
1239 key.ps.prolog.bc_optimize_for_linear =
1240 sel->info.uses_linear_center &&
1241 sel->info.uses_linear_centroid;
1242 key.ps.epilog.alpha_func = PIPE_FUNC_ALWAYS;
1243 for (i = 0; i < 8; i++)
1244 if (sel->info.colors_written & (1 << i))
1245 key.ps.epilog.spi_shader_col_format |=
1246 V_028710_SPI_SHADER_FP16_ABGR << (i * 4);
1247 break;
1248 }
1249
1250 if (si_shader_select_with_key(sscreen, &state, &key, tm, debug,
1251 false, sel->is_debug_context))
1252 fprintf(stderr, "radeonsi: can't create a monolithic shader\n");
1253 }
1254 }
1255
1256 static void *si_create_shader_selector(struct pipe_context *ctx,
1257 const struct pipe_shader_state *state)
1258 {
1259 struct si_screen *sscreen = (struct si_screen *)ctx->screen;
1260 struct si_context *sctx = (struct si_context*)ctx;
1261 struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
1262 int i;
1263
1264 if (!sel)
1265 return NULL;
1266
1267 sel->screen = sscreen;
1268 sel->tm = sctx->tm;
1269 sel->debug = sctx->b.debug;
1270 sel->is_debug_context = sctx->is_debug;
1271 sel->tokens = tgsi_dup_tokens(state->tokens);
1272 if (!sel->tokens) {
1273 FREE(sel);
1274 return NULL;
1275 }
1276
1277 sel->so = state->stream_output;
1278 tgsi_scan_shader(state->tokens, &sel->info);
1279 sel->type = sel->info.processor;
1280 p_atomic_inc(&sscreen->b.num_shaders_created);
1281
1282 /* Set which opcode uses which (i,j) pair. */
1283 if (sel->info.uses_persp_opcode_interp_centroid)
1284 sel->info.uses_persp_centroid = true;
1285
1286 if (sel->info.uses_linear_opcode_interp_centroid)
1287 sel->info.uses_linear_centroid = true;
1288
1289 if (sel->info.uses_persp_opcode_interp_offset ||
1290 sel->info.uses_persp_opcode_interp_sample)
1291 sel->info.uses_persp_center = true;
1292
1293 if (sel->info.uses_linear_opcode_interp_offset ||
1294 sel->info.uses_linear_opcode_interp_sample)
1295 sel->info.uses_linear_center = true;
1296
1297 switch (sel->type) {
1298 case PIPE_SHADER_GEOMETRY:
1299 sel->gs_output_prim =
1300 sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
1301 sel->gs_max_out_vertices =
1302 sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
1303 sel->gs_num_invocations =
1304 sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
1305 sel->gsvs_vertex_size = sel->info.num_outputs * 16;
1306 sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
1307 sel->gs_max_out_vertices;
1308
1309 sel->max_gs_stream = 0;
1310 for (i = 0; i < sel->so.num_outputs; i++)
1311 sel->max_gs_stream = MAX2(sel->max_gs_stream,
1312 sel->so.output[i].stream);
1313
1314 sel->gs_input_verts_per_prim =
1315 u_vertices_per_prim(sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]);
1316 break;
1317
1318 case PIPE_SHADER_TESS_CTRL:
1319 /* Always reserve space for these. */
1320 sel->patch_outputs_written |=
1321 (1llu << si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSINNER, 0)) |
1322 (1llu << si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSOUTER, 0));
1323 /* fall through */
1324 case PIPE_SHADER_VERTEX:
1325 case PIPE_SHADER_TESS_EVAL:
1326 for (i = 0; i < sel->info.num_outputs; i++) {
1327 unsigned name = sel->info.output_semantic_name[i];
1328 unsigned index = sel->info.output_semantic_index[i];
1329
1330 switch (name) {
1331 case TGSI_SEMANTIC_TESSINNER:
1332 case TGSI_SEMANTIC_TESSOUTER:
1333 case TGSI_SEMANTIC_PATCH:
1334 sel->patch_outputs_written |=
1335 1llu << si_shader_io_get_unique_index(name, index);
1336 break;
1337 default:
1338 sel->outputs_written |=
1339 1llu << si_shader_io_get_unique_index(name, index);
1340 }
1341 }
1342 sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;
1343 break;
1344
1345 case PIPE_SHADER_FRAGMENT:
1346 for (i = 0; i < 8; i++)
1347 if (sel->info.colors_written & (1 << i))
1348 sel->colors_written_4bit |= 0xf << (4 * i);
1349
1350 for (i = 0; i < sel->info.num_inputs; i++) {
1351 if (sel->info.input_semantic_name[i] == TGSI_SEMANTIC_COLOR) {
1352 int index = sel->info.input_semantic_index[i];
1353 sel->color_attr_index[index] = i;
1354 }
1355 }
1356 break;
1357 }
1358
1359 /* DB_SHADER_CONTROL */
1360 sel->db_shader_control =
1361 S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
1362 S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(sel->info.writes_stencil) |
1363 S_02880C_MASK_EXPORT_ENABLE(sel->info.writes_samplemask) |
1364 S_02880C_KILL_ENABLE(sel->info.uses_kill);
1365
1366 switch (sel->info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT]) {
1367 case TGSI_FS_DEPTH_LAYOUT_GREATER:
1368 sel->db_shader_control |=
1369 S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
1370 break;
1371 case TGSI_FS_DEPTH_LAYOUT_LESS:
1372 sel->db_shader_control |=
1373 S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
1374 break;
1375 }
1376
1377 if (sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL])
1378 sel->db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1);
1379
1380 if (sel->info.writes_memory)
1381 sel->db_shader_control |= S_02880C_EXEC_ON_HIER_FAIL(1) |
1382 S_02880C_EXEC_ON_NOOP(1);
1383 pipe_mutex_init(sel->mutex);
1384 util_queue_fence_init(&sel->ready);
1385
1386 if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
1387 sctx->is_debug ||
1388 r600_can_dump_shader(&sscreen->b, sel->info.processor) ||
1389 !util_queue_is_initialized(&sscreen->shader_compiler_queue))
1390 si_init_shader_selector_async(sel, -1);
1391 else
1392 util_queue_add_job(&sscreen->shader_compiler_queue, sel,
1393 &sel->ready, si_init_shader_selector_async,
1394 NULL);
1395
1396 return sel;
1397 }
1398
1399 static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
1400 {
1401 struct si_context *sctx = (struct si_context *)ctx;
1402 struct si_shader_selector *sel = state;
1403
1404 if (sctx->vs_shader.cso == sel)
1405 return;
1406
1407 sctx->vs_shader.cso = sel;
1408 sctx->vs_shader.current = sel ? sel->first_variant : NULL;
1409 sctx->do_update_shaders = true;
1410 si_mark_atom_dirty(sctx, &sctx->clip_regs);
1411 r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
1412 }
1413
1414 static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
1415 {
1416 struct si_context *sctx = (struct si_context *)ctx;
1417 struct si_shader_selector *sel = state;
1418 bool enable_changed = !!sctx->gs_shader.cso != !!sel;
1419
1420 if (sctx->gs_shader.cso == sel)
1421 return;
1422
1423 sctx->gs_shader.cso = sel;
1424 sctx->gs_shader.current = sel ? sel->first_variant : NULL;
1425 sctx->do_update_shaders = true;
1426 si_mark_atom_dirty(sctx, &sctx->clip_regs);
1427 sctx->last_rast_prim = -1; /* reset this so that it gets updated */
1428
1429 if (enable_changed)
1430 si_shader_change_notify(sctx);
1431 r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
1432 }
1433
1434 static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
1435 {
1436 struct si_context *sctx = (struct si_context *)ctx;
1437 struct si_shader_selector *sel = state;
1438 bool enable_changed = !!sctx->tcs_shader.cso != !!sel;
1439
1440 if (sctx->tcs_shader.cso == sel)
1441 return;
1442
1443 sctx->tcs_shader.cso = sel;
1444 sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
1445 sctx->do_update_shaders = true;
1446
1447 if (enable_changed)
1448 sctx->last_tcs = NULL; /* invalidate derived tess state */
1449 }
1450
1451 static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
1452 {
1453 struct si_context *sctx = (struct si_context *)ctx;
1454 struct si_shader_selector *sel = state;
1455 bool enable_changed = !!sctx->tes_shader.cso != !!sel;
1456
1457 if (sctx->tes_shader.cso == sel)
1458 return;
1459
1460 sctx->tes_shader.cso = sel;
1461 sctx->tes_shader.current = sel ? sel->first_variant : NULL;
1462 sctx->do_update_shaders = true;
1463 si_mark_atom_dirty(sctx, &sctx->clip_regs);
1464 sctx->last_rast_prim = -1; /* reset this so that it gets updated */
1465
1466 if (enable_changed) {
1467 si_shader_change_notify(sctx);
1468 sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
1469 }
1470 r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
1471 }
1472
1473 static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
1474 {
1475 struct si_context *sctx = (struct si_context *)ctx;
1476 struct si_shader_selector *sel = state;
1477
1478 /* skip if supplied shader is one already in use */
1479 if (sctx->ps_shader.cso == sel)
1480 return;
1481
1482 sctx->ps_shader.cso = sel;
1483 sctx->ps_shader.current = sel ? sel->first_variant : NULL;
1484 sctx->do_update_shaders = true;
1485 si_mark_atom_dirty(sctx, &sctx->cb_render_state);
1486 }
1487
1488 static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
1489 {
1490 if (shader->pm4) {
1491 switch (shader->selector->type) {
1492 case PIPE_SHADER_VERTEX:
1493 if (shader->key.vs.as_ls)
1494 si_pm4_delete_state(sctx, ls, shader->pm4);
1495 else if (shader->key.vs.as_es)
1496 si_pm4_delete_state(sctx, es, shader->pm4);
1497 else
1498 si_pm4_delete_state(sctx, vs, shader->pm4);
1499 break;
1500 case PIPE_SHADER_TESS_CTRL:
1501 si_pm4_delete_state(sctx, hs, shader->pm4);
1502 break;
1503 case PIPE_SHADER_TESS_EVAL:
1504 if (shader->key.tes.as_es)
1505 si_pm4_delete_state(sctx, es, shader->pm4);
1506 else
1507 si_pm4_delete_state(sctx, vs, shader->pm4);
1508 break;
1509 case PIPE_SHADER_GEOMETRY:
1510 si_pm4_delete_state(sctx, gs, shader->pm4);
1511 si_pm4_delete_state(sctx, vs, shader->gs_copy_shader->pm4);
1512 break;
1513 case PIPE_SHADER_FRAGMENT:
1514 si_pm4_delete_state(sctx, ps, shader->pm4);
1515 break;
1516 }
1517 }
1518
1519 si_shader_destroy(shader);
1520 free(shader);
1521 }
1522
1523 static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
1524 {
1525 struct si_context *sctx = (struct si_context *)ctx;
1526 struct si_shader_selector *sel = (struct si_shader_selector *)state;
1527 struct si_shader *p = sel->first_variant, *c;
1528 struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
1529 [PIPE_SHADER_VERTEX] = &sctx->vs_shader,
1530 [PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
1531 [PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
1532 [PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
1533 [PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
1534 };
1535
1536 util_queue_job_wait(&sel->ready);
1537
1538 if (current_shader[sel->type]->cso == sel) {
1539 current_shader[sel->type]->cso = NULL;
1540 current_shader[sel->type]->current = NULL;
1541 }
1542
1543 while (p) {
1544 c = p->next_variant;
1545 si_delete_shader(sctx, p);
1546 p = c;
1547 }
1548
1549 if (sel->main_shader_part)
1550 si_delete_shader(sctx, sel->main_shader_part);
1551
1552 util_queue_fence_destroy(&sel->ready);
1553 pipe_mutex_destroy(sel->mutex);
1554 free(sel->tokens);
1555 free(sel);
1556 }
1557
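/**
 * Compute one SPI_PS_INPUT_CNTL_* value: the parameter offset of the matching
 * VS output, plus flat-shade, point-sprite, and default-value handling when
 * the VS doesn't write the input.
 */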
1558 static unsigned si_get_ps_input_cntl(struct si_context *sctx,
1559 struct si_shader *vs, unsigned name,
1560 unsigned index, unsigned interpolate)
1561 {
1562 struct tgsi_shader_info *vsinfo = &vs->selector->info;
1563 unsigned j, ps_input_cntl = 0;
1564
1565 if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
1566 (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
1567 ps_input_cntl |= S_028644_FLAT_SHADE(1);
1568
1569 if (name == TGSI_SEMANTIC_PCOORD ||
1570 (name == TGSI_SEMANTIC_TEXCOORD &&
1571 sctx->sprite_coord_enable & (1 << index))) {
1572 ps_input_cntl |= S_028644_PT_SPRITE_TEX(1);
1573 }
1574
1575 for (j = 0; j < vsinfo->num_outputs; j++) {
1576 if (name == vsinfo->output_semantic_name[j] &&
1577 index == vsinfo->output_semantic_index[j]) {
1578 ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[j]);
1579 break;
1580 }
1581 }
1582
1583 if (name == TGSI_SEMANTIC_PRIMID)
1584 /* PrimID is written after the last output. */
1585 ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[vsinfo->num_outputs]);
1586 else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
1587 /* No corresponding output found, load defaults into input.
1588 * Don't set any other bits.
1589 * (FLAT_SHADE=1 completely changes behavior) */
1590 ps_input_cntl = S_028644_OFFSET(0x20);
1591 /* D3D 9 behaviour. GL is undefined */
1592 if (name == TGSI_SEMANTIC_COLOR && index == 0)
1593 ps_input_cntl |= S_028644_DEFAULT_VAL(3);
1594 }
1595 return ps_input_cntl;
1596 }
1597
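/* Emit SPI_PS_INPUT_CNTL_0..N for all PS inputs, including the back-face
 * colors needed for two-sided lighting. */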
1598 static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
1599 {
1600 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1601 struct si_shader *ps = sctx->ps_shader.current;
1602 struct si_shader *vs = si_get_vs_state(sctx);
1603 struct tgsi_shader_info *psinfo = ps ? &ps->selector->info : NULL;
1604 unsigned i, num_interp, num_written = 0, bcol_interp[2];
1605
1606 if (!ps || !ps->selector->info.num_inputs)
1607 return;
1608
1609 num_interp = si_get_ps_num_interp(ps);
1610 assert(num_interp > 0);
1611 radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, num_interp);
1612
1613 for (i = 0; i < psinfo->num_inputs; i++) {
1614 unsigned name = psinfo->input_semantic_name[i];
1615 unsigned index = psinfo->input_semantic_index[i];
1616 unsigned interpolate = psinfo->input_interpolate[i];
1617
1618 radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, name, index,
1619 interpolate));
1620 num_written++;
1621
1622 if (name == TGSI_SEMANTIC_COLOR) {
1623 assert(index < ARRAY_SIZE(bcol_interp));
1624 bcol_interp[index] = interpolate;
1625 }
1626 }
1627
1628 if (ps->key.ps.prolog.color_two_side) {
1629 unsigned bcol = TGSI_SEMANTIC_BCOLOR;
1630
1631 for (i = 0; i < 2; i++) {
1632 if (!(psinfo->colors_read & (0xf << (i * 4))))
1633 continue;
1634
1635 radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, bcol,
1636 i, bcol_interp[i]));
1637 num_written++;
1638 }
1639 }
1640 assert(num_interp == num_written);
1641 }
1642
1643 /**
1644 * Writing CONFIG or UCONFIG VGT registers requires VGT_FLUSH before that.
1645 */
1646 static void si_init_config_add_vgt_flush(struct si_context *sctx)
1647 {
1648 if (sctx->init_config_has_vgt_flush)
1649 return;
1650
1651 /* The Vulkan driver also emits a VS_PARTIAL_FLUSH before VGT_FLUSH. */
1652 si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
1653 si_pm4_cmd_add(sctx->init_config,
1654 EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1655 si_pm4_cmd_end(sctx->init_config, false);
1656
1657 /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
1658 si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
1659 si_pm4_cmd_add(sctx->init_config, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1660 si_pm4_cmd_end(sctx->init_config, false);
1661 sctx->init_config_has_vgt_flush = true;
1662 }
1663
1664 /* Initialize state related to ESGS / GSVS ring buffers */
1665 static bool si_update_gs_ring_buffers(struct si_context *sctx)
1666 {
1667 struct si_shader_selector *es =
1668 sctx->tes_shader.cso ? sctx->tes_shader.cso : sctx->vs_shader.cso;
1669 struct si_shader_selector *gs = sctx->gs_shader.cso;
1670 struct si_pm4_state *pm4;
1671
1672 /* Chip constants. */
1673 unsigned num_se = sctx->screen->b.info.max_se;
1674 unsigned wave_size = 64;
1675 unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
1676 unsigned gs_vertex_reuse = 16 * num_se; /* GS_VERTEX_REUSE register (per SE) */
1677 unsigned alignment = 256 * num_se;
1678 /* The maximum size is 63.999 MB per SE. */
1679 unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
1680
1681 /* Calculate the minimum size. */
1682 unsigned min_esgs_ring_size = align(es->esgs_itemsize * gs_vertex_reuse *
1683 wave_size, alignment);
1684
1685 /* These are recommended sizes, not minimum sizes. */
1686 unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
1687 es->esgs_itemsize * gs->gs_input_verts_per_prim;
1688 unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
1689 gs->max_gsvs_emit_size * (gs->max_gs_stream + 1);
1690
1691 min_esgs_ring_size = align(min_esgs_ring_size, alignment);
1692 esgs_ring_size = align(esgs_ring_size, alignment);
1693 gsvs_ring_size = align(gsvs_ring_size, alignment);
1694
1695 esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
1696 gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
1697
1698 /* Some rings don't have to be allocated if shaders don't use them.
1699 * (e.g. no varyings between ES and GS or GS and VS)
1700 */
1701 bool update_esgs = esgs_ring_size &&
1702 (!sctx->esgs_ring ||
1703 sctx->esgs_ring->width0 < esgs_ring_size);
1704 bool update_gsvs = gsvs_ring_size &&
1705 (!sctx->gsvs_ring ||
1706 sctx->gsvs_ring->width0 < gsvs_ring_size);
1707
1708 if (!update_esgs && !update_gsvs)
1709 return true;
1710
1711 if (update_esgs) {
1712 pipe_resource_reference(&sctx->esgs_ring, NULL);
1713 sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
1714 PIPE_USAGE_DEFAULT,
1715 esgs_ring_size);
1716 if (!sctx->esgs_ring)
1717 return false;
1718 }
1719
1720 if (update_gsvs) {
1721 pipe_resource_reference(&sctx->gsvs_ring, NULL);
1722 sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
1723 PIPE_USAGE_DEFAULT,
1724 gsvs_ring_size);
1725 if (!sctx->gsvs_ring)
1726 return false;
1727 }
1728
1729 /* Create the "init_config_gs_rings" state. */
1730 pm4 = CALLOC_STRUCT(si_pm4_state);
1731 if (!pm4)
1732 return false;
1733
1734 if (sctx->b.chip_class >= CIK) {
1735 if (sctx->esgs_ring)
1736 si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
1737 sctx->esgs_ring->width0 / 256);
1738 if (sctx->gsvs_ring)
1739 si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE,
1740 sctx->gsvs_ring->width0 / 256);
1741 } else {
1742 if (sctx->esgs_ring)
1743 si_pm4_set_reg(pm4, R_0088C8_VGT_ESGS_RING_SIZE,
1744 sctx->esgs_ring->width0 / 256);
1745 if (sctx->gsvs_ring)
1746 si_pm4_set_reg(pm4, R_0088CC_VGT_GSVS_RING_SIZE,
1747 sctx->gsvs_ring->width0 / 256);
1748 }
1749
1750 /* Set the state. */
1751 if (sctx->init_config_gs_rings)
1752 si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
1753 sctx->init_config_gs_rings = pm4;
1754
1755 if (!sctx->init_config_has_vgt_flush) {
1756 si_init_config_add_vgt_flush(sctx);
1757 si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
1758 }
1759
1760 /* Flush the context to re-emit both init_config states. */
1761 sctx->b.initial_gfx_cs_size = 0; /* force flush */
1762 si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
1763
1764 /* Set ring bindings. */
1765 if (sctx->esgs_ring) {
1766 si_set_ring_buffer(&sctx->b.b, SI_ES_RING_ESGS,
1767 sctx->esgs_ring, 0, sctx->esgs_ring->width0,
1768 true, true, 4, 64, 0);
1769 si_set_ring_buffer(&sctx->b.b, SI_GS_RING_ESGS,
1770 sctx->esgs_ring, 0, sctx->esgs_ring->width0,
1771 false, false, 0, 0, 0);
1772 }
1773 if (sctx->gsvs_ring)
1774 si_set_ring_buffer(&sctx->b.b, SI_VS_RING_GSVS,
1775 sctx->gsvs_ring, 0, sctx->gsvs_ring->width0,
1776 false, false, 0, 0, 0);
1777 return true;
1778 }
1779
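/* Rebind the four GSVS ring slots (one per GS vertex stream) whenever the GS
 * output item size changes. */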
1780 static void si_update_gsvs_ring_bindings(struct si_context *sctx)
1781 {
1782 unsigned gsvs_itemsize = sctx->gs_shader.cso->max_gsvs_emit_size;
1783 uint64_t offset;
1784
1785 if (!sctx->gsvs_ring || gsvs_itemsize == sctx->last_gsvs_itemsize)
1786 return;
1787
1788 sctx->last_gsvs_itemsize = gsvs_itemsize;
1789
1790 si_set_ring_buffer(&sctx->b.b, SI_GS_RING_GSVS0,
1791 sctx->gsvs_ring, gsvs_itemsize,
1792 64, true, true, 4, 16, 0);
1793
1794 offset = gsvs_itemsize * 64;
1795 si_set_ring_buffer(&sctx->b.b, SI_GS_RING_GSVS1,
1796 sctx->gsvs_ring, gsvs_itemsize,
1797 64, true, true, 4, 16, offset);
1798
1799 offset = (gsvs_itemsize * 2) * 64;
1800 si_set_ring_buffer(&sctx->b.b, SI_GS_RING_GSVS2,
1801 sctx->gsvs_ring, gsvs_itemsize,
1802 64, true, true, 4, 16, offset);
1803
1804 offset = (gsvs_itemsize * 3) * 64;
1805 si_set_ring_buffer(&sctx->b.b, SI_GS_RING_GSVS3,
1806 sctx->gsvs_ring, gsvs_itemsize,
1807 64, true, true, 4, 16, offset);
1808 }
1809
1810 /**
1811 * @returns 1 if \p shader has been updated to use a new scratch buffer
1812 * 0 if not
1813 * < 0 if there was a failure
1814 */
1815 static int si_update_scratch_buffer(struct si_context *sctx,
1816 struct si_shader *shader)
1817 {
1818 uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
1819 int r;
1820
1821 if (!shader)
1822 return 0;
1823
1824 /* This shader doesn't need a scratch buffer */
1825 if (shader->config.scratch_bytes_per_wave == 0)
1826 return 0;
1827
1828 /* This shader is already configured to use the current
1829 * scratch buffer. */
1830 if (shader->scratch_bo == sctx->scratch_buffer)
1831 return 0;
1832
1833 assert(sctx->scratch_buffer);
1834
1835 si_shader_apply_scratch_relocs(sctx, shader, &shader->config, scratch_va);
1836
1837 /* Replace the shader bo with a new bo that has the relocs applied. */
1838 r = si_shader_binary_upload(sctx->screen, shader);
1839 if (r)
1840 return r;
1841
1842 /* Update the shader state to use the new shader bo. */
1843 si_shader_init_pm4_state(sctx->screen, shader);
1844
1845 r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
1846
1847 return 1;
1848 }
1849
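/* Return the size of the currently allocated scratch buffer, or 0 if there is none. */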
1850 static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
1851 {
1852 return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
1853 }
1854
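/* Return a shader's per-wave scratch requirement, or 0 if it needs no scratch. */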
1855 static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
1856 {
1857 return shader ? shader->config.scratch_bytes_per_wave : 0;
1858 }
1859
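/* Return the largest per-wave scratch requirement among all bound shaders. */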
1860 static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
1861 {
1862 unsigned bytes = 0;
1863
1864 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
1865 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
1866 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
1867 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tcs_shader.current));
1868 bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
1869 return bytes;
1870 }
1871
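/* Grow the scratch buffer if the bound shaders need more space than is
 * currently allocated, patch the affected shaders to use the new buffer,
 * and recompute SPI_TMPRING_SIZE. */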
1872 static bool si_update_spi_tmpring_size(struct si_context *sctx)
1873 {
1874 unsigned current_scratch_buffer_size =
1875 si_get_current_scratch_buffer_size(sctx);
1876 unsigned scratch_bytes_per_wave =
1877 si_get_max_scratch_bytes_per_wave(sctx);
1878 unsigned scratch_needed_size = scratch_bytes_per_wave *
1879 sctx->scratch_waves;
1880 unsigned spi_tmpring_size;
1881 int r;
1882
1883 if (scratch_needed_size > 0) {
1884 if (scratch_needed_size > current_scratch_buffer_size) {
1885 /* Create a bigger scratch buffer */
1886 r600_resource_reference(&sctx->scratch_buffer, NULL);
1887
1888 sctx->scratch_buffer =
1889 si_resource_create_custom(&sctx->screen->b.b,
1890 PIPE_USAGE_DEFAULT, scratch_needed_size);
1891 if (!sctx->scratch_buffer)
1892 return false;
1893 sctx->emit_scratch_reloc = true;
1894 }
1895
1896 /* Update the shaders so they use the latest scratch buffer. The
1897 * scratch buffer may have been changed since these shaders were
1898 * last used, so we still need to try to update them, even if
1899 * they require scratch buffers smaller than the current size.
1900 */
1901 r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
1902 if (r < 0)
1903 return false;
1904 if (r == 1)
1905 si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);
1906
1907 r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
1908 if (r < 0)
1909 return false;
1910 if (r == 1)
1911 si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
1912
1913 r = si_update_scratch_buffer(sctx, sctx->tcs_shader.current);
1914 if (r < 0)
1915 return false;
1916 if (r == 1)
1917 si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
1918
1919 /* VS can be bound as LS, ES, or VS. */
1920 r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
1921 if (r < 0)
1922 return false;
1923 if (r == 1) {
1924 if (sctx->tes_shader.current)
1925 si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
1926 else if (sctx->gs_shader.current)
1927 si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
1928 else
1929 si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
1930 }
1931
1932 /* TES can be bound as ES or VS. */
1933 r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
1934 if (r < 0)
1935 return false;
1936 if (r == 1) {
1937 if (sctx->gs_shader.current)
1938 si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
1939 else
1940 si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
1941 }
1942 }
1943
1944 /* The LLVM shader backend should be reporting aligned scratch_sizes. */
1945 assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
1946 "scratch size should already be aligned correctly.");
1947
1948 spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
1949 S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
1950 if (spi_tmpring_size != sctx->spi_tmpring_size) {
1951 sctx->spi_tmpring_size = spi_tmpring_size;
1952 sctx->emit_scratch_reloc = true;
1953 }
1954 return true;
1955 }
1956
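/* Allocate the tessellation factor ring and the off-chip tessellation buffer,
 * and append the VGT registers that describe them to the init config state. */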
1957 static void si_init_tess_factor_ring(struct si_context *sctx)
1958 {
1959 bool double_offchip_buffers = sctx->b.chip_class >= CIK;
1960 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
1961 unsigned max_offchip_buffers = max_offchip_buffers_per_se *
1962 sctx->screen->b.info.max_se;
1963 unsigned offchip_granularity;
1964
1965 switch (sctx->screen->tess_offchip_block_dw_size) {
1966 default:
1967 assert(0);
1968 /* fall through */
1969 case 8192:
1970 offchip_granularity = V_03093C_X_8K_DWORDS;
1971 break;
1972 case 4096:
1973 offchip_granularity = V_03093C_X_4K_DWORDS;
1974 break;
1975 }
1976
1977 switch (sctx->b.chip_class) {
1978 case SI:
1979 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
1980 break;
1981 case CIK:
1982 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
1983 break;
1984 case VI:
1985 default:
1986 max_offchip_buffers = MIN2(max_offchip_buffers, 512);
1987 break;
1988 }
1989
1990 assert(!sctx->tf_ring);
1991 sctx->tf_ring = pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
1992 PIPE_USAGE_DEFAULT,
1993 32768 * sctx->screen->b.info.max_se);
1994 if (!sctx->tf_ring)
1995 return;
1996
1997 assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);
1998
1999 sctx->tess_offchip_ring = pipe_buffer_create(sctx->b.b.screen,
2000 PIPE_BIND_CUSTOM,
2001 PIPE_USAGE_DEFAULT,
2002 max_offchip_buffers *
2003 sctx->screen->tess_offchip_block_dw_size * 4);
2004 if (!sctx->tess_offchip_ring)
2005 return;
2006
2007 si_init_config_add_vgt_flush(sctx);
2008
2009 /* Append these registers to the init config state. */
2010 if (sctx->b.chip_class >= CIK) {
2011 if (sctx->b.chip_class >= VI)
2012 --max_offchip_buffers;
2013
2014 si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
2015 S_030938_SIZE(sctx->tf_ring->width0 / 4));
2016 si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
2017 r600_resource(sctx->tf_ring)->gpu_address >> 8);
2018 si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
2019 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
2020 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity));
2021 } else {
2022 assert(offchip_granularity == V_03093C_X_8K_DWORDS);
2023 si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
2024 S_008988_SIZE(sctx->tf_ring->width0 / 4));
2025 si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
2026 r600_resource(sctx->tf_ring)->gpu_address >> 8);
2027 si_pm4_set_reg(sctx->init_config, R_0089B0_VGT_HS_OFFCHIP_PARAM,
2028 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers));
2029 }
2030
2031 /* Flush the context to re-emit the init_config state.
2032 * This is done only once in the lifetime of a context.
2033 */
2034 si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
2035 sctx->b.initial_gfx_cs_size = 0; /* force flush */
2036 si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
2037
2038 si_set_ring_buffer(&sctx->b.b, SI_HS_RING_TESS_FACTOR, sctx->tf_ring,
2039 0, sctx->tf_ring->width0, false, false, 0, 0, 0);
2040
2041 si_set_ring_buffer(&sctx->b.b, SI_HS_RING_TESS_OFFCHIP,
2042 sctx->tess_offchip_ring, 0,
2043 sctx->tess_offchip_ring->width0, false, false, 0, 0, 0);
2044 }
2045
2046 /**
2047 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
2048 * VS passes its outputs to TES directly, so the fixed-function shader only
2049 * has to write TESSOUTER and TESSINNER.
2050 */
2051 static void si_generate_fixed_func_tcs(struct si_context *sctx)
2052 {
2053 struct ureg_src outer, inner;
2054 struct ureg_dst tessouter, tessinner;
2055 struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);
2056
2057 if (!ureg)
2058 return; /* if we get here, we're screwed */
2059
2060 assert(!sctx->fixed_func_tcs_shader.cso);
2061
2062 outer = ureg_DECL_system_value(ureg,
2063 TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI, 0);
2064 inner = ureg_DECL_system_value(ureg,
2065 TGSI_SEMANTIC_DEFAULT_TESSINNER_SI, 0);
2066
2067 tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
2068 tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);
2069
2070 ureg_MOV(ureg, tessouter, outer);
2071 ureg_MOV(ureg, tessinner, inner);
2072 ureg_END(ureg);
2073
2074 sctx->fixed_func_tcs_shader.cso =
2075 ureg_create_shader_and_destroy(ureg, &sctx->b.b);
2076 }
2077
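/* Select and bind the VGT_SHADER_STAGES_EN state that matches the currently
 * enabled combination of tessellation and geometry shaders. */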
2078 static void si_update_vgt_shader_config(struct si_context *sctx)
2079 {
2080 /* Calculate the index of the config.
2081 * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
2082 unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso;
2083 struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];
2084
2085 if (!*pm4) {
2086 uint32_t stages = 0;
2087
2088 *pm4 = CALLOC_STRUCT(si_pm4_state);
2089
2090 if (sctx->tes_shader.cso) {
2091 stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
2092 S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);
2093
2094 if (sctx->gs_shader.cso)
2095 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
2096 S_028B54_GS_EN(1) |
2097 S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
2098 else
2099 stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
2100 } else if (sctx->gs_shader.cso) {
2101 stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
2102 S_028B54_GS_EN(1) |
2103 S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
2104 }
2105
2106 si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
2107 }
2108 si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
2109 }
2110
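/* Update the streamout enabled-buffer mask and strides from the given
 * shader's stream output info. */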
2111 static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
2112 {
2113 struct pipe_stream_output_info *so = &shader->so;
2114 uint32_t enabled_stream_buffers_mask = 0;
2115 int i;
2116
2117 for (i = 0; i < so->num_outputs; i++)
2118 enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << (so->output[i].stream * 4);
2119 sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
2120 sctx->b.streamout.stride_in_dw = shader->so.stride;
2121 }
2122
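/* Select the current variant of every bound shader stage, bind the resulting
 * PM4 states, and update dependent state (rings, VGT config, SPI mappings,
 * scratch size). Returns false on failure. */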
2123 bool si_update_shaders(struct si_context *sctx)
2124 {
2125 struct pipe_context *ctx = (struct pipe_context*)sctx;
2126 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
2127 int r;
2128
2129 /* Update stages before GS. */
2130 if (sctx->tes_shader.cso) {
2131 if (!sctx->tf_ring) {
2132 si_init_tess_factor_ring(sctx);
2133 if (!sctx->tf_ring)
2134 return false;
2135 }
2136
2137 /* VS as LS */
2138 r = si_shader_select(ctx, &sctx->vs_shader);
2139 if (r)
2140 return false;
2141 si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
2142
2143 if (sctx->tcs_shader.cso) {
2144 r = si_shader_select(ctx, &sctx->tcs_shader);
2145 if (r)
2146 return false;
2147 si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
2148 } else {
2149 if (!sctx->fixed_func_tcs_shader.cso) {
2150 si_generate_fixed_func_tcs(sctx);
2151 if (!sctx->fixed_func_tcs_shader.cso)
2152 return false;
2153 }
2154
2155 r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader);
2156 if (r)
2157 return false;
2158 si_pm4_bind_state(sctx, hs,
2159 sctx->fixed_func_tcs_shader.current->pm4);
2160 }
2161
2162 r = si_shader_select(ctx, &sctx->tes_shader);
2163 if (r)
2164 return false;
2165
2166 if (sctx->gs_shader.cso) {
2167 /* TES as ES */
2168 si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
2169 } else {
2170 /* TES as VS */
2171 si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
2172 si_update_so(sctx, sctx->tes_shader.cso);
2173 }
2174 } else if (sctx->gs_shader.cso) {
2175 /* VS as ES */
2176 r = si_shader_select(ctx, &sctx->vs_shader);
2177 if (r)
2178 return false;
2179 si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
2180 } else {
2181 /* VS as VS */
2182 r = si_shader_select(ctx, &sctx->vs_shader);
2183 if (r)
2184 return false;
2185 si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
2186 si_update_so(sctx, sctx->vs_shader.cso);
2187 }
2188
2189 /* Update GS. */
2190 if (sctx->gs_shader.cso) {
2191 r = si_shader_select(ctx, &sctx->gs_shader);
2192 if (r)
2193 return false;
2194 si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
2195 si_pm4_bind_state(sctx, vs, sctx->gs_shader.current->gs_copy_shader->pm4);
2196 si_update_so(sctx, sctx->gs_shader.cso);
2197
2198 if (!si_update_gs_ring_buffers(sctx))
2199 return false;
2200
2201 si_update_gsvs_ring_bindings(sctx);
2202 } else {
2203 si_pm4_bind_state(sctx, gs, NULL);
2204 si_pm4_bind_state(sctx, es, NULL);
2205 }
2206
2207 si_update_vgt_shader_config(sctx);
2208
2209 if (sctx->ps_shader.cso) {
2210 unsigned db_shader_control;
2211
2212 r = si_shader_select(ctx, &sctx->ps_shader);
2213 if (r)
2214 return false;
2215 si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);
2216
2217 db_shader_control =
2218 sctx->ps_shader.cso->db_shader_control |
2219 S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS) |
2220 S_02880C_Z_ORDER(sctx->ps_shader.current->z_order);
2221
2222 if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
2223 sctx->sprite_coord_enable != rs->sprite_coord_enable ||
2224 sctx->flatshade != rs->flatshade) {
2225 sctx->sprite_coord_enable = rs->sprite_coord_enable;
2226 sctx->flatshade = rs->flatshade;
2227 si_mark_atom_dirty(sctx, &sctx->spi_map);
2228 }
2229
2230 if (sctx->b.family == CHIP_STONEY && si_pm4_state_changed(sctx, ps))
2231 si_mark_atom_dirty(sctx, &sctx->cb_render_state);
2232
2233 if (sctx->ps_db_shader_control != db_shader_control) {
2234 sctx->ps_db_shader_control = db_shader_control;
2235 si_mark_atom_dirty(sctx, &sctx->db_render_state);
2236 }
2237
2238 if (sctx->smoothing_enabled != sctx->ps_shader.current->key.ps.epilog.poly_line_smoothing) {
2239 sctx->smoothing_enabled = sctx->ps_shader.current->key.ps.epilog.poly_line_smoothing;
2240 si_mark_atom_dirty(sctx, &sctx->msaa_config);
2241
2242 if (sctx->b.chip_class == SI)
2243 si_mark_atom_dirty(sctx, &sctx->db_render_state);
2244
2245 if (sctx->framebuffer.nr_samples <= 1)
2246 si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
2247 }
2248 }
2249
2250 if (si_pm4_state_changed(sctx, ls) ||
2251 si_pm4_state_changed(sctx, hs) ||
2252 si_pm4_state_changed(sctx, es) ||
2253 si_pm4_state_changed(sctx, gs) ||
2254 si_pm4_state_changed(sctx, vs) ||
2255 si_pm4_state_changed(sctx, ps)) {
2256 if (!si_update_spi_tmpring_size(sctx))
2257 return false;
2258 }
2259
2260 sctx->do_update_shaders = false;
2261 return true;
2262 }
2263
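/* Register the SPI map atom and hook up the pipe_context create/bind/delete
 * callbacks for all shader stages. */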
2264 void si_init_shader_functions(struct si_context *sctx)
2265 {
2266 si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);
2267
2268 sctx->b.b.create_vs_state = si_create_shader_selector;
2269 sctx->b.b.create_tcs_state = si_create_shader_selector;
2270 sctx->b.b.create_tes_state = si_create_shader_selector;
2271 sctx->b.b.create_gs_state = si_create_shader_selector;
2272 sctx->b.b.create_fs_state = si_create_shader_selector;
2273
2274 sctx->b.b.bind_vs_state = si_bind_vs_shader;
2275 sctx->b.b.bind_tcs_state = si_bind_tcs_shader;
2276 sctx->b.b.bind_tes_state = si_bind_tes_shader;
2277 sctx->b.b.bind_gs_state = si_bind_gs_shader;
2278 sctx->b.b.bind_fs_state = si_bind_ps_shader;
2279
2280 sctx->b.b.delete_vs_state = si_delete_shader_selector;
2281 sctx->b.b.delete_tcs_state = si_delete_shader_selector;
2282 sctx->b.b.delete_tes_state = si_delete_shader_selector;
2283 sctx->b.b.delete_gs_state = si_delete_shader_selector;
2284 sctx->b.b.delete_fs_state = si_delete_shader_selector;
2285 }