radeonsi: make si_compile_llvm return bool
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "util/u_memory.h"
26 #include "tgsi/tgsi_strings.h"
27 #include "tgsi/tgsi_from_mesa.h"
28
29 #include "ac_exp_param.h"
30 #include "ac_rtld.h"
31 #include "si_shader_internal.h"
32 #include "si_pipe.h"
33 #include "sid.h"
34
35 #include "compiler/nir/nir.h"
36 #include "compiler/nir/nir_serialize.h"
37
38 static const char scratch_rsrc_dword0_symbol[] =
39 "SCRATCH_RSRC_DWORD0";
40
41 static const char scratch_rsrc_dword1_symbol[] =
42 "SCRATCH_RSRC_DWORD1";
43
44 static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
45
46 /** Whether the shader runs as a combination of multiple API shaders */
47 bool si_is_multi_part_shader(struct si_shader_context *ctx)
48 {
49 if (ctx->screen->info.chip_class <= GFX8)
50 return false;
51
52 return ctx->shader->key.as_ls ||
53 ctx->shader->key.as_es ||
54 ctx->type == PIPE_SHADER_TESS_CTRL ||
55 ctx->type == PIPE_SHADER_GEOMETRY;
56 }
57
58 /** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
59 bool si_is_merged_shader(struct si_shader_context *ctx)
60 {
61 return ctx->shader->key.as_ngg || si_is_multi_part_shader(ctx);
62 }
63
64 /**
65 * Returns a unique index for a per-patch semantic name and index. The index
66 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
67 * can be calculated.
68 */
69 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
70 {
71 switch (semantic_name) {
72 case TGSI_SEMANTIC_TESSOUTER:
73 return 0;
74 case TGSI_SEMANTIC_TESSINNER:
75 return 1;
76 case TGSI_SEMANTIC_PATCH:
77 assert(index < 30);
78 return 2 + index;
79
80 default:
81 assert(!"invalid semantic name");
82 return 0;
83 }
84 }
85
86 /**
87 * Returns a unique index for a semantic name and index. The index must be
88 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
89 * calculated.
90 */
91 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
92 unsigned is_varying)
93 {
94 switch (semantic_name) {
95 case TGSI_SEMANTIC_POSITION:
96 return 0;
97 case TGSI_SEMANTIC_GENERIC:
98 		/* Since some shader stages use the highest used IO index
99 		 * to determine the size to allocate for inputs/outputs
100 		 * (in LDS, tess and GS rings), GENERIC should be placed right
101 * after POSITION to make that size as small as possible.
102 */
103 if (index < SI_MAX_IO_GENERIC)
104 return 1 + index;
105
106 assert(!"invalid generic index");
107 return 0;
108 case TGSI_SEMANTIC_FOG:
109 return SI_MAX_IO_GENERIC + 1;
110 case TGSI_SEMANTIC_COLOR:
111 assert(index < 2);
112 return SI_MAX_IO_GENERIC + 2 + index;
113 case TGSI_SEMANTIC_BCOLOR:
114 assert(index < 2);
115 /* If it's a varying, COLOR and BCOLOR alias. */
116 if (is_varying)
117 return SI_MAX_IO_GENERIC + 2 + index;
118 else
119 return SI_MAX_IO_GENERIC + 4 + index;
120 case TGSI_SEMANTIC_TEXCOORD:
121 assert(index < 8);
122 return SI_MAX_IO_GENERIC + 6 + index;
123
124 /* These are rarely used between LS and HS or ES and GS. */
125 case TGSI_SEMANTIC_CLIPDIST:
126 assert(index < 2);
127 return SI_MAX_IO_GENERIC + 6 + 8 + index;
128 case TGSI_SEMANTIC_CLIPVERTEX:
129 return SI_MAX_IO_GENERIC + 6 + 8 + 2;
130 case TGSI_SEMANTIC_PSIZE:
131 return SI_MAX_IO_GENERIC + 6 + 8 + 3;
132
133 /* These can't be written by LS, HS, and ES. */
134 case TGSI_SEMANTIC_LAYER:
135 return SI_MAX_IO_GENERIC + 6 + 8 + 4;
136 case TGSI_SEMANTIC_VIEWPORT_INDEX:
137 return SI_MAX_IO_GENERIC + 6 + 8 + 5;
138 case TGSI_SEMANTIC_PRIMID:
139 STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
140 return SI_MAX_IO_GENERIC + 6 + 8 + 6;
141 default:
142 fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
143 assert(!"invalid semantic name");
144 return 0;
145 }
146 }
147
148 static void si_dump_streamout(struct pipe_stream_output_info *so)
149 {
150 unsigned i;
151
152 if (so->num_outputs)
153 fprintf(stderr, "STREAMOUT\n");
154
155 for (i = 0; i < so->num_outputs; i++) {
156 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
157 so->output[i].start_component;
158 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
159 i, so->output[i].output_buffer,
160 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
161 so->output[i].register_index,
162 mask & 1 ? "x" : "",
163 mask & 2 ? "y" : "",
164 mask & 4 ? "z" : "",
165 mask & 8 ? "w" : "");
166 }
167 }
168
169 static void declare_streamout_params(struct si_shader_context *ctx,
170 struct pipe_stream_output_info *so)
171 {
172 if (ctx->screen->use_ngg_streamout) {
173 if (ctx->type == PIPE_SHADER_TESS_EVAL)
174 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
175 return;
176 }
177
178 /* Streamout SGPRs. */
179 if (so->num_outputs) {
180 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
181 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
182 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
183 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
184 }
185
186 /* A streamout buffer offset is loaded if the stride is non-zero. */
187 for (int i = 0; i < 4; i++) {
188 if (!so->stride[i])
189 continue;
190
191 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
192 }
193 }
194
195 unsigned si_get_max_workgroup_size(const struct si_shader *shader)
196 {
197 switch (shader->selector->type) {
198 case PIPE_SHADER_VERTEX:
199 case PIPE_SHADER_TESS_EVAL:
200 return shader->key.as_ngg ? 128 : 0;
201
202 case PIPE_SHADER_TESS_CTRL:
203 /* Return this so that LLVM doesn't remove s_barrier
204 * instructions on chips where we use s_barrier. */
205 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
206
207 case PIPE_SHADER_GEOMETRY:
208 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
209
210 case PIPE_SHADER_COMPUTE:
211 break; /* see below */
212
213 default:
214 return 0;
215 }
216
217 const unsigned *properties = shader->selector->info.properties;
218 unsigned max_work_group_size =
219 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
220 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
221 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
222
223 if (!max_work_group_size) {
224 		/* This is a variable group size compute shader;
225 * compile it for the maximum possible group size.
226 */
227 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
228 }
229 return max_work_group_size;
230 }
231
232 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
233 bool assign_params)
234 {
235 enum ac_arg_type const_shader_buf_type;
236
237 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
238 ctx->shader->selector->info.shader_buffers_declared == 0)
239 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
240 else
241 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
242
243 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
244 assign_params ? &ctx->const_and_shader_buffers :
245 &ctx->other_const_and_shader_buffers);
246 }
247
248 static void declare_samplers_and_images(struct si_shader_context *ctx,
249 bool assign_params)
250 {
251 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
252 assign_params ? &ctx->samplers_and_images :
253 &ctx->other_samplers_and_images);
254 }
255
256 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
257 bool assign_params)
258 {
259 declare_const_and_shader_buffers(ctx, assign_params);
260 declare_samplers_and_images(ctx, assign_params);
261 }
262
263 static void declare_global_desc_pointers(struct si_shader_context *ctx)
264 {
265 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
266 &ctx->rw_buffers);
267 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
268 &ctx->bindless_samplers_and_images);
269 }
270
271 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
272 {
273 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
274 if (!ctx->shader->is_gs_copy_shader) {
275 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
276 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
277 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
278 }
279 }
280
281 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
282 {
283 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
284
285 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
286 if (num_vbos_in_user_sgprs) {
287 unsigned user_sgprs = ctx->args.num_sgprs_used;
288
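		/* Merged shaders (GFX9+) start with 8 system SGPRs, so drop them
		 * here to count only the user SGPRs declared so far. */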
289 if (si_is_merged_shader(ctx))
290 user_sgprs -= 8;
291 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
292
293 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
294 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
295 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
296
297 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
298 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
299 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
300 }
301 }
302
303 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
304 unsigned *num_prolog_vgprs,
305 bool ngg_cull_shader)
306 {
307 struct si_shader *shader = ctx->shader;
308
309 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
310 if (shader->key.as_ls) {
311 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
312 if (ctx->screen->info.chip_class >= GFX10) {
313 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
314 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
315 } else {
316 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
317 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
318 }
319 } else if (ctx->screen->info.chip_class >= GFX10) {
320 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
321 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
322 &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
323 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
324 } else {
325 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
326 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
327 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
328 }
329
330 if (!shader->is_gs_copy_shader) {
331 if (shader->key.opt.ngg_culling && !ngg_cull_shader) {
332 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
333 &ctx->ngg_old_thread_id);
334 }
335
336 /* Vertex load indices. */
337 if (shader->selector->info.num_inputs) {
338 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
339 &ctx->vertex_index0);
340 for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
341 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
342 }
343 *num_prolog_vgprs += shader->selector->info.num_inputs;
344 }
345 }
346
347 static void declare_vs_blit_inputs(struct si_shader_context *ctx,
348 unsigned vs_blit_property)
349 {
350 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
351 &ctx->vs_blit_inputs); /* i16 x1, y1 */
352 	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x2, y2 */
353 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
354
355 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
356 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
357 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
358 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
359 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
360 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
361 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
362 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
363 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
364 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
365 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
366 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
367 }
368 }
369
370 static void declare_tes_input_vgprs(struct si_shader_context *ctx, bool ngg_cull_shader)
371 {
372 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
373 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
374 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
375 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
376
377 if (ctx->shader->key.opt.ngg_culling && !ngg_cull_shader) {
378 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
379 &ctx->ngg_old_thread_id);
380 }
381 }
382
383 enum {
384 /* Convenient merged shader definitions. */
385 SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
386 SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
387 };
388
389 void si_add_arg_checked(struct ac_shader_args *args,
390 enum ac_arg_regfile file,
391 unsigned registers, enum ac_arg_type type,
392 struct ac_arg *arg,
393 unsigned idx)
394 {
395 assert(args->arg_count == idx);
396 ac_add_arg(args, file, registers, type, arg);
397 }
398
399 void si_create_function(struct si_shader_context *ctx, bool ngg_cull_shader)
400 {
401 struct si_shader *shader = ctx->shader;
402 LLVMTypeRef returns[AC_MAX_ARGS];
403 unsigned i, num_return_sgprs;
404 unsigned num_returns = 0;
405 unsigned num_prolog_vgprs = 0;
406 unsigned type = ctx->type;
407 unsigned vs_blit_property =
408 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
409
410 memset(&ctx->args, 0, sizeof(ctx->args));
411
412 /* Set MERGED shaders. */
413 if (ctx->screen->info.chip_class >= GFX9) {
414 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
415 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
416 else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
417 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
418 }
419
420 switch (type) {
421 case PIPE_SHADER_VERTEX:
422 declare_global_desc_pointers(ctx);
423
424 if (vs_blit_property) {
425 declare_vs_blit_inputs(ctx, vs_blit_property);
426
427 /* VGPRs */
428 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
429 break;
430 }
431
432 declare_per_stage_desc_pointers(ctx, true);
433 declare_vs_specific_input_sgprs(ctx);
434 if (!shader->is_gs_copy_shader)
435 declare_vb_descriptor_input_sgprs(ctx);
436
437 if (shader->key.as_es) {
438 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
439 &ctx->es2gs_offset);
440 } else if (shader->key.as_ls) {
441 /* no extra parameters */
442 } else {
443 /* The locations of the other parameters are assigned dynamically. */
444 declare_streamout_params(ctx, &shader->selector->so);
445 }
446
447 /* VGPRs */
448 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
449
450 /* Return values */
451 if (shader->key.opt.vs_as_prim_discard_cs) {
452 for (i = 0; i < 4; i++)
453 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
454 }
455 break;
456
457 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
458 declare_global_desc_pointers(ctx);
459 declare_per_stage_desc_pointers(ctx, true);
460 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
461 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
462 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
463 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
464 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
465 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
466
467 /* VGPRs */
468 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
469 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
470
471 /* param_tcs_offchip_offset and param_tcs_factor_offset are
472 * placed after the user SGPRs.
473 */
474 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
475 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
476 for (i = 0; i < 11; i++)
477 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
478 break;
479
480 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
481 /* Merged stages have 8 system SGPRs at the beginning. */
482 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
483 declare_per_stage_desc_pointers(ctx,
484 ctx->type == PIPE_SHADER_TESS_CTRL);
485 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
486 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
487 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
488 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
489 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
490 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
491
492 declare_global_desc_pointers(ctx);
493 declare_per_stage_desc_pointers(ctx,
494 ctx->type == PIPE_SHADER_VERTEX);
495 declare_vs_specific_input_sgprs(ctx);
496
497 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
498 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
499 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
500 declare_vb_descriptor_input_sgprs(ctx);
501
502 /* VGPRs (first TCS, then VS) */
503 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
504 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
505
506 if (ctx->type == PIPE_SHADER_VERTEX) {
507 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
508
509 /* LS return values are inputs to the TCS main shader part. */
510 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
511 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
512 for (i = 0; i < 2; i++)
513 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
514 } else {
515 /* TCS return values are inputs to the TCS epilog.
516 *
517 * param_tcs_offchip_offset, param_tcs_factor_offset,
518 * param_tcs_offchip_layout, and param_rw_buffers
519 * should be passed to the epilog.
520 */
521 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
522 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
523 for (i = 0; i < 11; i++)
524 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
525 }
526 break;
527
528 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
529 /* Merged stages have 8 system SGPRs at the beginning. */
530 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
531 declare_per_stage_desc_pointers(ctx,
532 ctx->type == PIPE_SHADER_GEOMETRY);
533
534 if (ctx->shader->key.as_ngg)
535 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
536 else
537 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
538
539 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
540 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
541 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
542 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
543 &ctx->small_prim_cull_info); /* SPI_SHADER_PGM_LO_GS << 8 */
544 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
545
546 declare_global_desc_pointers(ctx);
547 if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
548 declare_per_stage_desc_pointers(ctx,
549 (ctx->type == PIPE_SHADER_VERTEX ||
550 ctx->type == PIPE_SHADER_TESS_EVAL));
551 }
552
553 if (ctx->type == PIPE_SHADER_VERTEX) {
554 if (vs_blit_property)
555 declare_vs_blit_inputs(ctx, vs_blit_property);
556 else
557 declare_vs_specific_input_sgprs(ctx);
558 } else {
559 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
560 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
561 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
562 /* Declare as many input SGPRs as the VS has. */
563 }
564
565 if (ctx->type == PIPE_SHADER_VERTEX)
566 declare_vb_descriptor_input_sgprs(ctx);
567
568 /* VGPRs (first GS, then VS/TES) */
569 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
570 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
571 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
572 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
573 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
574
575 if (ctx->type == PIPE_SHADER_VERTEX) {
576 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
577 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
578 declare_tes_input_vgprs(ctx, ngg_cull_shader);
579 }
580
581 if ((ctx->shader->key.as_es || ngg_cull_shader) &&
582 (ctx->type == PIPE_SHADER_VERTEX ||
583 ctx->type == PIPE_SHADER_TESS_EVAL)) {
584 unsigned num_user_sgprs, num_vgprs;
585
586 if (ctx->type == PIPE_SHADER_VERTEX) {
587 /* For the NGG cull shader, add 1 SGPR to hold
588 * the vertex buffer pointer.
589 */
590 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR + ngg_cull_shader;
591
592 if (ngg_cull_shader && shader->selector->num_vbos_in_user_sgprs) {
593 assert(num_user_sgprs <= 8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
594 num_user_sgprs = SI_SGPR_VS_VB_DESCRIPTOR_FIRST +
595 shader->selector->num_vbos_in_user_sgprs * 4;
596 }
597 } else {
598 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
599 }
600
601 /* The NGG cull shader has to return all 9 VGPRs + the old thread ID.
602 *
603 * The normal merged ESGS shader only has to return the 5 VGPRs
604 * for the GS stage.
605 */
606 num_vgprs = ngg_cull_shader ? 10 : 5;
607
608 /* ES return values are inputs to GS. */
609 for (i = 0; i < 8 + num_user_sgprs; i++)
610 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
611 for (i = 0; i < num_vgprs; i++)
612 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
613 }
614 break;
615
616 case PIPE_SHADER_TESS_EVAL:
617 declare_global_desc_pointers(ctx);
618 declare_per_stage_desc_pointers(ctx, true);
619 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
620 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
621 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
622
623 if (shader->key.as_es) {
624 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
625 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
626 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
627 } else {
628 declare_streamout_params(ctx, &shader->selector->so);
629 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
630 }
631
632 /* VGPRs */
633 declare_tes_input_vgprs(ctx, ngg_cull_shader);
634 break;
635
636 case PIPE_SHADER_GEOMETRY:
637 declare_global_desc_pointers(ctx);
638 declare_per_stage_desc_pointers(ctx, true);
639 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
640 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
641
642 /* VGPRs */
643 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
644 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
645 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
646 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
647 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
648 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
649 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
650 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
651 break;
652
653 case PIPE_SHADER_FRAGMENT:
654 declare_global_desc_pointers(ctx);
655 declare_per_stage_desc_pointers(ctx, true);
656 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL,
657 SI_PARAM_ALPHA_REF);
658 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
659 &ctx->args.prim_mask, SI_PARAM_PRIM_MASK);
660
661 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
662 SI_PARAM_PERSP_SAMPLE);
663 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
664 &ctx->args.persp_center, SI_PARAM_PERSP_CENTER);
665 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
666 &ctx->args.persp_centroid, SI_PARAM_PERSP_CENTROID);
667 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
668 NULL, SI_PARAM_PERSP_PULL_MODEL);
669 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
670 &ctx->args.linear_sample, SI_PARAM_LINEAR_SAMPLE);
671 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
672 &ctx->args.linear_center, SI_PARAM_LINEAR_CENTER);
673 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
674 &ctx->args.linear_centroid, SI_PARAM_LINEAR_CENTROID);
675 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT,
676 NULL, SI_PARAM_LINE_STIPPLE_TEX);
677 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
678 &ctx->args.frag_pos[0], SI_PARAM_POS_X_FLOAT);
679 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
680 &ctx->args.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
681 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
682 &ctx->args.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
683 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
684 &ctx->args.frag_pos[3], SI_PARAM_POS_W_FLOAT);
685 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
686 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
687 &ctx->args.front_face, SI_PARAM_FRONT_FACE);
688 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
689 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
690 &ctx->args.ancillary, SI_PARAM_ANCILLARY);
691 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
692 &ctx->args.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
693 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
694 &ctx->pos_fixed_pt, SI_PARAM_POS_FIXED_PT);
695
696 /* Color inputs from the prolog. */
697 if (shader->selector->info.colors_read) {
698 unsigned num_color_elements =
699 util_bitcount(shader->selector->info.colors_read);
700
701 for (i = 0; i < num_color_elements; i++)
702 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
703
704 num_prolog_vgprs += num_color_elements;
705 }
706
707 /* Outputs for the epilog. */
708 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
709 num_returns =
710 num_return_sgprs +
711 util_bitcount(shader->selector->info.colors_written) * 4 +
712 shader->selector->info.writes_z +
713 shader->selector->info.writes_stencil +
714 shader->selector->info.writes_samplemask +
715 1 /* SampleMaskIn */;
716
717 num_returns = MAX2(num_returns,
718 num_return_sgprs +
719 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
720
721 for (i = 0; i < num_return_sgprs; i++)
722 returns[i] = ctx->ac.i32;
723 for (; i < num_returns; i++)
724 returns[i] = ctx->ac.f32;
725 break;
726
727 case PIPE_SHADER_COMPUTE:
728 declare_global_desc_pointers(ctx);
729 declare_per_stage_desc_pointers(ctx, true);
730 if (shader->selector->info.uses_grid_size)
731 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT,
732 &ctx->args.num_work_groups);
733 if (shader->selector->info.uses_block_size &&
734 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
735 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
736
737 unsigned cs_user_data_dwords =
738 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
739 if (cs_user_data_dwords) {
740 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT,
741 &ctx->cs_user_data);
742 }
743
744 /* Hardware SGPRs. */
745 for (i = 0; i < 3; i++) {
746 if (shader->selector->info.uses_block_id[i]) {
747 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
748 &ctx->args.workgroup_ids[i]);
749 }
750 }
751 if (shader->selector->info.uses_subgroup_info)
752 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
753
754 /* Hardware VGPRs. */
755 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
756 &ctx->args.local_invocation_ids);
757 break;
758 default:
759 assert(0 && "unimplemented shader");
760 return;
761 }
762
763 si_llvm_create_func(ctx, ngg_cull_shader ? "ngg_cull_main" : "main",
764 returns, num_returns, si_get_max_workgroup_size(shader));
765
766 /* Reserve register locations for VGPR inputs the PS prolog may need. */
767 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
768 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
769 "InitialPSInputAddr",
770 S_0286D0_PERSP_SAMPLE_ENA(1) |
771 S_0286D0_PERSP_CENTER_ENA(1) |
772 S_0286D0_PERSP_CENTROID_ENA(1) |
773 S_0286D0_LINEAR_SAMPLE_ENA(1) |
774 S_0286D0_LINEAR_CENTER_ENA(1) |
775 S_0286D0_LINEAR_CENTROID_ENA(1) |
776 S_0286D0_FRONT_FACE_ENA(1) |
777 S_0286D0_ANCILLARY_ENA(1) |
778 S_0286D0_POS_FIXED_PT_ENA(1));
779 }
780
781 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
782 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
783
784 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
785 shader->info.num_input_vgprs -= num_prolog_vgprs;
786
787 if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
788 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
789 /* The LSHS size is not known until draw time, so we append it
790 * at the end of whatever LDS use there may be in the rest of
791 * the shader (currently none, unless LLVM decides to do its
792 * own LDS-based lowering).
793 */
794 ctx->ac.lds = LLVMAddGlobalInAddressSpace(
795 ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
796 "__lds_end", AC_ADDR_SPACE_LDS);
797 LLVMSetAlignment(ctx->ac.lds, 256);
798 } else {
799 ac_declare_lds_as_pointer(&ctx->ac);
800 }
801 }
802
803 /* Unlike radv, we override these arguments in the prolog, so to the
804 * API shader they appear as normal arguments.
805 */
806 if (ctx->type == PIPE_SHADER_VERTEX) {
807 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
808 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
809 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
810 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
811 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
812 }
813 }
814
815 /* For the UMR disassembler. */
816 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
817 #define DEBUGGER_NUM_MARKERS 5
818
819 static bool si_shader_binary_open(struct si_screen *screen,
820 struct si_shader *shader,
821 struct ac_rtld_binary *rtld)
822 {
823 const struct si_shader_selector *sel = shader->selector;
824 const char *part_elfs[5];
825 size_t part_sizes[5];
826 unsigned num_parts = 0;
827
828 #define add_part(shader_or_part) \
829 if (shader_or_part) { \
830 part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
831 part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
832 num_parts++; \
833 }
834
835 add_part(shader->prolog);
836 add_part(shader->previous_stage);
837 add_part(shader->prolog2);
838 add_part(shader);
839 add_part(shader->epilog);
840
841 #undef add_part
842
843 struct ac_rtld_symbol lds_symbols[2];
844 unsigned num_lds_symbols = 0;
845
846 if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
847 (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
848 /* We add this symbol even on LLVM <= 8 to ensure that
849 * shader->config.lds_size is set correctly below.
850 */
851 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
852 sym->name = "esgs_ring";
853 sym->size = shader->gs_info.esgs_ring_size;
854 sym->align = 64 * 1024;
855 }
856
857 if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
858 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
859 sym->name = "ngg_emit";
860 sym->size = shader->ngg.ngg_emit_size * 4;
861 sym->align = 4;
862 }
863
864 bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
865 .info = &screen->info,
866 .options = {
867 .halt_at_entry = screen->options.halt_shaders,
868 },
869 .shader_type = tgsi_processor_to_shader_stage(sel->type),
870 .wave_size = si_get_shader_wave_size(shader),
871 .num_parts = num_parts,
872 .elf_ptrs = part_elfs,
873 .elf_sizes = part_sizes,
874 .num_shared_lds_symbols = num_lds_symbols,
875 .shared_lds_symbols = lds_symbols });
876
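	/* Convert the LDS size from bytes to the hardware allocation
	 * granularity (256 bytes on GFX6, 512 bytes on GFX7+);
	 * config.lds_size is stored in these units. */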
877 if (rtld->lds_size > 0) {
878 unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
879 shader->config.lds_size =
880 align(rtld->lds_size, alloc_granularity) / alloc_granularity;
881 }
882
883 return ok;
884 }
885
886 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
887 {
888 struct ac_rtld_binary rtld;
889 si_shader_binary_open(screen, shader, &rtld);
890 return rtld.exec_size;
891 }
892
893 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
894 {
895 uint64_t *scratch_va = data;
896
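	/* Patch the scratch resource descriptor at upload time: dword0 gets the
	 * low 32 bits of the scratch address, dword1 gets the high bits plus
	 * the swizzle-enable bit. */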
897 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
898 *value = (uint32_t)*scratch_va;
899 return true;
900 }
901 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
902 /* Enable scratch coalescing. */
903 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) |
904 S_008F04_SWIZZLE_ENABLE(1);
905 return true;
906 }
907
908 return false;
909 }
910
911 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
912 uint64_t scratch_va)
913 {
914 struct ac_rtld_binary binary;
915 if (!si_shader_binary_open(sscreen, shader, &binary))
916 return false;
917
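	/* Release any previously uploaded code BO and allocate a new one large
	 * enough for the relocated binary. */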
918 si_resource_reference(&shader->bo, NULL);
919 shader->bo = si_aligned_buffer_create(&sscreen->b,
920 sscreen->info.cpdma_prefetch_writes_memory ?
921 0 : SI_RESOURCE_FLAG_READ_ONLY,
922 PIPE_USAGE_IMMUTABLE,
923 align(binary.rx_size, SI_CPDMA_ALIGNMENT),
924 256);
925 if (!shader->bo)
926 return false;
927
928 /* Upload. */
929 struct ac_rtld_upload_info u = {};
930 u.binary = &binary;
931 u.get_external_symbol = si_get_external_symbol;
932 u.cb_data = &scratch_va;
933 u.rx_va = shader->bo->gpu_address;
934 u.rx_ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
935 PIPE_TRANSFER_READ_WRITE |
936 PIPE_TRANSFER_UNSYNCHRONIZED |
937 RADEON_TRANSFER_TEMPORARY);
938 if (!u.rx_ptr)
939 return false;
940
941 bool ok = ac_rtld_upload(&u);
942
943 sscreen->ws->buffer_unmap(shader->bo->buf);
944 ac_rtld_close(&binary);
945
946 return ok;
947 }
948
949 static void si_shader_dump_disassembly(struct si_screen *screen,
950 const struct si_shader_binary *binary,
951 enum pipe_shader_type shader_type,
952 unsigned wave_size,
953 struct pipe_debug_callback *debug,
954 const char *name, FILE *file)
955 {
956 struct ac_rtld_binary rtld_binary;
957
958 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
959 .info = &screen->info,
960 .shader_type = tgsi_processor_to_shader_stage(shader_type),
961 .wave_size = wave_size,
962 .num_parts = 1,
963 .elf_ptrs = &binary->elf_buffer,
964 .elf_sizes = &binary->elf_size }))
965 return;
966
967 const char *disasm;
968 size_t nbytes;
969
970 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
971 goto out;
972
973 if (nbytes > INT_MAX)
974 goto out;
975
976 if (debug && debug->debug_message) {
977 /* Very long debug messages are cut off, so send the
978 * disassembly one line at a time. This causes more
979 * overhead, but on the plus side it simplifies
980 * parsing of resulting logs.
981 */
982 pipe_debug_message(debug, SHADER_INFO,
983 "Shader Disassembly Begin");
984
985 uint64_t line = 0;
986 while (line < nbytes) {
987 int count = nbytes - line;
988 const char *nl = memchr(disasm + line, '\n', nbytes - line);
989 if (nl)
990 count = nl - (disasm + line);
991
992 if (count) {
993 pipe_debug_message(debug, SHADER_INFO,
994 "%.*s", count, disasm + line);
995 }
996
997 line += count + 1;
998 }
999
1000 pipe_debug_message(debug, SHADER_INFO,
1001 "Shader Disassembly End");
1002 }
1003
1004 if (file) {
1005 fprintf(file, "Shader %s disassembly:\n", name);
1006 		fprintf(file, "%.*s", (int)nbytes, disasm);
1007 }
1008
1009 out:
1010 ac_rtld_close(&rtld_binary);
1011 }
1012
1013 static void si_calculate_max_simd_waves(struct si_shader *shader)
1014 {
1015 struct si_screen *sscreen = shader->selector->screen;
1016 struct ac_shader_config *conf = &shader->config;
1017 unsigned num_inputs = shader->selector->info.num_inputs;
1018 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
1019 unsigned lds_per_wave = 0;
1020 unsigned max_simd_waves;
1021
1022 max_simd_waves = sscreen->info.max_wave64_per_simd;
1023
1024 /* Compute LDS usage for PS. */
1025 switch (shader->selector->type) {
1026 case PIPE_SHADER_FRAGMENT:
1027 /* The minimum usage per wave is (num_inputs * 48). The maximum
1028 * usage is (num_inputs * 48 * 16).
1029 * We can get anything in between and it varies between waves.
1030 *
1031 * The 48 bytes per input for a single primitive is equal to
1032 * 4 bytes/component * 4 components/input * 3 points.
1033 *
1034 * Other stages don't know the size at compile time or don't
1035 * allocate LDS per wave, but instead they do it per thread group.
1036 */
1037 lds_per_wave = conf->lds_size * lds_increment +
1038 align(num_inputs * 48, lds_increment);
1039 break;
1040 case PIPE_SHADER_COMPUTE:
1041 if (shader->selector) {
1042 unsigned max_workgroup_size =
1043 si_get_max_workgroup_size(shader);
1044 lds_per_wave = (conf->lds_size * lds_increment) /
1045 DIV_ROUND_UP(max_workgroup_size,
1046 sscreen->compute_wave_size);
1047 }
1048 break;
1049 default:;
1050 }
1051
1052 /* Compute the per-SIMD wave counts. */
1053 if (conf->num_sgprs) {
1054 max_simd_waves =
1055 MIN2(max_simd_waves,
1056 sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
1057 }
1058
1059 if (conf->num_vgprs) {
1060 /* Always print wave limits as Wave64, so that we can compare
1061 * Wave32 and Wave64 with shader-db fairly. */
1062 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
1063 max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
1064 }
1065
1066 /* LDS is 64KB per CU (4 SIMDs) on GFX6-9, which is 16KB per SIMD (usage above
1067 * 16KB makes some SIMDs unoccupied).
1068 *
1069 * LDS is 128KB in WGP mode and 64KB in CU mode. Assume the WGP mode is used.
1070 */
1071 unsigned max_lds_size = sscreen->info.chip_class >= GFX10 ? 128*1024 : 64*1024;
1072 unsigned max_lds_per_simd = max_lds_size / 4;
1073 if (lds_per_wave)
1074 max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
1075
1076 shader->info.max_simd_waves = max_simd_waves;
1077 }
1078
1079 void si_shader_dump_stats_for_shader_db(struct si_screen *screen,
1080 struct si_shader *shader,
1081 struct pipe_debug_callback *debug)
1082 {
1083 const struct ac_shader_config *conf = &shader->config;
1084
1085 if (screen->options.debug_disassembly)
1086 si_shader_dump_disassembly(screen, &shader->binary,
1087 shader->selector->type,
1088 si_get_shader_wave_size(shader),
1089 debug, "main", NULL);
1090
1091 pipe_debug_message(debug, SHADER_INFO,
1092 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
1093 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
1094 "Spilled VGPRs: %d PrivMem VGPRs: %d",
1095 conf->num_sgprs, conf->num_vgprs,
1096 si_get_shader_binary_size(screen, shader),
1097 conf->lds_size, conf->scratch_bytes_per_wave,
1098 shader->info.max_simd_waves, conf->spilled_sgprs,
1099 conf->spilled_vgprs, shader->info.private_mem_vgprs);
1100 }
1101
1102 static void si_shader_dump_stats(struct si_screen *sscreen,
1103 struct si_shader *shader,
1104 FILE *file,
1105 bool check_debug_option)
1106 {
1107 const struct ac_shader_config *conf = &shader->config;
1108
1109 if (!check_debug_option ||
1110 si_can_dump_shader(sscreen, shader->selector->type)) {
1111 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
1112 fprintf(file, "*** SHADER CONFIG ***\n"
1113 "SPI_PS_INPUT_ADDR = 0x%04x\n"
1114 "SPI_PS_INPUT_ENA = 0x%04x\n",
1115 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
1116 }
1117
1118 fprintf(file, "*** SHADER STATS ***\n"
1119 "SGPRS: %d\n"
1120 "VGPRS: %d\n"
1121 "Spilled SGPRs: %d\n"
1122 "Spilled VGPRs: %d\n"
1123 "Private memory VGPRs: %d\n"
1124 "Code Size: %d bytes\n"
1125 "LDS: %d blocks\n"
1126 "Scratch: %d bytes per wave\n"
1127 "Max Waves: %d\n"
1128 "********************\n\n\n",
1129 conf->num_sgprs, conf->num_vgprs,
1130 conf->spilled_sgprs, conf->spilled_vgprs,
1131 shader->info.private_mem_vgprs,
1132 si_get_shader_binary_size(sscreen, shader),
1133 conf->lds_size, conf->scratch_bytes_per_wave,
1134 shader->info.max_simd_waves);
1135 }
1136 }
1137
1138 const char *si_get_shader_name(const struct si_shader *shader)
1139 {
1140 switch (shader->selector->type) {
1141 case PIPE_SHADER_VERTEX:
1142 if (shader->key.as_es)
1143 return "Vertex Shader as ES";
1144 else if (shader->key.as_ls)
1145 return "Vertex Shader as LS";
1146 else if (shader->key.opt.vs_as_prim_discard_cs)
1147 return "Vertex Shader as Primitive Discard CS";
1148 else if (shader->key.as_ngg)
1149 return "Vertex Shader as ESGS";
1150 else
1151 return "Vertex Shader as VS";
1152 case PIPE_SHADER_TESS_CTRL:
1153 return "Tessellation Control Shader";
1154 case PIPE_SHADER_TESS_EVAL:
1155 if (shader->key.as_es)
1156 return "Tessellation Evaluation Shader as ES";
1157 else if (shader->key.as_ngg)
1158 return "Tessellation Evaluation Shader as ESGS";
1159 else
1160 return "Tessellation Evaluation Shader as VS";
1161 case PIPE_SHADER_GEOMETRY:
1162 if (shader->is_gs_copy_shader)
1163 return "GS Copy Shader as VS";
1164 else
1165 return "Geometry Shader";
1166 case PIPE_SHADER_FRAGMENT:
1167 return "Pixel Shader";
1168 case PIPE_SHADER_COMPUTE:
1169 return "Compute Shader";
1170 default:
1171 return "Unknown Shader";
1172 }
1173 }
1174
1175 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
1176 struct pipe_debug_callback *debug,
1177 FILE *file, bool check_debug_option)
1178 {
1179 enum pipe_shader_type shader_type = shader->selector->type;
1180
1181 if (!check_debug_option ||
1182 si_can_dump_shader(sscreen, shader_type))
1183 si_dump_shader_key(shader, file);
1184
1185 if (!check_debug_option && shader->binary.llvm_ir_string) {
1186 if (shader->previous_stage &&
1187 shader->previous_stage->binary.llvm_ir_string) {
1188 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
1189 si_get_shader_name(shader));
1190 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
1191 }
1192
1193 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
1194 si_get_shader_name(shader));
1195 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
1196 }
1197
1198 if (!check_debug_option ||
1199 (si_can_dump_shader(sscreen, shader_type) &&
1200 !(sscreen->debug_flags & DBG(NO_ASM)))) {
1201 unsigned wave_size = si_get_shader_wave_size(shader);
1202
1203 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
1204
1205 if (shader->prolog)
1206 si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
1207 shader_type, wave_size, debug, "prolog", file);
1208 if (shader->previous_stage)
1209 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
1210 shader_type, wave_size, debug, "previous stage", file);
1211 if (shader->prolog2)
1212 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
1213 shader_type, wave_size, debug, "prolog2", file);
1214
1215 si_shader_dump_disassembly(sscreen, &shader->binary, shader_type,
1216 wave_size, debug, "main", file);
1217
1218 if (shader->epilog)
1219 si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
1220 shader_type, wave_size, debug, "epilog", file);
1221 fprintf(file, "\n");
1222 }
1223
1224 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
1225 }
1226
1227 static void si_dump_shader_key_vs(const struct si_shader_key *key,
1228 const struct si_vs_prolog_bits *prolog,
1229 const char *prefix, FILE *f)
1230 {
1231 fprintf(f, " %s.instance_divisor_is_one = %u\n",
1232 prefix, prolog->instance_divisor_is_one);
1233 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
1234 prefix, prolog->instance_divisor_is_fetched);
1235 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
1236 prefix, prolog->unpack_instance_id_from_vertex_id);
1237 fprintf(f, " %s.ls_vgpr_fix = %u\n",
1238 prefix, prolog->ls_vgpr_fix);
1239
1240 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
1241 fprintf(f, " mono.vs.fix_fetch = {");
1242 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
1243 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
1244 if (i)
1245 fprintf(f, ", ");
1246 if (!fix.bits)
1247 fprintf(f, "0");
1248 else
1249 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
1250 fix.u.num_channels_m1, fix.u.format);
1251 }
1252 fprintf(f, "}\n");
1253 }
1254
1255 static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
1256 {
1257 const struct si_shader_key *key = &shader->key;
1258 enum pipe_shader_type shader_type = shader->selector->type;
1259
1260 fprintf(f, "SHADER KEY\n");
1261
1262 switch (shader_type) {
1263 case PIPE_SHADER_VERTEX:
1264 si_dump_shader_key_vs(key, &key->part.vs.prolog,
1265 "part.vs.prolog", f);
1266 fprintf(f, " as_es = %u\n", key->as_es);
1267 fprintf(f, " as_ls = %u\n", key->as_ls);
1268 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1269 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
1270 key->mono.u.vs_export_prim_id);
1271 fprintf(f, " opt.vs_as_prim_discard_cs = %u\n",
1272 key->opt.vs_as_prim_discard_cs);
1273 fprintf(f, " opt.cs_prim_type = %s\n",
1274 tgsi_primitive_names[key->opt.cs_prim_type]);
1275 fprintf(f, " opt.cs_indexed = %u\n",
1276 key->opt.cs_indexed);
1277 fprintf(f, " opt.cs_instancing = %u\n",
1278 key->opt.cs_instancing);
1279 fprintf(f, " opt.cs_primitive_restart = %u\n",
1280 key->opt.cs_primitive_restart);
1281 fprintf(f, " opt.cs_provoking_vertex_first = %u\n",
1282 key->opt.cs_provoking_vertex_first);
1283 fprintf(f, " opt.cs_need_correct_orientation = %u\n",
1284 key->opt.cs_need_correct_orientation);
1285 fprintf(f, " opt.cs_cull_front = %u\n",
1286 key->opt.cs_cull_front);
1287 fprintf(f, " opt.cs_cull_back = %u\n",
1288 key->opt.cs_cull_back);
1289 fprintf(f, " opt.cs_cull_z = %u\n",
1290 key->opt.cs_cull_z);
1291 fprintf(f, " opt.cs_halfz_clip_space = %u\n",
1292 key->opt.cs_halfz_clip_space);
1293 break;
1294
1295 case PIPE_SHADER_TESS_CTRL:
1296 if (shader->selector->screen->info.chip_class >= GFX9) {
1297 si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
1298 "part.tcs.ls_prolog", f);
1299 }
1300 fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
1301 fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
1302 break;
1303
1304 case PIPE_SHADER_TESS_EVAL:
1305 fprintf(f, " as_es = %u\n", key->as_es);
1306 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1307 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
1308 key->mono.u.vs_export_prim_id);
1309 break;
1310
1311 case PIPE_SHADER_GEOMETRY:
1312 if (shader->is_gs_copy_shader)
1313 break;
1314
1315 if (shader->selector->screen->info.chip_class >= GFX9 &&
1316 key->part.gs.es->type == PIPE_SHADER_VERTEX) {
1317 si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
1318 "part.gs.vs_prolog", f);
1319 }
1320 fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
1321 fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
1322 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1323 break;
1324
1325 case PIPE_SHADER_COMPUTE:
1326 break;
1327
1328 case PIPE_SHADER_FRAGMENT:
1329 fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
1330 fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
1331 fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
1332 fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
1333 fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
1334 fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
1335 fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
1336 fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
1337 fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
1338 fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n", key->part.ps.prolog.samplemask_log_ps_iter);
1339 fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
1340 fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
1341 fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
1342 fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
1343 fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
1344 fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
1345 fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
1346 fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
1347 fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n", key->mono.u.ps.interpolate_at_sample_force_center);
1348 fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
1349 fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
1350 fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
1351 break;
1352
1353 default:
1354 assert(0);
1355 }
1356
1357 if ((shader_type == PIPE_SHADER_GEOMETRY ||
1358 shader_type == PIPE_SHADER_TESS_EVAL ||
1359 shader_type == PIPE_SHADER_VERTEX) &&
1360 !key->as_es && !key->as_ls) {
1361 fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
1362 fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
1363 if (shader_type != PIPE_SHADER_GEOMETRY)
1364 fprintf(f, " opt.ngg_culling = 0x%x\n", key->opt.ngg_culling);
1365 }
1366 }
1367
1368 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
1369 {
1370 struct si_shader *shader = ctx->shader;
1371 struct si_shader_info *info = &shader->selector->info;
1372
1373 if ((ctx->type != PIPE_SHADER_VERTEX &&
1374 ctx->type != PIPE_SHADER_TESS_EVAL) ||
1375 shader->key.as_ls ||
1376 shader->key.as_es)
1377 return;
1378
1379 ac_optimize_vs_outputs(&ctx->ac,
1380 ctx->main_fn,
1381 shader->info.vs_output_param_offset,
1382 info->num_outputs,
1383 &shader->info.nr_param_exports);
1384 }
1385
1386 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
1387 const struct si_vs_prolog_bits *prolog_key,
1388 const struct si_shader_key *key,
1389 bool ngg_cull_shader)
1390 {
1391 /* VGPR initialization fixup for Vega10 and Raven is always done in the
1392 * VS prolog. */
1393 return sel->vs_needs_prolog ||
1394 prolog_key->ls_vgpr_fix ||
1395 prolog_key->unpack_instance_id_from_vertex_id ||
1396 (ngg_cull_shader && key->opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL);
1397 }
1398
1399 static bool si_build_main_function(struct si_shader_context *ctx,
1400 struct si_shader *shader,
1401 struct nir_shader *nir, bool free_nir,
1402 bool ngg_cull_shader)
1403 {
1404 struct si_shader_selector *sel = shader->selector;
1405 const struct si_shader_info *info = &sel->info;
1406
1407 ctx->shader = shader;
1408 ctx->type = sel->type;
1409
1410 ctx->num_const_buffers = util_last_bit(info->const_buffers_declared);
1411 ctx->num_shader_buffers = util_last_bit(info->shader_buffers_declared);
1412
1413 ctx->num_samplers = util_last_bit(info->samplers_declared);
1414 ctx->num_images = util_last_bit(info->images_declared);
1415
1416 si_llvm_init_resource_callbacks(ctx);
1417
1418 switch (ctx->type) {
1419 case PIPE_SHADER_VERTEX:
1420 si_llvm_init_vs_callbacks(ctx, ngg_cull_shader);
1421 break;
1422 case PIPE_SHADER_TESS_CTRL:
1423 si_llvm_init_tcs_callbacks(ctx);
1424 break;
1425 case PIPE_SHADER_TESS_EVAL:
1426 si_llvm_init_tes_callbacks(ctx, ngg_cull_shader);
1427 break;
1428 case PIPE_SHADER_GEOMETRY:
1429 si_llvm_init_gs_callbacks(ctx);
1430 break;
1431 case PIPE_SHADER_FRAGMENT:
1432 si_llvm_init_ps_callbacks(ctx);
1433 break;
1434 case PIPE_SHADER_COMPUTE:
1435 ctx->abi.load_local_group_size = si_llvm_get_block_size;
1436 break;
1437 default:
1438 assert(!"Unsupported shader type");
1439 return false;
1440 }
1441
1442 si_create_function(ctx, ngg_cull_shader);
1443
1444 if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)
1445 si_preload_esgs_ring(ctx);
1446
1447 if (ctx->type == PIPE_SHADER_GEOMETRY)
1448 si_preload_gs_rings(ctx);
1449 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
1450 si_llvm_preload_tes_rings(ctx);
1451
1452 if (ctx->type == PIPE_SHADER_TESS_CTRL &&
1453 sel->info.tessfactors_are_def_in_all_invocs) {
1454 for (unsigned i = 0; i < 6; i++) {
1455 ctx->invoc0_tess_factors[i] =
1456 ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
1457 }
1458 }
1459
1460 if (ctx->type == PIPE_SHADER_GEOMETRY) {
1461 for (unsigned i = 0; i < 4; i++) {
1462 ctx->gs_next_vertex[i] =
1463 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1464 }
1465 if (shader->key.as_ngg) {
1466 for (unsigned i = 0; i < 4; ++i) {
1467 ctx->gs_curprim_verts[i] =
1468 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1469 ctx->gs_generated_prims[i] =
1470 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1471 }
1472
1473 unsigned scratch_size = 8;
1474 if (sel->so.num_outputs)
1475 scratch_size = 44;
1476
1477 assert(!ctx->gs_ngg_scratch);
1478 LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, scratch_size);
1479 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1480 ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
1481 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
1482 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
1483
1484 ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1485 LLVMArrayType(ctx->ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
1486 LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
1487 LLVMSetAlignment(ctx->gs_ngg_emit, 4);
1488 }
1489 }
1490
1491 if (ctx->type != PIPE_SHADER_GEOMETRY &&
1492 (shader->key.as_ngg && !shader->key.as_es)) {
1493 /* Unconditionally declare scratch space base for streamout and
1494 * vertex compaction. Whether space is actually allocated is
1495 * determined during linking / PM4 creation.
1496 *
1497 * Add an extra dword per vertex to ensure an odd stride, which
1498 * avoids bank conflicts for SoA accesses.
1499 */
1500 if (!gfx10_is_ngg_passthrough(shader))
1501 si_llvm_declare_esgs_ring(ctx);
1502
1503 /* This is really only needed when streamout and / or vertex
1504 * compaction is enabled.
1505 */
1506 if (!ctx->gs_ngg_scratch &&
1507 (sel->so.num_outputs || shader->key.opt.ngg_culling)) {
1508 LLVMTypeRef asi32 = LLVMArrayType(ctx->ac.i32, 8);
1509 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1510 asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
1511 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
1512 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
1513 }
1514 }
1515
1516 /* For GFX9 merged shaders:
1517 * - Set EXEC for the first shader. If the prolog is present, set
1518 * EXEC there instead.
1519 * - Add a barrier before the second shader.
1520 * - In the second shader, reset EXEC to ~0 and wrap the main part in
1521 * an if-statement. This is required for correctness in geometry
1522 * shaders, to ensure that empty GS waves do not send GS_EMIT and
1523 * GS_CUT messages.
1524 *
1525 * For monolithic merged shaders, the first shader is wrapped in an
1526 * if-block together with its prolog in si_build_wrapper_function.
1527 *
1528 * NGG vertex and tess eval shaders running as the last
1529 * vertex/geometry stage handle execution explicitly using
1530 * if-statements.
1531 */
1532 if (ctx->screen->info.chip_class >= GFX9) {
1533 if (!shader->is_monolithic &&
1534 (shader->key.as_es || shader->key.as_ls) &&
1535 (ctx->type == PIPE_SHADER_TESS_EVAL ||
1536 (ctx->type == PIPE_SHADER_VERTEX &&
1537 !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog,
1538 &shader->key, ngg_cull_shader)))) {
1539 si_init_exec_from_input(ctx,
1540 ctx->merged_wave_info, 0);
1541 } else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
1542 ctx->type == PIPE_SHADER_GEOMETRY ||
1543 (shader->key.as_ngg && !shader->key.as_es)) {
1544 LLVMValueRef thread_enabled;
1545 bool nested_barrier;
1546
1547 if (!shader->is_monolithic ||
1548 (ctx->type == PIPE_SHADER_TESS_EVAL &&
1549 shader->key.as_ngg && !shader->key.as_es &&
1550 !shader->key.opt.ngg_culling))
1551 ac_init_exec_full_mask(&ctx->ac);
1552
1553 if ((ctx->type == PIPE_SHADER_VERTEX ||
1554 ctx->type == PIPE_SHADER_TESS_EVAL) &&
1555 shader->key.as_ngg && !shader->key.as_es &&
1556 !shader->key.opt.ngg_culling) {
1557 gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);
1558
1559 /* Build the primitive export at the beginning
1560 * of the shader if possible.
1561 */
1562 if (gfx10_ngg_export_prim_early(shader))
1563 gfx10_ngg_build_export_prim(ctx, NULL, NULL);
1564 }
1565
1566 if (ctx->type == PIPE_SHADER_TESS_CTRL ||
1567 ctx->type == PIPE_SHADER_GEOMETRY) {
1568 if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
1569 gfx10_ngg_gs_emit_prologue(ctx);
1570 nested_barrier = false;
1571 } else {
1572 nested_barrier = true;
1573 }
1574
1575 thread_enabled = si_is_gs_thread(ctx);
1576 } else {
1577 thread_enabled = si_is_es_thread(ctx);
1578 nested_barrier = false;
1579 }
1580
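/* Open the "thread enabled" if-block around the main part. The label
 * (11500) is an arbitrary ID that the ac flow helpers use to match this
 * ac_build_ifcc with the corresponding endif emitted later. */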
1581 ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
1582 ctx->merged_wrap_if_label = 11500;
1583 ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
1584
1585 if (nested_barrier) {
1586 /* Execute a barrier before the second shader in
1587 * a merged shader.
1588 *
1589 * Execute the barrier inside the conditional block,
1590 * so that empty waves can jump directly to s_endpgm,
1591 * which will also signal the barrier.
1592 *
1593 * This is possible in gfx9, because an empty wave
1594 * for the second shader does not participate in
1595 * the epilogue. With NGG, empty waves may still
1596 * be required to export data (e.g. GS output vertices),
1597 * so we cannot let them exit early.
1598 *
1599 * If the shader is TCS and the TCS epilog is present
1600 * and contains a barrier, it will wait there and then
1601 * reach s_endpgm.
1602 */
1603 si_llvm_emit_barrier(ctx);
1604 }
1605 }
1606 }
1607
1608 if (sel->force_correct_derivs_after_kill) {
1609 ctx->postponed_kill = ac_build_alloca_undef(&ctx->ac, ctx->ac.i1, "");
1610 /* true = don't kill. */
1611 LLVMBuildStore(ctx->ac.builder, ctx->ac.i1true,
1612 ctx->postponed_kill);
1613 }
1614
1615 bool success = si_nir_build_llvm(ctx, nir);
1616 if (free_nir)
1617 ralloc_free(nir);
1618 if (!success) {
1619 fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
1620 return false;
1621 }
1622
1623 si_llvm_build_ret(ctx, ctx->return_value);
1624 return true;
1625 }
1626
1627 /**
1628 * Compute the VS prolog key, which contains all the information needed to
1629 * build the VS prolog function, and set shader->info bits where needed.
1630 *
1631 * \param info Shader info of the vertex shader.
1632 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
1633 * \param ngg_cull_shader Whether the prolog is built for the NGG cull shader part.
1634 * \param prolog_key Key of the VS prolog
1635 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
1636 * \param key Output shader part key.
1637 */
1638 static void si_get_vs_prolog_key(const struct si_shader_info *info,
1639 unsigned num_input_sgprs,
1640 bool ngg_cull_shader,
1641 const struct si_vs_prolog_bits *prolog_key,
1642 struct si_shader *shader_out,
1643 union si_shader_part_key *key)
1644 {
1645 memset(key, 0, sizeof(*key));
1646 key->vs_prolog.states = *prolog_key;
1647 key->vs_prolog.num_input_sgprs = num_input_sgprs;
1648 key->vs_prolog.num_inputs = info->num_inputs;
1649 key->vs_prolog.as_ls = shader_out->key.as_ls;
1650 key->vs_prolog.as_es = shader_out->key.as_es;
1651 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
1652
1653 if (ngg_cull_shader) {
1654 key->vs_prolog.gs_fast_launch_tri_list = !!(shader_out->key.opt.ngg_culling &
1655 SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST);
1656 key->vs_prolog.gs_fast_launch_tri_strip = !!(shader_out->key.opt.ngg_culling &
1657 SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP);
1658 } else {
1659 key->vs_prolog.has_ngg_cull_inputs = !!shader_out->key.opt.ngg_culling;
1660 }
1661
1662 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
1663 key->vs_prolog.as_ls = 1;
1664 key->vs_prolog.num_merged_next_stage_vgprs = 2;
1665 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
1666 key->vs_prolog.as_es = 1;
1667 key->vs_prolog.num_merged_next_stage_vgprs = 5;
1668 } else if (shader_out->key.as_ngg) {
1669 key->vs_prolog.num_merged_next_stage_vgprs = 5;
1670 }
1671
1672 /* Enable loading the InstanceID VGPR. */
1673 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
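/* u_bit_consecutive(0, n) is a mask of the n lowest bits, e.g. 3 inputs
 * -> 0x7. The divisor state fields have one bit per input, so a non-zero
 * AND means at least one attribute is instanced and needs InstanceID. */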
1674
1675 if ((key->vs_prolog.states.instance_divisor_is_one |
1676 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
1677 shader_out->info.uses_instanceid = true;
1678 }
1679
1680 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
1681 struct si_shader_selector *sel)
1682 {
1683 if (!compiler->low_opt_passes)
1684 return false;
1685
1686 /* Assume a slow CPU. */
1687 assert(!sel->screen->info.has_dedicated_vram &&
1688 sel->screen->info.chip_class <= GFX8);
1689
1690 /* For a crazy dEQP test containing 2597 memory opcodes, mostly
1691 * buffer stores. */
1692 return sel->type == PIPE_SHADER_COMPUTE &&
1693 sel->info.num_memory_instructions > 1000;
1694 }
1695
1696 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel,
1697 bool *free_nir)
1698 {
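/* The selector holds either a live NIR shader or a serialized blob;
 * deserializing creates a fresh copy, so *free_nir tells the caller to
 * ralloc_free() it when done. */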
1699 *free_nir = false;
1700
1701 if (sel->nir) {
1702 return sel->nir;
1703 } else if (sel->nir_binary) {
1704 struct pipe_screen *screen = &sel->screen->b;
1705 const void *options =
1706 screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
1707 sel->type);
1708
1709 struct blob_reader blob_reader;
1710 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
1711 *free_nir = true;
1712 return nir_deserialize(NULL, options, &blob_reader);
1713 }
1714 return NULL;
1715 }
1716
1717 int si_compile_shader(struct si_screen *sscreen,
1718 struct ac_llvm_compiler *compiler,
1719 struct si_shader *shader,
1720 struct pipe_debug_callback *debug)
1721 {
1722 struct si_shader_selector *sel = shader->selector;
1723 struct si_shader_context ctx;
1724 bool free_nir;
1725 struct nir_shader *nir = get_nir_shader(sel, &free_nir);
1726
1727 /* Dump NIR before doing NIR->LLVM conversion in case the
1728 * conversion fails. */
1729 if (si_can_dump_shader(sscreen, sel->type) &&
1730 !(sscreen->debug_flags & DBG(NO_NIR))) {
1731 nir_print_shader(nir, stderr);
1732 si_dump_streamout(&sel->so);
1733 }
1734
1735 si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));
1736
1737 memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
1738 sizeof(shader->info.vs_output_param_offset));
1739
1740 shader->info.uses_instanceid = sel->info.uses_instanceid;
1741
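/* With NGG culling, build the cull shader as its own function first; the
 * regular main part is built next and the two are combined into one
 * wrapper function in the monolithic VS/TES paths below. */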
1742 LLVMValueRef ngg_cull_main_fn = NULL;
1743 if (shader->key.opt.ngg_culling) {
1744 if (!si_build_main_function(&ctx, shader, nir, false, true)) {
1745 si_llvm_dispose(&ctx);
1746 return -1;
1747 }
1748 ngg_cull_main_fn = ctx.main_fn;
1749 ctx.main_fn = NULL;
1750 }
1751
1752 if (!si_build_main_function(&ctx, shader, nir, free_nir, false)) {
1753 si_llvm_dispose(&ctx);
1754 return -1;
1755 }
1756
1757 if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
1758 LLVMValueRef parts[4];
1759 unsigned num_parts = 0;
1760 bool has_prolog = false;
1761 LLVMValueRef main_fn = ctx.main_fn;
1762
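/* Sketch of the part order assembled below:
 *   [VS prolog (cull variant)] [NGG cull shader] [VS prolog] [VS main]
 * with each prolog omitted when si_vs_needs_prolog() says it isn't needed. */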
1763 if (ngg_cull_main_fn) {
1764 if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog,
1765 &shader->key, true)) {
1766 union si_shader_part_key prolog_key;
1767 si_get_vs_prolog_key(&sel->info,
1768 shader->info.num_input_sgprs,
1769 true,
1770 &shader->key.part.vs.prolog,
1771 shader, &prolog_key);
1772 prolog_key.vs_prolog.is_monolithic = true;
1773 si_llvm_build_vs_prolog(&ctx, &prolog_key);
1774 parts[num_parts++] = ctx.main_fn;
1775 has_prolog = true;
1776 }
1777 parts[num_parts++] = ngg_cull_main_fn;
1778 }
1779
1780 if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog,
1781 &shader->key, false)) {
1782 union si_shader_part_key prolog_key;
1783 si_get_vs_prolog_key(&sel->info,
1784 shader->info.num_input_sgprs,
1785 false,
1786 &shader->key.part.vs.prolog,
1787 shader, &prolog_key);
1788 prolog_key.vs_prolog.is_monolithic = true;
1789 si_llvm_build_vs_prolog(&ctx, &prolog_key);
1790 parts[num_parts++] = ctx.main_fn;
1791 has_prolog = true;
1792 }
1793 parts[num_parts++] = main_fn;
1794
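/* The last two arguments index into parts[]: the first non-prolog part
 * and where the next merged stage would start (0 here, since nothing is
 * merged after the VS). */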
1795 si_build_wrapper_function(&ctx, parts, num_parts,
1796 has_prolog ? 1 : 0, 0);
1797
1798 if (ctx.shader->key.opt.vs_as_prim_discard_cs)
1799 si_build_prim_discard_compute_shader(&ctx);
1800 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_EVAL &&
1801 ngg_cull_main_fn) {
1802 LLVMValueRef parts[2];
1803
1804 parts[0] = ngg_cull_main_fn;
1805 parts[1] = ctx.main_fn;
1806
1807 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
1808 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
1809 if (sscreen->info.chip_class >= GFX9) {
1810 struct si_shader_selector *ls = shader->key.part.tcs.ls;
1811 LLVMValueRef parts[4];
1812 bool vs_needs_prolog =
1813 si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog,
1814 &shader->key, false);
1815
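/* GFX9 merged LS+HS; parts[] is assembled as:
 *   [0] LS (VS) prolog, optional   [1] LS main
 *   [2] TCS main                   [3] TCS epilog */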
1816 /* TCS main part */
1817 parts[2] = ctx.main_fn;
1818
1819 /* TCS epilog */
1820 union si_shader_part_key tcs_epilog_key;
1821 memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
1822 tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
1823 si_llvm_build_tcs_epilog(&ctx, &tcs_epilog_key);
1824 parts[3] = ctx.main_fn;
1825
1826 /* VS as LS main part */
1827 nir = get_nir_shader(ls, &free_nir);
1828 struct si_shader shader_ls = {};
1829 shader_ls.selector = ls;
1830 shader_ls.key.as_ls = 1;
1831 shader_ls.key.mono = shader->key.mono;
1832 shader_ls.key.opt = shader->key.opt;
1833 shader_ls.is_monolithic = true;
1834
1835 if (!si_build_main_function(&ctx, &shader_ls, nir, free_nir, false)) {
1836 si_llvm_dispose(&ctx);
1837 return -1;
1838 }
1839 shader->info.uses_instanceid |= ls->info.uses_instanceid;
1840 parts[1] = ctx.main_fn;
1841
1842 /* LS prolog */
1843 if (vs_needs_prolog) {
1844 union si_shader_part_key vs_prolog_key;
1845 si_get_vs_prolog_key(&ls->info,
1846 shader_ls.info.num_input_sgprs,
1847 false,
1848 &shader->key.part.tcs.ls_prolog,
1849 shader, &vs_prolog_key);
1850 vs_prolog_key.vs_prolog.is_monolithic = true;
1851 si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
1852 parts[0] = ctx.main_fn;
1853 }
1854
1855 /* Reset the shader context. */
1856 ctx.shader = shader;
1857 ctx.type = PIPE_SHADER_TESS_CTRL;
1858
1859 si_build_wrapper_function(&ctx,
1860 parts + !vs_needs_prolog,
1861 4 - !vs_needs_prolog, vs_needs_prolog,
1862 vs_needs_prolog ? 2 : 1);
1863 } else {
1864 LLVMValueRef parts[2];
1865 union si_shader_part_key epilog_key;
1866
1867 parts[0] = ctx.main_fn;
1868
1869 memset(&epilog_key, 0, sizeof(epilog_key));
1870 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
1871 si_llvm_build_tcs_epilog(&ctx, &epilog_key);
1872 parts[1] = ctx.main_fn;
1873
1874 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
1875 }
1876 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
1877 if (ctx.screen->info.chip_class >= GFX9) {
1878 struct si_shader_selector *es = shader->key.part.gs.es;
1879 LLVMValueRef es_prolog = NULL;
1880 LLVMValueRef es_main = NULL;
1881 LLVMValueRef gs_prolog = NULL;
1882 LLVMValueRef gs_main = ctx.main_fn;
1883
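/* GFX9 merged ES+GS; parts[] ends up as:
 *   [ES prolog (optional)] [ES main] [GS prolog] [GS main] */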
1884 /* GS prolog */
1885 union si_shader_part_key gs_prolog_key;
1886 memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
1887 gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
1888 gs_prolog_key.gs_prolog.is_monolithic = true;
1889 gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
1890 si_llvm_build_gs_prolog(&ctx, &gs_prolog_key);
1891 gs_prolog = ctx.main_fn;
1892
1893 /* ES main part */
1894 nir = get_nir_shader(es, &free_nir);
1895 struct si_shader shader_es = {};
1896 shader_es.selector = es;
1897 shader_es.key.as_es = 1;
1898 shader_es.key.as_ngg = shader->key.as_ngg;
1899 shader_es.key.mono = shader->key.mono;
1900 shader_es.key.opt = shader->key.opt;
1901 shader_es.is_monolithic = true;
1902
1903 if (!si_build_main_function(&ctx, &shader_es, nir, free_nir, false)) {
1904 si_llvm_dispose(&ctx);
1905 return -1;
1906 }
1907 shader->info.uses_instanceid |= es->info.uses_instanceid;
1908 es_main = ctx.main_fn;
1909
1910 /* ES prolog */
1911 if (es->type == PIPE_SHADER_VERTEX &&
1912 si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog,
1913 &shader->key, false)) {
1914 union si_shader_part_key vs_prolog_key;
1915 si_get_vs_prolog_key(&es->info,
1916 shader_es.info.num_input_sgprs,
1917 false,
1918 &shader->key.part.gs.vs_prolog,
1919 shader, &vs_prolog_key);
1920 vs_prolog_key.vs_prolog.is_monolithic = true;
1921 si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
1922 es_prolog = ctx.main_fn;
1923 }
1924
1925 /* Reset the shader context. */
1926 ctx.shader = shader;
1927 ctx.type = PIPE_SHADER_GEOMETRY;
1928
1929 /* Prepare the array of shader parts. */
1930 LLVMValueRef parts[4];
1931 unsigned num_parts = 0, main_part, next_first_part;
1932
1933 if (es_prolog)
1934 parts[num_parts++] = es_prolog;
1935
1936 parts[main_part = num_parts++] = es_main;
1937 parts[next_first_part = num_parts++] = gs_prolog;
1938 parts[num_parts++] = gs_main;
1939
1940 si_build_wrapper_function(&ctx, parts, num_parts,
1941 main_part, next_first_part);
1942 } else {
1943 LLVMValueRef parts[2];
1944 union si_shader_part_key prolog_key;
1945
1946 parts[1] = ctx.main_fn;
1947
1948 memset(&prolog_key, 0, sizeof(prolog_key));
1949 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
1950 si_llvm_build_gs_prolog(&ctx, &prolog_key);
1951 parts[0] = ctx.main_fn;
1952
1953 si_build_wrapper_function(&ctx, parts, 2, 1, 0);
1954 }
1955 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
1956 si_llvm_build_monolithic_ps(&ctx, shader);
1957 }
1958
1959 si_llvm_optimize_module(&ctx);
1960
1961 /* Post-optimization transformations and analysis. */
1962 si_optimize_vs_outputs(&ctx);
1963
1964 if ((debug && debug->debug_message) ||
1965 si_can_dump_shader(sscreen, ctx.type)) {
1966 ctx.shader->info.private_mem_vgprs =
1967 ac_count_scratch_private_memory(ctx.main_fn);
1968 }
1969
1970 /* Make sure the first input is a pointer and not an integer followed by inttoptr. */
1971 assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) ==
1972 LLVMPointerTypeKind);
1973
1974 /* Compile to bytecode. */
1975 if (!si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
1976 &ctx.ac, debug, ctx.type, si_get_shader_name(shader),
1977 si_should_optimize_less(compiler, shader->selector))) {
1978 si_llvm_dispose(&ctx);
1979 fprintf(stderr, "LLVM failed to compile shader\n");
1980 return -1;
1981 }
1982
1983 si_llvm_dispose(&ctx);
1984
1985 /* Validate SGPR and VGPR usage for compute shaders to detect compiler bugs
1986 * (LLVM 3.9svn was known to exceed the hardware limits).
1987 */
1988 if (sel->type == PIPE_SHADER_COMPUTE) {
1989 unsigned wave_size = sscreen->compute_wave_size;
1990 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd *
1991 (wave_size == 32 ? 2 : 1);
1992 unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
1993 unsigned max_sgprs_per_wave = 128;
1994 unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
1995 unsigned threads_per_tg = si_get_max_workgroup_size(shader);
1996 unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
1997 unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);
1998
1999 max_vgprs = max_vgprs / waves_per_simd;
2000 max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);
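/* Example: a 1024-thread workgroup in wave64 is 16 waves, i.e. 4 waves
 * per SIMD here, so each wave may use at most 1/4 of the per-SIMD
 * registers (SGPRs additionally capped at 128 per wave). */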
2001
2002 if (shader->config.num_sgprs > max_sgprs ||
2003 shader->config.num_vgprs > max_vgprs) {
2004 fprintf(stderr, "LLVM failed to compile a shader correctly: "
2005 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
2006 shader->config.num_sgprs, shader->config.num_vgprs,
2007 max_sgprs, max_vgprs);
2008
2009 /* Just terminate the process, because dependent
2010 * shaders can hang due to bad input data, but use
2011 * the env var to allow shader-db to work.
2012 */
2013 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
2014 abort();
2015 }
2016 }
2017
2018 /* Add the scratch offset to input SGPRs. */
2019 if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(&ctx))
2020 shader->info.num_input_sgprs += 1; /* scratch byte offset */
2021
2022 /* Calculate the number of fragment input VGPRs. */
2023 if (ctx.type == PIPE_SHADER_FRAGMENT) {
2024 shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(&shader->config,
2025 &shader->info.face_vgpr_index,
2026 &shader->info.ancillary_vgpr_index);
2027 }
2028
2029 si_calculate_max_simd_waves(shader);
2030 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
2031 return 0;
2032 }
2033
2034 /**
2035 * Create, compile and return a shader part (prolog or epilog).
2036 *
2037 * \param sscreen screen
2038 * \param list list of shader parts of the same category
2039 * \param type shader type
2040 * \param key shader part key
2041 * \param prolog whether the part being requested is a prolog
2042 * \param compiler LLVM compiler context
2043 * \param debug debug callback
2044 * \param build the callback responsible for building the main function
2045 * \return non-NULL on success
2046 */
2047 static struct si_shader_part *
2048 si_get_shader_part(struct si_screen *sscreen,
2049 struct si_shader_part **list,
2050 enum pipe_shader_type type,
2051 bool prolog,
2052 union si_shader_part_key *key,
2053 struct ac_llvm_compiler *compiler,
2054 struct pipe_debug_callback *debug,
2055 void (*build)(struct si_shader_context *,
2056 union si_shader_part_key *),
2057 const char *name)
2058 {
2059 struct si_shader_part *result;
2060
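/* Parts are cached per screen in a linked list keyed by *key; the mutex
 * makes the lookup-or-compile below safe against concurrent compiles. */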
2061 simple_mtx_lock(&sscreen->shader_parts_mutex);
2062
2063 /* Find existing. */
2064 for (result = *list; result; result = result->next) {
2065 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
2066 simple_mtx_unlock(&sscreen->shader_parts_mutex);
2067 return result;
2068 }
2069 }
2070
2071 /* Compile a new one. */
2072 result = CALLOC_STRUCT(si_shader_part);
2073 result->key = *key;
2074
2075 struct si_shader shader = {};
2076
2077 switch (type) {
2078 case PIPE_SHADER_VERTEX:
2079 shader.key.as_ls = key->vs_prolog.as_ls;
2080 shader.key.as_es = key->vs_prolog.as_es;
2081 shader.key.as_ngg = key->vs_prolog.as_ngg;
2082 break;
2083 case PIPE_SHADER_TESS_CTRL:
2084 assert(!prolog);
2085 shader.key.part.tcs.epilog = key->tcs_epilog.states;
2086 break;
2087 case PIPE_SHADER_GEOMETRY:
2088 assert(prolog);
2089 shader.key.as_ngg = key->gs_prolog.as_ngg;
2090 break;
2091 case PIPE_SHADER_FRAGMENT:
2092 if (prolog)
2093 shader.key.part.ps.prolog = key->ps_prolog.states;
2094 else
2095 shader.key.part.ps.epilog = key->ps_epilog.states;
2096 break;
2097 default:
2098 unreachable("bad shader part");
2099 }
2100
2101 struct si_shader_context ctx;
2102 si_llvm_context_init(&ctx, sscreen, compiler,
2103 si_get_wave_size(sscreen, type, shader.key.as_ngg,
2104 shader.key.as_es));
2105 ctx.shader = &shader;
2106 ctx.type = type;
2107
2108 build(&ctx, key);
2109
2110 /* Compile. */
2111 si_llvm_optimize_module(&ctx);
2112
2113 if (!si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
2114 &ctx.ac, debug, ctx.type, name, false)) {
2115 FREE(result);
2116 result = NULL;
2117 goto out;
2118 }
2119
2120 result->next = *list;
2121 *list = result;
2122
2123 out:
2124 si_llvm_dispose(&ctx);
2125 simple_mtx_unlock(&sscreen->shader_parts_mutex);
2126 return result;
2127 }
2128
2129 static bool si_get_vs_prolog(struct si_screen *sscreen,
2130 struct ac_llvm_compiler *compiler,
2131 struct si_shader *shader,
2132 struct pipe_debug_callback *debug,
2133 struct si_shader *main_part,
2134 const struct si_vs_prolog_bits *key)
2135 {
2136 struct si_shader_selector *vs = main_part->selector;
2137
2138 if (!si_vs_needs_prolog(vs, key, &shader->key, false))
2139 return true;
2140
2141 /* Get the prolog. */
2142 union si_shader_part_key prolog_key;
2143 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs, false,
2144 key, shader, &prolog_key);
2145
2146 shader->prolog =
2147 si_get_shader_part(sscreen, &sscreen->vs_prologs,
2148 PIPE_SHADER_VERTEX, true, &prolog_key, compiler,
2149 debug, si_llvm_build_vs_prolog,
2150 "Vertex Shader Prolog");
2151 return shader->prolog != NULL;
2152 }
2153
2154 /**
2155 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
2156 */
2157 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
2158 struct ac_llvm_compiler *compiler,
2159 struct si_shader *shader,
2160 struct pipe_debug_callback *debug)
2161 {
2162 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader,
2163 &shader->key.part.vs.prolog);
2164 }
2165
2166 /**
2167 * Select and compile (or reuse) TCS parts (epilog).
2168 */
2169 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
2170 struct ac_llvm_compiler *compiler,
2171 struct si_shader *shader,
2172 struct pipe_debug_callback *debug)
2173 {
2174 if (sscreen->info.chip_class >= GFX9) {
2175 struct si_shader *ls_main_part =
2176 shader->key.part.tcs.ls->main_shader_part_ls;
2177
2178 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
2179 &shader->key.part.tcs.ls_prolog))
2180 return false;
2181
2182 shader->previous_stage = ls_main_part;
2183 }
2184
2185 /* Get the epilog. */
2186 union si_shader_part_key epilog_key;
2187 memset(&epilog_key, 0, sizeof(epilog_key));
2188 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
2189
2190 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
2191 PIPE_SHADER_TESS_CTRL, false,
2192 &epilog_key, compiler, debug,
2193 si_llvm_build_tcs_epilog,
2194 "Tessellation Control Shader Epilog");
2195 return shader->epilog != NULL;
2196 }
2197
2198 /**
2199 * Select and compile (or reuse) GS parts (prolog).
2200 */
2201 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
2202 struct ac_llvm_compiler *compiler,
2203 struct si_shader *shader,
2204 struct pipe_debug_callback *debug)
2205 {
2206 if (sscreen->info.chip_class >= GFX9) {
2207 struct si_shader *es_main_part;
2208 enum pipe_shader_type es_type = shader->key.part.gs.es->type;
2209
2210 if (shader->key.as_ngg)
2211 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
2212 else
2213 es_main_part = shader->key.part.gs.es->main_shader_part_es;
2214
2215 if (es_type == PIPE_SHADER_VERTEX &&
2216 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
2217 &shader->key.part.gs.vs_prolog))
2218 return false;
2219
2220 shader->previous_stage = es_main_part;
2221 }
2222
2223 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
2224 return true;
2225
2226 union si_shader_part_key prolog_key;
2227 memset(&prolog_key, 0, sizeof(prolog_key));
2228 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
2229 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
2230
2231 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
2232 PIPE_SHADER_GEOMETRY, true,
2233 &prolog_key, compiler, debug,
2234 si_llvm_build_gs_prolog,
2235 "Geometry Shader Prolog");
2236 return shader->prolog2 != NULL;
2237 }
2238
2239 /**
2240 * Compute the PS prolog key, which contains all the information needed to
2241 * build the PS prolog function, and set related bits in shader->config.
2242 */
2243 void si_get_ps_prolog_key(struct si_shader *shader,
2244 union si_shader_part_key *key,
2245 bool separate_prolog)
2246 {
2247 struct si_shader_info *info = &shader->selector->info;
2248
2249 memset(key, 0, sizeof(*key));
2250 key->ps_prolog.states = shader->key.part.ps.prolog;
2251 key->ps_prolog.colors_read = info->colors_read;
2252 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
2253 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
2254 key->ps_prolog.wqm = info->uses_derivatives &&
2255 (key->ps_prolog.colors_read ||
2256 key->ps_prolog.states.force_persp_sample_interp ||
2257 key->ps_prolog.states.force_linear_sample_interp ||
2258 key->ps_prolog.states.force_persp_center_interp ||
2259 key->ps_prolog.states.force_linear_center_interp ||
2260 key->ps_prolog.states.bc_optimize_for_persp ||
2261 key->ps_prolog.states.bc_optimize_for_linear);
2262 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
2263
2264 if (info->colors_read) {
2265 unsigned *color = shader->selector->color_attr_index;
2266
2267 if (shader->key.part.ps.prolog.color_two_side) {
2268 /* BCOLORs are stored after the last input. */
2269 key->ps_prolog.num_interp_inputs = info->num_inputs;
2270 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
2271 if (separate_prolog)
2272 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
2273 }
2274
2275 for (unsigned i = 0; i < 2; i++) {
2276 unsigned interp = info->input_interpolate[color[i]];
2277 unsigned location = info->input_interpolate_loc[color[i]];
2278
2279 if (!(info->colors_read & (0xf << i*4)))
2280 continue;
2281
2282 key->ps_prolog.color_attr_index[i] = color[i];
2283
2284 if (shader->key.part.ps.prolog.flatshade_colors &&
2285 interp == TGSI_INTERPOLATE_COLOR)
2286 interp = TGSI_INTERPOLATE_CONSTANT;
2287
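/* The color_interp_vgpr_index values below follow the hardware PS input
 * VGPR order (PERSP sample/center/centroid i,j pairs, PERSP_PULL_MODEL,
 * then the LINEAR pairs). With a separate prolog, PERSP_PULL_MODEL is
 * never enabled, so the LINEAR cases use 6/8/10 instead of 9/11/13 (see
 * the note in the LINEAR case). */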
2288 switch (interp) {
2289 case TGSI_INTERPOLATE_CONSTANT:
2290 key->ps_prolog.color_interp_vgpr_index[i] = -1;
2291 break;
2292 case TGSI_INTERPOLATE_PERSPECTIVE:
2293 case TGSI_INTERPOLATE_COLOR:
2294 /* Force the interpolation location for colors here. */
2295 if (shader->key.part.ps.prolog.force_persp_sample_interp)
2296 location = TGSI_INTERPOLATE_LOC_SAMPLE;
2297 if (shader->key.part.ps.prolog.force_persp_center_interp)
2298 location = TGSI_INTERPOLATE_LOC_CENTER;
2299
2300 switch (location) {
2301 case TGSI_INTERPOLATE_LOC_SAMPLE:
2302 key->ps_prolog.color_interp_vgpr_index[i] = 0;
2303 if (separate_prolog) {
2304 shader->config.spi_ps_input_ena |=
2305 S_0286CC_PERSP_SAMPLE_ENA(1);
2306 }
2307 break;
2308 case TGSI_INTERPOLATE_LOC_CENTER:
2309 key->ps_prolog.color_interp_vgpr_index[i] = 2;
2310 if (separate_prolog) {
2311 shader->config.spi_ps_input_ena |=
2312 S_0286CC_PERSP_CENTER_ENA(1);
2313 }
2314 break;
2315 case TGSI_INTERPOLATE_LOC_CENTROID:
2316 key->ps_prolog.color_interp_vgpr_index[i] = 4;
2317 if (separate_prolog) {
2318 shader->config.spi_ps_input_ena |=
2319 S_0286CC_PERSP_CENTROID_ENA(1);
2320 }
2321 break;
2322 default:
2323 assert(0);
2324 }
2325 break;
2326 case TGSI_INTERPOLATE_LINEAR:
2327 /* Force the interpolation location for colors here. */
2328 if (shader->key.part.ps.prolog.force_linear_sample_interp)
2329 location = TGSI_INTERPOLATE_LOC_SAMPLE;
2330 if (shader->key.part.ps.prolog.force_linear_center_interp)
2331 location = TGSI_INTERPOLATE_LOC_CENTER;
2332
2333 /* The VGPR assignment for non-monolithic shaders
2334 * works because InitialPSInputAddr is set on the
2335 * main shader and PERSP_PULL_MODEL is never used.
2336 */
2337 switch (location) {
2338 case TGSI_INTERPOLATE_LOC_SAMPLE:
2339 key->ps_prolog.color_interp_vgpr_index[i] =
2340 separate_prolog ? 6 : 9;
2341 if (separate_prolog) {
2342 shader->config.spi_ps_input_ena |=
2343 S_0286CC_LINEAR_SAMPLE_ENA(1);
2344 }
2345 break;
2346 case TGSI_INTERPOLATE_LOC_CENTER:
2347 key->ps_prolog.color_interp_vgpr_index[i] =
2348 separate_prolog ? 8 : 11;
2349 if (separate_prolog) {
2350 shader->config.spi_ps_input_ena |=
2351 S_0286CC_LINEAR_CENTER_ENA(1);
2352 }
2353 break;
2354 case TGSI_INTERPOLATE_LOC_CENTROID:
2355 key->ps_prolog.color_interp_vgpr_index[i] =
2356 separate_prolog ? 10 : 13;
2357 if (separate_prolog) {
2358 shader->config.spi_ps_input_ena |=
2359 S_0286CC_LINEAR_CENTROID_ENA(1);
2360 }
2361 break;
2362 default:
2363 assert(0);
2364 }
2365 break;
2366 default:
2367 assert(0);
2368 }
2369 }
2370 }
2371 }
2372
2373 /**
2374 * Check whether a PS prolog is required based on the key.
2375 */
2376 bool si_need_ps_prolog(const union si_shader_part_key *key)
2377 {
2378 return key->ps_prolog.colors_read ||
2379 key->ps_prolog.states.force_persp_sample_interp ||
2380 key->ps_prolog.states.force_linear_sample_interp ||
2381 key->ps_prolog.states.force_persp_center_interp ||
2382 key->ps_prolog.states.force_linear_center_interp ||
2383 key->ps_prolog.states.bc_optimize_for_persp ||
2384 key->ps_prolog.states.bc_optimize_for_linear ||
2385 key->ps_prolog.states.poly_stipple ||
2386 key->ps_prolog.states.samplemask_log_ps_iter;
2387 }
2388
2389 /**
2390 * Compute the PS epilog key, which contains all the information needed to
2391 * build the PS epilog function.
2392 */
2393 void si_get_ps_epilog_key(struct si_shader *shader,
2394 union si_shader_part_key *key)
2395 {
2396 struct si_shader_info *info = &shader->selector->info;
2397 memset(key, 0, sizeof(*key));
2398 key->ps_epilog.colors_written = info->colors_written;
2399 key->ps_epilog.writes_z = info->writes_z;
2400 key->ps_epilog.writes_stencil = info->writes_stencil;
2401 key->ps_epilog.writes_samplemask = info->writes_samplemask;
2402 key->ps_epilog.states = shader->key.part.ps.epilog;
2403 }
2404
2405 /**
2406 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
2407 */
2408 static bool si_shader_select_ps_parts(struct si_screen *sscreen,
2409 struct ac_llvm_compiler *compiler,
2410 struct si_shader *shader,
2411 struct pipe_debug_callback *debug)
2412 {
2413 union si_shader_part_key prolog_key;
2414 union si_shader_part_key epilog_key;
2415
2416 /* Get the prolog. */
2417 si_get_ps_prolog_key(shader, &prolog_key, true);
2418
2419 /* The prolog is a no-op if none of these key bits are set. */
2420 if (si_need_ps_prolog(&prolog_key)) {
2421 shader->prolog =
2422 si_get_shader_part(sscreen, &sscreen->ps_prologs,
2423 PIPE_SHADER_FRAGMENT, true,
2424 &prolog_key, compiler, debug,
2425 si_llvm_build_ps_prolog,
2426 "Fragment Shader Prolog");
2427 if (!shader->prolog)
2428 return false;
2429 }
2430
2431 /* Get the epilog. */
2432 si_get_ps_epilog_key(shader, &epilog_key);
2433
2434 shader->epilog =
2435 si_get_shader_part(sscreen, &sscreen->ps_epilogs,
2436 PIPE_SHADER_FRAGMENT, false,
2437 &epilog_key, compiler, debug,
2438 si_llvm_build_ps_epilog,
2439 "Fragment Shader Epilog");
2440 if (!shader->epilog)
2441 return false;
2442
2443 /* Enable POS_FIXED_PT if polygon stippling is enabled. */
2444 if (shader->key.part.ps.prolog.poly_stipple) {
2445 shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
2446 assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
2447 }
2448
2449 /* Set up the enable bits for per-sample shading if needed. */
2450 if (shader->key.part.ps.prolog.force_persp_sample_interp &&
2451 (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
2452 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2453 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
2454 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
2455 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
2456 }
2457 if (shader->key.part.ps.prolog.force_linear_sample_interp &&
2458 (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
2459 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2460 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
2461 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
2462 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
2463 }
2464 if (shader->key.part.ps.prolog.force_persp_center_interp &&
2465 (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
2466 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2467 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
2468 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
2469 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2470 }
2471 if (shader->key.part.ps.prolog.force_linear_center_interp &&
2472 (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
2473 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2474 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
2475 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
2476 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2477 }
2478
2479 /* POS_W_FLOAT requires that one of the perspective weights is enabled. */
2480 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
2481 !(shader->config.spi_ps_input_ena & 0xf)) {
2482 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2483 assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
2484 }
2485
2486 /* At least one pair of interpolation weights must be enabled. */
2487 if (!(shader->config.spi_ps_input_ena & 0x7f)) {
2488 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2489 assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
2490 }
2491
2492 /* Samplemask fixup requires the sample ID. */
2493 if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
2494 shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
2495 assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
2496 }
2497
2498 /* The sample mask input is always enabled, because the API shader always
2499 * passes it through to the epilog. Disable it here if it's unused.
2500 */
2501 if (!shader->key.part.ps.epilog.poly_line_smoothing &&
2502 !shader->selector->info.reads_samplemask)
2503 shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;
2504
2505 return true;
2506 }
2507
2508 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
2509 unsigned *lds_size)
2510 {
2511 /* If tessellation is all offchip and on-chip GS isn't used, this
2512 * workaround is not needed.
2513 */
2514 return;
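/* The early return above disables the workaround; the code below is
 * currently unreachable and kept for reference. */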
2515
2516 /* SPI barrier management bug:
2517 * Make sure we have at least 4k of LDS in use to avoid the bug.
2518 * It applies to workgroup sizes of more than one wavefront.
2519 */
2520 if (sscreen->info.family == CHIP_BONAIRE ||
2521 sscreen->info.family == CHIP_KABINI)
2522 *lds_size = MAX2(*lds_size, 8);
2523 }
2524
2525 void si_fix_resource_usage(struct si_screen *sscreen, struct si_shader *shader)
2526 {
2527 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
2528
2529 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
2530
2531 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
2532 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
2533 si_multiwave_lds_size_workaround(sscreen,
2534 &shader->config.lds_size);
2535 }
2536 }
2537
2538 bool si_create_shader_variant(struct si_screen *sscreen,
2539 struct ac_llvm_compiler *compiler,
2540 struct si_shader *shader,
2541 struct pipe_debug_callback *debug)
2542 {
2543 struct si_shader_selector *sel = shader->selector;
2544 struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
2545 int r;
2546
2547 /* LS, ES, VS are compiled on demand if the main part hasn't been
2548 * compiled for that stage.
2549 *
2550 * GS is compiled on demand if the main part hasn't been compiled
2551 * for the chosen NGG-ness.
2552 *
2553 * Vertex shaders are compiled on demand when a vertex fetch
2554 * workaround must be applied.
2555 */
2556 if (shader->is_monolithic) {
2557 /* Monolithic shader (compiled as a whole, has many variants,
2558 * may take a long time to compile).
2559 */
2560 r = si_compile_shader(sscreen, compiler, shader, debug);
2561 if (r)
2562 return false;
2563 } else {
2564 /* The shader consists of several parts:
2565 *
2566 * - the middle part is the user shader, it has 1 variant only
2567 * and it was compiled during the creation of the shader
2568 * selector
2569 * - the prolog part is inserted at the beginning
2570 * - the epilog part is inserted at the end
2571 *
2572 * The prolog and epilog have many (but simple) variants.
2573 *
2574 * Starting with gfx9, geometry and tessellation control
2575 * shaders also contain the prolog and user shader parts of
2576 * the previous shader stage.
2577 */
2578
2579 if (!mainp)
2580 return false;
2581
2582 /* Copy the compiled shader data over. */
2583 shader->is_binary_shared = true;
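/* shader->binary now aliases the main part's buffers; si_shader_destroy()
 * checks is_binary_shared and skips freeing them. */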
2584 shader->binary = mainp->binary;
2585 shader->config = mainp->config;
2586 shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
2587 shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
2588 shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
2589 shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
2590 memcpy(shader->info.vs_output_param_offset,
2591 mainp->info.vs_output_param_offset,
2592 sizeof(mainp->info.vs_output_param_offset));
2593 shader->info.uses_instanceid = mainp->info.uses_instanceid;
2594 shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
2595 shader->info.nr_param_exports = mainp->info.nr_param_exports;
2596
2597 /* Select prologs and/or epilogs. */
2598 switch (sel->type) {
2599 case PIPE_SHADER_VERTEX:
2600 if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
2601 return false;
2602 break;
2603 case PIPE_SHADER_TESS_CTRL:
2604 if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
2605 return false;
2606 break;
2607 case PIPE_SHADER_TESS_EVAL:
2608 break;
2609 case PIPE_SHADER_GEOMETRY:
2610 if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
2611 return false;
2612 break;
2613 case PIPE_SHADER_FRAGMENT:
2614 if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
2615 return false;
2616
2617 /* Make sure we have at least as many VGPRs as there
2618 * are allocated inputs.
2619 */
2620 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2621 shader->info.num_input_vgprs);
2622 break;
2623 default:;
2624 }
2625
2626 /* Update SGPR and VGPR counts. */
2627 if (shader->prolog) {
2628 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2629 shader->prolog->config.num_sgprs);
2630 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2631 shader->prolog->config.num_vgprs);
2632 }
2633 if (shader->previous_stage) {
2634 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2635 shader->previous_stage->config.num_sgprs);
2636 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2637 shader->previous_stage->config.num_vgprs);
2638 shader->config.spilled_sgprs =
2639 MAX2(shader->config.spilled_sgprs,
2640 shader->previous_stage->config.spilled_sgprs);
2641 shader->config.spilled_vgprs =
2642 MAX2(shader->config.spilled_vgprs,
2643 shader->previous_stage->config.spilled_vgprs);
2644 shader->info.private_mem_vgprs =
2645 MAX2(shader->info.private_mem_vgprs,
2646 shader->previous_stage->info.private_mem_vgprs);
2647 shader->config.scratch_bytes_per_wave =
2648 MAX2(shader->config.scratch_bytes_per_wave,
2649 shader->previous_stage->config.scratch_bytes_per_wave);
2650 shader->info.uses_instanceid |=
2651 shader->previous_stage->info.uses_instanceid;
2652 }
2653 if (shader->prolog2) {
2654 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2655 shader->prolog2->config.num_sgprs);
2656 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2657 shader->prolog2->config.num_vgprs);
2658 }
2659 if (shader->epilog) {
2660 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2661 shader->epilog->config.num_sgprs);
2662 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2663 shader->epilog->config.num_vgprs);
2664 }
2665 si_calculate_max_simd_waves(shader);
2666 }
2667
2668 if (shader->key.as_ngg) {
2669 assert(!shader->key.as_es && !shader->key.as_ls);
2670 gfx10_ngg_calculate_subgroup_info(shader);
2671 } else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
2672 gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
2673 }
2674
2675 si_fix_resource_usage(sscreen, shader);
2676 si_shader_dump(sscreen, shader, debug, stderr, true);
2677
2678 /* Upload. */
2679 if (!si_shader_binary_upload(sscreen, shader, 0)) {
2680 fprintf(stderr, "LLVM failed to upload shader\n");
2681 return false;
2682 }
2683
2684 return true;
2685 }
2686
2687 void si_shader_binary_clean(struct si_shader_binary *binary)
2688 {
2689 free((void *)binary->elf_buffer);
2690 binary->elf_buffer = NULL;
2691
2692 free(binary->llvm_ir_string);
2693 binary->llvm_ir_string = NULL;
2694 }
2695
2696 void si_shader_destroy(struct si_shader *shader)
2697 {
2698 if (shader->scratch_bo)
2699 si_resource_reference(&shader->scratch_bo, NULL);
2700
2701 si_resource_reference(&shader->bo, NULL);
2702
2703 if (!shader->is_binary_shared)
2704 si_shader_binary_clean(&shader->binary);
2705
2706 free(shader->shader_log);
2707 }