mesa.git / src/gallium/drivers/radeonsi/si_shader.c @ 7ae29880adb539c8c46abb8345d33d3dd6e01b5f
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "ac_exp_param.h"
26 #include "ac_rtld.h"
27 #include "compiler/nir/nir.h"
28 #include "compiler/nir/nir_serialize.h"
29 #include "si_pipe.h"
30 #include "si_shader_internal.h"
31 #include "sid.h"
32 #include "tgsi/tgsi_from_mesa.h"
33 #include "tgsi/tgsi_strings.h"
34 #include "util/u_memory.h"
35
36 static const char scratch_rsrc_dword0_symbol[] = "SCRATCH_RSRC_DWORD0";
37
38 static const char scratch_rsrc_dword1_symbol[] = "SCRATCH_RSRC_DWORD1";
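/* These two symbols are left unresolved in the shader ELF and are patched
 * with the scratch buffer address at upload time; see si_get_external_symbol
 * below. */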
39
40 static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
41
42 /** Whether the shader runs as a combination of multiple API shaders */
43 bool si_is_multi_part_shader(struct si_shader *shader)
44 {
45 if (shader->selector->screen->info.chip_class <= GFX8)
46 return false;
47
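/* On GFX9 and later, LS+HS and ES+GS execute as single merged HW stages,
 * so each of these cases runs combined with another API shader. */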
48 return shader->key.as_ls || shader->key.as_es ||
49 shader->selector->type == PIPE_SHADER_TESS_CTRL ||
50 shader->selector->type == PIPE_SHADER_GEOMETRY;
51 }
52
53 /** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
54 bool si_is_merged_shader(struct si_shader *shader)
55 {
56 return shader->key.as_ngg || si_is_multi_part_shader(shader);
57 }
58
59 /**
60 * Returns a unique index for a per-patch semantic name and index. The index
61 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
62 * can be calculated.
63 */
64 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
65 {
66 switch (semantic_name) {
67 case TGSI_SEMANTIC_TESSOUTER:
68 return 0;
69 case TGSI_SEMANTIC_TESSINNER:
70 return 1;
71 case TGSI_SEMANTIC_PATCH:
72 assert(index < 30);
73 return 2 + index;
74
75 default:
76 assert(!"invalid semantic name");
77 return 0;
78 }
79 }
80
81 /**
82 * Returns a unique index for a semantic name and index. The index must be
83 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
84 * calculated.
85 */
86 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index, unsigned is_varying)
87 {
88 switch (semantic_name) {
89 case TGSI_SEMANTIC_POSITION:
90 return 0;
91 case TGSI_SEMANTIC_GENERIC:
92 /* Since some shader stages use the highest used IO index
93 * to determine the size to allocate for inputs/outputs
94 * (in LDS, tess and GS rings), GENERIC should be placed right
95 * after POSITION to make that size as small as possible.
96 */
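/* For example, GENERIC[0] maps to 1 and GENERIC[n] to 1 + n, so a shader
 * that uses only a few generics keeps the highest used index (and thus the
 * allocated size) small. */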
97 if (index < SI_MAX_IO_GENERIC)
98 return 1 + index;
99
100 assert(!"invalid generic index");
101 return 0;
102 case TGSI_SEMANTIC_FOG:
103 return SI_MAX_IO_GENERIC + 1;
104 case TGSI_SEMANTIC_COLOR:
105 assert(index < 2);
106 return SI_MAX_IO_GENERIC + 2 + index;
107 case TGSI_SEMANTIC_BCOLOR:
108 assert(index < 2);
109 /* If it's a varying, COLOR and BCOLOR alias. */
110 if (is_varying)
111 return SI_MAX_IO_GENERIC + 2 + index;
112 else
113 return SI_MAX_IO_GENERIC + 4 + index;
114 case TGSI_SEMANTIC_TEXCOORD:
115 assert(index < 8);
116 return SI_MAX_IO_GENERIC + 6 + index;
117
118 /* These are rarely used between LS and HS or ES and GS. */
119 case TGSI_SEMANTIC_CLIPDIST:
120 assert(index < 2);
121 return SI_MAX_IO_GENERIC + 6 + 8 + index;
122 case TGSI_SEMANTIC_CLIPVERTEX:
123 return SI_MAX_IO_GENERIC + 6 + 8 + 2;
124 case TGSI_SEMANTIC_PSIZE:
125 return SI_MAX_IO_GENERIC + 6 + 8 + 3;
126
127 /* These can't be written by LS, HS, and ES. */
128 case TGSI_SEMANTIC_LAYER:
129 return SI_MAX_IO_GENERIC + 6 + 8 + 4;
130 case TGSI_SEMANTIC_VIEWPORT_INDEX:
131 return SI_MAX_IO_GENERIC + 6 + 8 + 5;
132 case TGSI_SEMANTIC_PRIMID:
133 STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
134 return SI_MAX_IO_GENERIC + 6 + 8 + 6;
135 default:
136 fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
137 assert(!"invalid semantic name");
138 return 0;
139 }
140 }
141
142 static void si_dump_streamout(struct pipe_stream_output_info *so)
143 {
144 unsigned i;
145
146 if (so->num_outputs)
147 fprintf(stderr, "STREAMOUT\n");
148
149 for (i = 0; i < so->num_outputs; i++) {
150 unsigned mask = ((1 << so->output[i].num_components) - 1) << so->output[i].start_component;
151 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n", i, so->output[i].output_buffer,
152 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
153 so->output[i].register_index, mask & 1 ? "x" : "", mask & 2 ? "y" : "",
154 mask & 4 ? "z" : "", mask & 8 ? "w" : "");
155 }
156 }
157
158 static void declare_streamout_params(struct si_shader_context *ctx,
159 struct pipe_stream_output_info *so)
160 {
161 if (ctx->screen->use_ngg_streamout) {
162 if (ctx->type == PIPE_SHADER_TESS_EVAL)
163 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
164 return;
165 }
166
167 /* Streamout SGPRs. */
168 if (so->num_outputs) {
169 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
170 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
171 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
172 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
173 }
174
175 /* A streamout buffer offset is loaded if the stride is non-zero. */
176 for (int i = 0; i < 4; i++) {
177 if (!so->stride[i])
178 continue;
179
180 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
181 }
182 }
183
184 unsigned si_get_max_workgroup_size(const struct si_shader *shader)
185 {
186 switch (shader->selector->type) {
187 case PIPE_SHADER_VERTEX:
188 case PIPE_SHADER_TESS_EVAL:
189 return shader->key.as_ngg ? 128 : 0;
190
191 case PIPE_SHADER_TESS_CTRL:
192 /* Return this so that LLVM doesn't remove s_barrier
193 * instructions on chips where we use s_barrier. */
194 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
195
196 case PIPE_SHADER_GEOMETRY:
197 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
198
199 case PIPE_SHADER_COMPUTE:
200 break; /* see below */
201
202 default:
203 return 0;
204 }
205
206 const unsigned *properties = shader->selector->info.properties;
207 unsigned max_work_group_size = properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
208 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
209 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
210
211 if (!max_work_group_size) {
212 /* This is a variable group size compute shader;
213 * compile it for the maximum possible group size.
214 */
215 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
216 }
217 return max_work_group_size;
218 }
219
220 static void declare_const_and_shader_buffers(struct si_shader_context *ctx, bool assign_params)
221 {
222 enum ac_arg_type const_shader_buf_type;
223
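/* If the shader uses only constant buffer 0 and no shader buffers, pass a
 * direct pointer to the buffer contents; otherwise pass a pointer to the
 * combined const/shader buffer descriptor array. */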
224 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
225 ctx->shader->selector->info.shader_buffers_declared == 0)
226 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
227 else
228 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
229
230 ac_add_arg(
231 &ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
232 assign_params ? &ctx->const_and_shader_buffers : &ctx->other_const_and_shader_buffers);
233 }
234
235 static void declare_samplers_and_images(struct si_shader_context *ctx, bool assign_params)
236 {
237 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
238 assign_params ? &ctx->samplers_and_images : &ctx->other_samplers_and_images);
239 }
240
241 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx, bool assign_params)
242 {
243 declare_const_and_shader_buffers(ctx, assign_params);
244 declare_samplers_and_images(ctx, assign_params);
245 }
246
247 static void declare_global_desc_pointers(struct si_shader_context *ctx)
248 {
249 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->rw_buffers);
250 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
251 &ctx->bindless_samplers_and_images);
252 }
253
254 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
255 {
256 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
257 if (!ctx->shader->is_gs_copy_shader) {
258 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
259 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
260 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
261 }
262 }
263
264 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
265 {
266 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
267
268 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
269 if (num_vbos_in_user_sgprs) {
270 unsigned user_sgprs = ctx->args.num_sgprs_used;
271
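/* num_sgprs_used includes the 8 system SGPRs that merged shaders have before
 * the user SGPRs (see si_create_function); subtract them to get the user
 * SGPR count. */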
272 if (si_is_merged_shader(ctx->shader))
273 user_sgprs -= 8;
274 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
275
276 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
277 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
278 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
279
280 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
281 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
282 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
283 }
284 }
285
286 static void declare_vs_input_vgprs(struct si_shader_context *ctx, unsigned *num_prolog_vgprs,
287 bool ngg_cull_shader)
288 {
289 struct si_shader *shader = ctx->shader;
290
291 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
292 if (shader->key.as_ls) {
293 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
294 if (ctx->screen->info.chip_class >= GFX10) {
295 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
296 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
297 } else {
298 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
299 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
300 }
301 } else if (ctx->screen->info.chip_class >= GFX10) {
302 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
303 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
304 &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
305 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
306 } else {
307 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
308 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
309 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
310 }
311
312 if (!shader->is_gs_copy_shader) {
313 if (shader->key.opt.ngg_culling && !ngg_cull_shader) {
314 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->ngg_old_thread_id);
315 }
316
317 /* Vertex load indices. */
318 if (shader->selector->info.num_inputs) {
319 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vertex_index0);
320 for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
321 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
322 }
323 *num_prolog_vgprs += shader->selector->info.num_inputs;
324 }
325 }
326
327 static void declare_vs_blit_inputs(struct si_shader_context *ctx, unsigned vs_blit_property)
328 {
329 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_blit_inputs); /* i16 x1, y1 */
330 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x2, y2 */
331 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
332
333 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
334 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
335 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
336 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
337 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
338 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
339 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
340 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
341 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
342 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
343 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
344 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
345 }
346 }
347
348 static void declare_tes_input_vgprs(struct si_shader_context *ctx, bool ngg_cull_shader)
349 {
350 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
351 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
352 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
353 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
354
355 if (ctx->shader->key.opt.ngg_culling && !ngg_cull_shader) {
356 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->ngg_old_thread_id);
357 }
358 }
359
360 enum
361 {
362 /* Convenient merged shader definitions. */
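/* These extend pipe_shader_type to name the GFX9+ merged HW stages
 * (LS+HS and ES+GS); see the chip_class >= GFX9 handling in
 * si_create_function. */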
363 SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
364 SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
365 };
366
367 void si_add_arg_checked(struct ac_shader_args *args, enum ac_arg_regfile file, unsigned registers,
368 enum ac_arg_type type, struct ac_arg *arg, unsigned idx)
369 {
370 assert(args->arg_count == idx);
371 ac_add_arg(args, file, registers, type, arg);
372 }
373
374 void si_create_function(struct si_shader_context *ctx, bool ngg_cull_shader)
375 {
376 struct si_shader *shader = ctx->shader;
377 LLVMTypeRef returns[AC_MAX_ARGS];
378 unsigned i, num_return_sgprs;
379 unsigned num_returns = 0;
380 unsigned num_prolog_vgprs = 0;
381 unsigned type = ctx->type;
382 unsigned vs_blit_property = shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
383
384 memset(&ctx->args, 0, sizeof(ctx->args));
385
386 /* Set MERGED shaders. */
387 if (ctx->screen->info.chip_class >= GFX9) {
388 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
389 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
390 else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
391 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
392 }
393
394 switch (type) {
395 case PIPE_SHADER_VERTEX:
396 declare_global_desc_pointers(ctx);
397
398 if (vs_blit_property) {
399 declare_vs_blit_inputs(ctx, vs_blit_property);
400
401 /* VGPRs */
402 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
403 break;
404 }
405
406 declare_per_stage_desc_pointers(ctx, true);
407 declare_vs_specific_input_sgprs(ctx);
408 if (!shader->is_gs_copy_shader)
409 declare_vb_descriptor_input_sgprs(ctx);
410
411 if (shader->key.as_es) {
412 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
413 } else if (shader->key.as_ls) {
414 /* no extra parameters */
415 } else {
416 /* The locations of the other parameters are assigned dynamically. */
417 declare_streamout_params(ctx, &shader->selector->so);
418 }
419
420 /* VGPRs */
421 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
422
423 /* Return values */
424 if (shader->key.opt.vs_as_prim_discard_cs) {
425 for (i = 0; i < 4; i++)
426 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
427 }
428 break;
429
430 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
431 declare_global_desc_pointers(ctx);
432 declare_per_stage_desc_pointers(ctx, true);
433 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
434 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
435 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
436 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
437 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
438 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
439
440 /* VGPRs */
441 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
442 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
443
444 /* param_tcs_offchip_offset and param_tcs_factor_offset are
445 * placed after the user SGPRs.
446 */
447 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
448 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
449 for (i = 0; i < 11; i++)
450 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
451 break;
452
453 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
454 /* Merged stages have 8 system SGPRs at the beginning. */
455 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
456 declare_per_stage_desc_pointers(ctx, ctx->type == PIPE_SHADER_TESS_CTRL);
457 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
458 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
459 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
460 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
461 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
462 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
463
464 declare_global_desc_pointers(ctx);
465 declare_per_stage_desc_pointers(ctx, ctx->type == PIPE_SHADER_VERTEX);
466 declare_vs_specific_input_sgprs(ctx);
467
468 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
469 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
470 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
471 declare_vb_descriptor_input_sgprs(ctx);
472
473 /* VGPRs (first TCS, then VS) */
474 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
475 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
476
477 if (ctx->type == PIPE_SHADER_VERTEX) {
478 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
479
480 /* LS return values are inputs to the TCS main shader part. */
481 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
482 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
483 for (i = 0; i < 2; i++)
484 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
485 } else {
486 /* TCS return values are inputs to the TCS epilog.
487 *
488 * param_tcs_offchip_offset, param_tcs_factor_offset,
489 * param_tcs_offchip_layout, and param_rw_buffers
490 * should be passed to the epilog.
491 */
492 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
493 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
494 for (i = 0; i < 11; i++)
495 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
496 }
497 break;
498
499 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
500 /* Merged stages have 8 system SGPRs at the beginning. */
501 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
502 declare_per_stage_desc_pointers(ctx, ctx->type == PIPE_SHADER_GEOMETRY);
503
504 if (ctx->shader->key.as_ngg)
505 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
506 else
507 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
508
509 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
510 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
511 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
512 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
513 &ctx->small_prim_cull_info); /* SPI_SHADER_PGM_LO_GS << 8 */
514 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
515 NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
516
517 declare_global_desc_pointers(ctx);
518 if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
519 declare_per_stage_desc_pointers(
520 ctx, (ctx->type == PIPE_SHADER_VERTEX || ctx->type == PIPE_SHADER_TESS_EVAL));
521 }
522
523 if (ctx->type == PIPE_SHADER_VERTEX) {
524 if (vs_blit_property)
525 declare_vs_blit_inputs(ctx, vs_blit_property);
526 else
527 declare_vs_specific_input_sgprs(ctx);
528 } else {
529 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
530 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
531 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
532 /* Declare as many input SGPRs as the VS has. */
533 }
534
535 if (ctx->type == PIPE_SHADER_VERTEX)
536 declare_vb_descriptor_input_sgprs(ctx);
537
538 /* VGPRs (first GS, then VS/TES) */
539 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
540 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
541 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
542 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
543 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
544
545 if (ctx->type == PIPE_SHADER_VERTEX) {
546 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
547 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
548 declare_tes_input_vgprs(ctx, ngg_cull_shader);
549 }
550
551 if ((ctx->shader->key.as_es || ngg_cull_shader) &&
552 (ctx->type == PIPE_SHADER_VERTEX || ctx->type == PIPE_SHADER_TESS_EVAL)) {
553 unsigned num_user_sgprs, num_vgprs;
554
555 if (ctx->type == PIPE_SHADER_VERTEX) {
556 /* For the NGG cull shader, add 1 SGPR to hold
557 * the vertex buffer pointer.
558 */
559 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR + ngg_cull_shader;
560
561 if (ngg_cull_shader && shader->selector->num_vbos_in_user_sgprs) {
562 assert(num_user_sgprs <= 8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
563 num_user_sgprs =
564 SI_SGPR_VS_VB_DESCRIPTOR_FIRST + shader->selector->num_vbos_in_user_sgprs * 4;
565 }
566 } else {
567 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
568 }
569
570 /* The NGG cull shader has to return all 9 VGPRs + the old thread ID.
571 *
572 * The normal merged ESGS shader only has to return the 5 VGPRs
573 * for the GS stage.
574 */
575 num_vgprs = ngg_cull_shader ? 10 : 5;
576
577 /* ES return values are inputs to GS. */
578 for (i = 0; i < 8 + num_user_sgprs; i++)
579 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
580 for (i = 0; i < num_vgprs; i++)
581 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
582 }
583 break;
584
585 case PIPE_SHADER_TESS_EVAL:
586 declare_global_desc_pointers(ctx);
587 declare_per_stage_desc_pointers(ctx, true);
588 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
589 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
590 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
591
592 if (shader->key.as_es) {
593 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
594 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
595 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
596 } else {
597 declare_streamout_params(ctx, &shader->selector->so);
598 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
599 }
600
601 /* VGPRs */
602 declare_tes_input_vgprs(ctx, ngg_cull_shader);
603 break;
604
605 case PIPE_SHADER_GEOMETRY:
606 declare_global_desc_pointers(ctx);
607 declare_per_stage_desc_pointers(ctx, true);
608 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
609 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
610
611 /* VGPRs */
612 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
613 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
614 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
615 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
616 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
617 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
618 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
619 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
620 break;
621
622 case PIPE_SHADER_FRAGMENT:
623 declare_global_desc_pointers(ctx);
624 declare_per_stage_desc_pointers(ctx, true);
625 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL, SI_PARAM_ALPHA_REF);
626 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.prim_mask,
627 SI_PARAM_PRIM_MASK);
628
629 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
630 SI_PARAM_PERSP_SAMPLE);
631 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_center,
632 SI_PARAM_PERSP_CENTER);
633 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_centroid,
634 SI_PARAM_PERSP_CENTROID);
635 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT, NULL, SI_PARAM_PERSP_PULL_MODEL);
636 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.linear_sample,
637 SI_PARAM_LINEAR_SAMPLE);
638 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.linear_center,
639 SI_PARAM_LINEAR_CENTER);
640 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.linear_centroid,
641 SI_PARAM_LINEAR_CENTROID);
642 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT, NULL, SI_PARAM_LINE_STIPPLE_TEX);
643 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->args.frag_pos[0],
644 SI_PARAM_POS_X_FLOAT);
645 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->args.frag_pos[1],
646 SI_PARAM_POS_Y_FLOAT);
647 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->args.frag_pos[2],
648 SI_PARAM_POS_Z_FLOAT);
649 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->args.frag_pos[3],
650 SI_PARAM_POS_W_FLOAT);
651 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
652 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.front_face,
653 SI_PARAM_FRONT_FACE);
654 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
655 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.ancillary,
656 SI_PARAM_ANCILLARY);
657 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->args.sample_coverage,
658 SI_PARAM_SAMPLE_COVERAGE);
659 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->pos_fixed_pt,
660 SI_PARAM_POS_FIXED_PT);
661
662 /* Color inputs from the prolog. */
663 if (shader->selector->info.colors_read) {
664 unsigned num_color_elements = util_bitcount(shader->selector->info.colors_read);
665
666 for (i = 0; i < num_color_elements; i++)
667 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
668
669 num_prolog_vgprs += num_color_elements;
670 }
671
672 /* Outputs for the epilog. */
673 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
674 num_returns = num_return_sgprs + util_bitcount(shader->selector->info.colors_written) * 4 +
675 shader->selector->info.writes_z + shader->selector->info.writes_stencil +
676 shader->selector->info.writes_samplemask + 1 /* SampleMaskIn */;
677
678 num_returns = MAX2(num_returns, num_return_sgprs + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
679
680 for (i = 0; i < num_return_sgprs; i++)
681 returns[i] = ctx->ac.i32;
682 for (; i < num_returns; i++)
683 returns[i] = ctx->ac.f32;
684 break;
685
686 case PIPE_SHADER_COMPUTE:
687 declare_global_desc_pointers(ctx);
688 declare_per_stage_desc_pointers(ctx, true);
689 if (shader->selector->info.uses_grid_size)
690 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->args.num_work_groups);
691 if (shader->selector->info.uses_block_size &&
692 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
693 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
694
695 unsigned cs_user_data_dwords =
696 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
697 if (cs_user_data_dwords) {
698 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT, &ctx->cs_user_data);
699 }
700
701 /* Hardware SGPRs. */
702 for (i = 0; i < 3; i++) {
703 if (shader->selector->info.uses_block_id[i]) {
704 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.workgroup_ids[i]);
705 }
706 }
707 if (shader->selector->info.uses_subgroup_info)
708 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
709
710 /* Hardware VGPRs. */
711 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT, &ctx->args.local_invocation_ids);
712 break;
713 default:
714 assert(0 && "unimplemented shader");
715 return;
716 }
717
718 si_llvm_create_func(ctx, ngg_cull_shader ? "ngg_cull_main" : "main", returns, num_returns,
719 si_get_max_workgroup_size(shader));
720
721 /* Reserve register locations for VGPR inputs the PS prolog may need. */
722 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
723 ac_llvm_add_target_dep_function_attr(
724 ctx->main_fn, "InitialPSInputAddr",
725 S_0286D0_PERSP_SAMPLE_ENA(1) | S_0286D0_PERSP_CENTER_ENA(1) |
726 S_0286D0_PERSP_CENTROID_ENA(1) | S_0286D0_LINEAR_SAMPLE_ENA(1) |
727 S_0286D0_LINEAR_CENTER_ENA(1) | S_0286D0_LINEAR_CENTROID_ENA(1) |
728 S_0286D0_FRONT_FACE_ENA(1) | S_0286D0_ANCILLARY_ENA(1) | S_0286D0_POS_FIXED_PT_ENA(1));
729 }
730
731 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
732 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
733
734 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
735 shader->info.num_input_vgprs -= num_prolog_vgprs;
736
737 if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
738 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
739 /* The LSHS size is not known until draw time, so we append it
740 * at the end of whatever LDS use there may be in the rest of
741 * the shader (currently none, unless LLVM decides to do its
742 * own LDS-based lowering).
743 */
744 ctx->ac.lds = LLVMAddGlobalInAddressSpace(ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
745 "__lds_end", AC_ADDR_SPACE_LDS);
746 LLVMSetAlignment(ctx->ac.lds, 256);
747 } else {
748 ac_declare_lds_as_pointer(&ctx->ac);
749 }
750 }
751
752 /* Unlike radv, we override these arguments in the prolog, so to the
753 * API shader they appear as normal arguments.
754 */
755 if (ctx->type == PIPE_SHADER_VERTEX) {
756 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
757 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
758 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
759 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
760 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
761 }
762 }
763
764 /* For the UMR disassembler. */
765 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
766 #define DEBUGGER_NUM_MARKERS 5
767
768 static bool si_shader_binary_open(struct si_screen *screen, struct si_shader *shader,
769 struct ac_rtld_binary *rtld)
770 {
771 const struct si_shader_selector *sel = shader->selector;
772 const char *part_elfs[5];
773 size_t part_sizes[5];
774 unsigned num_parts = 0;
775
776 #define add_part(shader_or_part) \
777 if (shader_or_part) { \
778 part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
779 part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
780 num_parts++; \
781 }
782
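/* Gather the ELFs of all present shader parts in execution order; absent
 * parts are simply skipped. */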
783 add_part(shader->prolog);
784 add_part(shader->previous_stage);
785 add_part(shader->prolog2);
786 add_part(shader);
787 add_part(shader->epilog);
788
789 #undef add_part
790
791 struct ac_rtld_symbol lds_symbols[2];
792 unsigned num_lds_symbols = 0;
793
794 if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
795 (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
796 /* We add this symbol even on LLVM <= 8 to ensure that
797 * shader->config.lds_size is set correctly below.
798 */
799 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
800 sym->name = "esgs_ring";
801 sym->size = shader->gs_info.esgs_ring_size;
802 sym->align = 64 * 1024;
803 }
804
805 if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
806 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
807 sym->name = "ngg_emit";
808 sym->size = shader->ngg.ngg_emit_size * 4;
809 sym->align = 4;
810 }
811
812 bool ok = ac_rtld_open(
813 rtld, (struct ac_rtld_open_info){.info = &screen->info,
814 .options =
815 {
816 .halt_at_entry = screen->options.halt_shaders,
817 },
818 .shader_type = tgsi_processor_to_shader_stage(sel->type),
819 .wave_size = si_get_shader_wave_size(shader),
820 .num_parts = num_parts,
821 .elf_ptrs = part_elfs,
822 .elf_sizes = part_sizes,
823 .num_shared_lds_symbols = num_lds_symbols,
824 .shared_lds_symbols = lds_symbols});
825
826 if (rtld->lds_size > 0) {
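/* config.lds_size is stored in allocation granules (512 bytes on GFX7+,
 * 256 bytes on GFX6) rather than in bytes. */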
827 unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
828 shader->config.lds_size = align(rtld->lds_size, alloc_granularity) / alloc_granularity;
829 }
830
831 return ok;
832 }
833
834 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
835 {
836 struct ac_rtld_binary rtld;
837 si_shader_binary_open(screen, shader, &rtld);
838 return rtld.exec_size;
839 }
840
841 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
842 {
843 uint64_t *scratch_va = data;
844
845 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
846 *value = (uint32_t)*scratch_va;
847 return true;
848 }
849 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
850 /* Enable scratch coalescing. */
851 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) | S_008F04_SWIZZLE_ENABLE(1);
852 return true;
853 }
854
855 return false;
856 }
857
858 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
859 uint64_t scratch_va)
860 {
861 struct ac_rtld_binary binary;
862 if (!si_shader_binary_open(sscreen, shader, &binary))
863 return false;
864
865 si_resource_reference(&shader->bo, NULL);
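/* The buffer is sized for CP DMA prefetching and marked read-only unless
 * CP DMA prefetches write to memory on this chip. */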
866 shader->bo = si_aligned_buffer_create(
867 &sscreen->b, sscreen->info.cpdma_prefetch_writes_memory ? 0 : SI_RESOURCE_FLAG_READ_ONLY,
868 PIPE_USAGE_IMMUTABLE, align(binary.rx_size, SI_CPDMA_ALIGNMENT), 256);
869 if (!shader->bo)
870 return false;
871
872 /* Upload. */
873 struct ac_rtld_upload_info u = {};
874 u.binary = &binary;
875 u.get_external_symbol = si_get_external_symbol;
876 u.cb_data = &scratch_va;
877 u.rx_va = shader->bo->gpu_address;
878 u.rx_ptr = sscreen->ws->buffer_map(
879 shader->bo->buf, NULL,
880 PIPE_TRANSFER_READ_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED | RADEON_TRANSFER_TEMPORARY);
881 if (!u.rx_ptr)
882 return false;
883
884 bool ok = ac_rtld_upload(&u);
885
886 sscreen->ws->buffer_unmap(shader->bo->buf);
887 ac_rtld_close(&binary);
888
889 return ok;
890 }
891
892 static void si_shader_dump_disassembly(struct si_screen *screen,
893 const struct si_shader_binary *binary,
894 enum pipe_shader_type shader_type, unsigned wave_size,
895 struct pipe_debug_callback *debug, const char *name,
896 FILE *file)
897 {
898 struct ac_rtld_binary rtld_binary;
899
900 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
901 .info = &screen->info,
902 .shader_type = tgsi_processor_to_shader_stage(shader_type),
903 .wave_size = wave_size,
904 .num_parts = 1,
905 .elf_ptrs = &binary->elf_buffer,
906 .elf_sizes = &binary->elf_size}))
907 return;
908
909 const char *disasm;
910 size_t nbytes;
911
912 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
913 goto out;
914
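/* The "%.*s" / "%*s" format arguments below take an int, so give up on
 * implausibly large disassembly sections. */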
915 if (nbytes > INT_MAX)
916 goto out;
917
918 if (debug && debug->debug_message) {
919 /* Very long debug messages are cut off, so send the
920 * disassembly one line at a time. This causes more
921 * overhead, but on the plus side it simplifies
922 * parsing of resulting logs.
923 */
924 pipe_debug_message(debug, SHADER_INFO, "Shader Disassembly Begin");
925
926 uint64_t line = 0;
927 while (line < nbytes) {
928 int count = nbytes - line;
929 const char *nl = memchr(disasm + line, '\n', nbytes - line);
930 if (nl)
931 count = nl - (disasm + line);
932
933 if (count) {
934 pipe_debug_message(debug, SHADER_INFO, "%.*s", count, disasm + line);
935 }
936
937 line += count + 1;
938 }
939
940 pipe_debug_message(debug, SHADER_INFO, "Shader Disassembly End");
941 }
942
943 if (file) {
944 fprintf(file, "Shader %s disassembly:\n", name);
945 fprintf(file, "%*s", (int)nbytes, disasm);
946 }
947
948 out:
949 ac_rtld_close(&rtld_binary);
950 }
951
952 static void si_calculate_max_simd_waves(struct si_shader *shader)
953 {
954 struct si_screen *sscreen = shader->selector->screen;
955 struct ac_shader_config *conf = &shader->config;
956 unsigned num_inputs = shader->selector->info.num_inputs;
957 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
958 unsigned lds_per_wave = 0;
959 unsigned max_simd_waves;
960
961 max_simd_waves = sscreen->info.max_wave64_per_simd;
962
963 /* Compute LDS usage for PS. */
964 switch (shader->selector->type) {
965 case PIPE_SHADER_FRAGMENT:
966 /* The minimum usage per wave is (num_inputs * 48). The maximum
967 * usage is (num_inputs * 48 * 16).
968 * We can get anything in between and it varies between waves.
969 *
970 * The 48 bytes per input for a single primitive is equal to
971 * 4 bytes/component * 4 components/input * 3 points.
972 *
973 * Other stages don't know the size at compile time or don't
974 * allocate LDS per wave, but instead they do it per thread group.
975 */
976 lds_per_wave = conf->lds_size * lds_increment + align(num_inputs * 48, lds_increment);
977 break;
978 case PIPE_SHADER_COMPUTE:
979 if (shader->selector) {
980 unsigned max_workgroup_size = si_get_max_workgroup_size(shader);
981 lds_per_wave = (conf->lds_size * lds_increment) /
982 DIV_ROUND_UP(max_workgroup_size, sscreen->compute_wave_size);
983 }
984 break;
985 default:;
986 }
987
988 /* Compute the per-SIMD wave counts. */
989 if (conf->num_sgprs) {
990 max_simd_waves =
991 MIN2(max_simd_waves, sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
992 }
993
994 if (conf->num_vgprs) {
995 /* Always print wave limits as Wave64, so that we can compare
996 * Wave32 and Wave64 with shader-db fairly. */
997 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
998 max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
999 }
1000
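/* Assume the per-workgroup LDS is shared evenly by the 4 SIMDs of a CU. */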
1001 unsigned max_lds_per_simd = sscreen->info.lds_size_per_workgroup / 4;
1002 if (lds_per_wave)
1003 max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
1004
1005 shader->info.max_simd_waves = max_simd_waves;
1006 }
1007
1008 void si_shader_dump_stats_for_shader_db(struct si_screen *screen, struct si_shader *shader,
1009 struct pipe_debug_callback *debug)
1010 {
1011 const struct ac_shader_config *conf = &shader->config;
1012
1013 if (screen->options.debug_disassembly)
1014 si_shader_dump_disassembly(screen, &shader->binary, shader->selector->type,
1015 si_get_shader_wave_size(shader), debug, "main", NULL);
1016
1017 pipe_debug_message(debug, SHADER_INFO,
1018 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
1019 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
1020 "Spilled VGPRs: %d PrivMem VGPRs: %d",
1021 conf->num_sgprs, conf->num_vgprs, si_get_shader_binary_size(screen, shader),
1022 conf->lds_size, conf->scratch_bytes_per_wave, shader->info.max_simd_waves,
1023 conf->spilled_sgprs, conf->spilled_vgprs, shader->info.private_mem_vgprs);
1024 }
1025
1026 static void si_shader_dump_stats(struct si_screen *sscreen, struct si_shader *shader, FILE *file,
1027 bool check_debug_option)
1028 {
1029 const struct ac_shader_config *conf = &shader->config;
1030
1031 if (!check_debug_option || si_can_dump_shader(sscreen, shader->selector->type)) {
1032 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
1033 fprintf(file,
1034 "*** SHADER CONFIG ***\n"
1035 "SPI_PS_INPUT_ADDR = 0x%04x\n"
1036 "SPI_PS_INPUT_ENA = 0x%04x\n",
1037 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
1038 }
1039
1040 fprintf(file,
1041 "*** SHADER STATS ***\n"
1042 "SGPRS: %d\n"
1043 "VGPRS: %d\n"
1044 "Spilled SGPRs: %d\n"
1045 "Spilled VGPRs: %d\n"
1046 "Private memory VGPRs: %d\n"
1047 "Code Size: %d bytes\n"
1048 "LDS: %d blocks\n"
1049 "Scratch: %d bytes per wave\n"
1050 "Max Waves: %d\n"
1051 "********************\n\n\n",
1052 conf->num_sgprs, conf->num_vgprs, conf->spilled_sgprs, conf->spilled_vgprs,
1053 shader->info.private_mem_vgprs, si_get_shader_binary_size(sscreen, shader),
1054 conf->lds_size, conf->scratch_bytes_per_wave, shader->info.max_simd_waves);
1055 }
1056 }
1057
1058 const char *si_get_shader_name(const struct si_shader *shader)
1059 {
1060 switch (shader->selector->type) {
1061 case PIPE_SHADER_VERTEX:
1062 if (shader->key.as_es)
1063 return "Vertex Shader as ES";
1064 else if (shader->key.as_ls)
1065 return "Vertex Shader as LS";
1066 else if (shader->key.opt.vs_as_prim_discard_cs)
1067 return "Vertex Shader as Primitive Discard CS";
1068 else if (shader->key.as_ngg)
1069 return "Vertex Shader as ESGS";
1070 else
1071 return "Vertex Shader as VS";
1072 case PIPE_SHADER_TESS_CTRL:
1073 return "Tessellation Control Shader";
1074 case PIPE_SHADER_TESS_EVAL:
1075 if (shader->key.as_es)
1076 return "Tessellation Evaluation Shader as ES";
1077 else if (shader->key.as_ngg)
1078 return "Tessellation Evaluation Shader as ESGS";
1079 else
1080 return "Tessellation Evaluation Shader as VS";
1081 case PIPE_SHADER_GEOMETRY:
1082 if (shader->is_gs_copy_shader)
1083 return "GS Copy Shader as VS";
1084 else
1085 return "Geometry Shader";
1086 case PIPE_SHADER_FRAGMENT:
1087 return "Pixel Shader";
1088 case PIPE_SHADER_COMPUTE:
1089 return "Compute Shader";
1090 default:
1091 return "Unknown Shader";
1092 }
1093 }
1094
1095 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
1096 struct pipe_debug_callback *debug, FILE *file, bool check_debug_option)
1097 {
1098 enum pipe_shader_type shader_type = shader->selector->type;
1099
1100 if (!check_debug_option || si_can_dump_shader(sscreen, shader_type))
1101 si_dump_shader_key(shader, file);
1102
1103 if (!check_debug_option && shader->binary.llvm_ir_string) {
1104 if (shader->previous_stage && shader->previous_stage->binary.llvm_ir_string) {
1105 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n", si_get_shader_name(shader));
1106 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
1107 }
1108
1109 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n", si_get_shader_name(shader));
1110 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
1111 }
1112
1113 if (!check_debug_option ||
1114 (si_can_dump_shader(sscreen, shader_type) && !(sscreen->debug_flags & DBG(NO_ASM)))) {
1115 unsigned wave_size = si_get_shader_wave_size(shader);
1116
1117 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
1118
1119 if (shader->prolog)
1120 si_shader_dump_disassembly(sscreen, &shader->prolog->binary, shader_type, wave_size, debug,
1121 "prolog", file);
1122 if (shader->previous_stage)
1123 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary, shader_type,
1124 wave_size, debug, "previous stage", file);
1125 if (shader->prolog2)
1126 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary, shader_type, wave_size,
1127 debug, "prolog2", file);
1128
1129 si_shader_dump_disassembly(sscreen, &shader->binary, shader_type, wave_size, debug, "main",
1130 file);
1131
1132 if (shader->epilog)
1133 si_shader_dump_disassembly(sscreen, &shader->epilog->binary, shader_type, wave_size, debug,
1134 "epilog", file);
1135 fprintf(file, "\n");
1136 }
1137
1138 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
1139 }
1140
1141 static void si_dump_shader_key_vs(const struct si_shader_key *key,
1142 const struct si_vs_prolog_bits *prolog, const char *prefix,
1143 FILE *f)
1144 {
1145 fprintf(f, " %s.instance_divisor_is_one = %u\n", prefix, prolog->instance_divisor_is_one);
1146 fprintf(f, " %s.instance_divisor_is_fetched = %u\n", prefix,
1147 prolog->instance_divisor_is_fetched);
1148 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n", prefix,
1149 prolog->unpack_instance_id_from_vertex_id);
1150 fprintf(f, " %s.ls_vgpr_fix = %u\n", prefix, prolog->ls_vgpr_fix);
1151
1152 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
1153 fprintf(f, " mono.vs.fix_fetch = {");
1154 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
1155 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
1156 if (i)
1157 fprintf(f, ", ");
1158 if (!fix.bits)
1159 fprintf(f, "0");
1160 else
1161 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size, fix.u.num_channels_m1,
1162 fix.u.format);
1163 }
1164 fprintf(f, "}\n");
1165 }
1166
1167 static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
1168 {
1169 const struct si_shader_key *key = &shader->key;
1170 enum pipe_shader_type shader_type = shader->selector->type;
1171
1172 fprintf(f, "SHADER KEY\n");
1173
1174 switch (shader_type) {
1175 case PIPE_SHADER_VERTEX:
1176 si_dump_shader_key_vs(key, &key->part.vs.prolog, "part.vs.prolog", f);
1177 fprintf(f, " as_es = %u\n", key->as_es);
1178 fprintf(f, " as_ls = %u\n", key->as_ls);
1179 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1180 fprintf(f, " mono.u.vs_export_prim_id = %u\n", key->mono.u.vs_export_prim_id);
1181 fprintf(f, " opt.vs_as_prim_discard_cs = %u\n", key->opt.vs_as_prim_discard_cs);
1182 fprintf(f, " opt.cs_prim_type = %s\n", tgsi_primitive_names[key->opt.cs_prim_type]);
1183 fprintf(f, " opt.cs_indexed = %u\n", key->opt.cs_indexed);
1184 fprintf(f, " opt.cs_instancing = %u\n", key->opt.cs_instancing);
1185 fprintf(f, " opt.cs_primitive_restart = %u\n", key->opt.cs_primitive_restart);
1186 fprintf(f, " opt.cs_provoking_vertex_first = %u\n", key->opt.cs_provoking_vertex_first);
1187 fprintf(f, " opt.cs_need_correct_orientation = %u\n", key->opt.cs_need_correct_orientation);
1188 fprintf(f, " opt.cs_cull_front = %u\n", key->opt.cs_cull_front);
1189 fprintf(f, " opt.cs_cull_back = %u\n", key->opt.cs_cull_back);
1190 fprintf(f, " opt.cs_cull_z = %u\n", key->opt.cs_cull_z);
1191 fprintf(f, " opt.cs_halfz_clip_space = %u\n", key->opt.cs_halfz_clip_space);
1192 break;
1193
1194 case PIPE_SHADER_TESS_CTRL:
1195 if (shader->selector->screen->info.chip_class >= GFX9) {
1196 si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog, "part.tcs.ls_prolog", f);
1197 }
1198 fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
1199 fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%" PRIx64 "\n",
1200 key->mono.u.ff_tcs_inputs_to_copy);
1201 break;
1202
1203 case PIPE_SHADER_TESS_EVAL:
1204 fprintf(f, " as_es = %u\n", key->as_es);
1205 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1206 fprintf(f, " mono.u.vs_export_prim_id = %u\n", key->mono.u.vs_export_prim_id);
1207 break;
1208
1209 case PIPE_SHADER_GEOMETRY:
1210 if (shader->is_gs_copy_shader)
1211 break;
1212
1213 if (shader->selector->screen->info.chip_class >= GFX9 &&
1214 key->part.gs.es->type == PIPE_SHADER_VERTEX) {
1215 si_dump_shader_key_vs(key, &key->part.gs.vs_prolog, "part.gs.vs_prolog", f);
1216 }
1217 fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n",
1218 key->part.gs.prolog.tri_strip_adj_fix);
1219 fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
1220 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1221 break;
1222
1223 case PIPE_SHADER_COMPUTE:
1224 break;
1225
1226 case PIPE_SHADER_FRAGMENT:
1227 fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
1228 fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
1229 fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
1230 fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n",
1231 key->part.ps.prolog.force_persp_sample_interp);
1232 fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n",
1233 key->part.ps.prolog.force_linear_sample_interp);
1234 fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n",
1235 key->part.ps.prolog.force_persp_center_interp);
1236 fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n",
1237 key->part.ps.prolog.force_linear_center_interp);
1238 fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n",
1239 key->part.ps.prolog.bc_optimize_for_persp);
1240 fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n",
1241 key->part.ps.prolog.bc_optimize_for_linear);
1242 fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n",
1243 key->part.ps.prolog.samplemask_log_ps_iter);
1244 fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n",
1245 key->part.ps.epilog.spi_shader_col_format);
1246 fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
1247 fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
1248 fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
1249 fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
1250 fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
1251 fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n",
1252 key->part.ps.epilog.poly_line_smoothing);
1253 fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
1254 fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n",
1255 key->mono.u.ps.interpolate_at_sample_force_center);
1256 fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
1257 fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
1258 fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
1259 break;
1260
1261 default:
1262 assert(0);
1263 }
1264
1265 if ((shader_type == PIPE_SHADER_GEOMETRY || shader_type == PIPE_SHADER_TESS_EVAL ||
1266 shader_type == PIPE_SHADER_VERTEX) &&
1267 !key->as_es && !key->as_ls) {
1268 fprintf(f, " opt.kill_outputs = 0x%" PRIx64 "\n", key->opt.kill_outputs);
1269 fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
1270 if (shader_type != PIPE_SHADER_GEOMETRY)
1271 fprintf(f, " opt.ngg_culling = 0x%x\n", key->opt.ngg_culling);
1272 }
1273 }
1274
1275 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
1276 {
1277 struct si_shader *shader = ctx->shader;
1278 struct si_shader_info *info = &shader->selector->info;
1279 unsigned skip_vs_optim_mask = 0;
1280
1281 if ((ctx->type != PIPE_SHADER_VERTEX && ctx->type != PIPE_SHADER_TESS_EVAL) ||
1282 shader->key.as_ls || shader->key.as_es)
1283 return;
1284
1285 /* Optimizing these outputs is not possible, since they might be overridden
1286 * at runtime with S_028644_PT_SPRITE_TEX. */
1287 for (int i = 0; i < info->num_outputs; i++) {
1288 if (info->output_semantic_name[i] == TGSI_SEMANTIC_PCOORD ||
1289 info->output_semantic_name[i] == TGSI_SEMANTIC_TEXCOORD) {
1290 skip_vs_optim_mask |= 1u << shader->info.vs_output_param_offset[i];
1291 }
1292 }
1293
1294 ac_optimize_vs_outputs(&ctx->ac, ctx->main_fn, shader->info.vs_output_param_offset,
1295 info->num_outputs, skip_vs_optim_mask,
1296 &shader->info.nr_param_exports);
1297 }
1298
1299 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
1300 const struct si_vs_prolog_bits *prolog_key,
1301 const struct si_shader_key *key, bool ngg_cull_shader)
1302 {
1303 /* VGPR initialization fixup for Vega10 and Raven is always done in the
1304 * VS prolog. */
1305 return sel->vs_needs_prolog || prolog_key->ls_vgpr_fix ||
1306 prolog_key->unpack_instance_id_from_vertex_id ||
1307 (ngg_cull_shader && key->opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL);
1308 }
1309
1310 static bool si_build_main_function(struct si_shader_context *ctx, struct si_shader *shader,
1311 struct nir_shader *nir, bool free_nir, bool ngg_cull_shader)
1312 {
1313 struct si_shader_selector *sel = shader->selector;
1314 const struct si_shader_info *info = &sel->info;
1315
1316 ctx->shader = shader;
1317 ctx->type = sel->type;
1318
1319 ctx->num_const_buffers = util_last_bit(info->const_buffers_declared);
1320 ctx->num_shader_buffers = util_last_bit(info->shader_buffers_declared);
1321
1322 ctx->num_samplers = util_last_bit(info->samplers_declared);
1323 ctx->num_images = util_last_bit(info->images_declared);
1324
1325 si_llvm_init_resource_callbacks(ctx);
1326
1327 switch (ctx->type) {
1328 case PIPE_SHADER_VERTEX:
1329 si_llvm_init_vs_callbacks(ctx, ngg_cull_shader);
1330 break;
1331 case PIPE_SHADER_TESS_CTRL:
1332 si_llvm_init_tcs_callbacks(ctx);
1333 break;
1334 case PIPE_SHADER_TESS_EVAL:
1335 si_llvm_init_tes_callbacks(ctx, ngg_cull_shader);
1336 break;
1337 case PIPE_SHADER_GEOMETRY:
1338 si_llvm_init_gs_callbacks(ctx);
1339 break;
1340 case PIPE_SHADER_FRAGMENT:
1341 si_llvm_init_ps_callbacks(ctx);
1342 break;
1343 case PIPE_SHADER_COMPUTE:
1344 ctx->abi.load_local_group_size = si_llvm_get_block_size;
1345 break;
1346 default:
1347 assert(!"Unsupported shader type");
1348 return false;
1349 }
1350
1351 si_create_function(ctx, ngg_cull_shader);
1352
1353 if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)
1354 si_preload_esgs_ring(ctx);
1355
1356 if (ctx->type == PIPE_SHADER_GEOMETRY)
1357 si_preload_gs_rings(ctx);
1358 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
1359 si_llvm_preload_tes_rings(ctx);
1360
1361 if (ctx->type == PIPE_SHADER_TESS_CTRL && sel->info.tessfactors_are_def_in_all_invocs) {
1362 for (unsigned i = 0; i < 6; i++) {
1363 ctx->invoc0_tess_factors[i] = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
1364 }
1365 }
1366
1367 if (ctx->type == PIPE_SHADER_GEOMETRY) {
1368 for (unsigned i = 0; i < 4; i++) {
1369 ctx->gs_next_vertex[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1370 }
1371 if (shader->key.as_ngg) {
1372 for (unsigned i = 0; i < 4; ++i) {
1373 ctx->gs_curprim_verts[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1374 ctx->gs_generated_prims[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1375 }
1376
1377 unsigned scratch_size = 8;
1378 if (sel->so.num_outputs)
1379 scratch_size = 44;
1380
1381 assert(!ctx->gs_ngg_scratch);
1382 LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, scratch_size);
1383 ctx->gs_ngg_scratch =
1384 LLVMAddGlobalInAddressSpace(ctx->ac.module, ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
1385 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
1386 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
1387
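/* The size of ngg_emit is only known at link time; ac_rtld resolves it
 * through the "ngg_emit" LDS symbol set up in si_shader_binary_open. */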
1388 ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(
1389 ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
1390 LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
1391 LLVMSetAlignment(ctx->gs_ngg_emit, 4);
1392 }
1393 }
1394
1395 if (ctx->type != PIPE_SHADER_GEOMETRY && (shader->key.as_ngg && !shader->key.as_es)) {
1396 /* Unconditionally declare scratch space base for streamout and
1397 * vertex compaction. Whether space is actually allocated is
1398 * determined during linking / PM4 creation.
1399 *
1400 * Add an extra dword per vertex to ensure an odd stride, which
1401 * avoids bank conflicts for SoA accesses.
1402 */
1403 if (!gfx10_is_ngg_passthrough(shader))
1404 si_llvm_declare_esgs_ring(ctx);
1405
1406 /* This is really only needed when streamout and/or vertex
1407 * compaction is enabled.
1408 */
1409 if (!ctx->gs_ngg_scratch && (sel->so.num_outputs || shader->key.opt.ngg_culling)) {
1410 LLVMTypeRef asi32 = LLVMArrayType(ctx->ac.i32, 8);
1411 ctx->gs_ngg_scratch =
1412 LLVMAddGlobalInAddressSpace(ctx->ac.module, asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
1413 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
1414 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
1415 }
1416 }
1417
1418 /* For GFX9 merged shaders:
1419 * - Set EXEC for the first shader. If the prolog is present, set
1420 * EXEC there instead.
1421 * - Add a barrier before the second shader.
1422 * - In the second shader, reset EXEC to ~0 and wrap the main part in
1423 * an if-statement. This is required for correctness in geometry
1424 * shaders, to ensure that empty GS waves do not send GS_EMIT and
1425 * GS_CUT messages.
1426 *
1427 * For monolithic merged shaders, the first shader is wrapped in an
1428 * if-block together with its prolog in si_build_wrapper_function.
1429 *
1430 * NGG vertex and tess eval shaders running as the last
1431 * vertex/geometry stage handle execution explicitly using
1432 * if-statements.
1433 */
1434 if (ctx->screen->info.chip_class >= GFX9) {
1435 if (!shader->is_monolithic && (shader->key.as_es || shader->key.as_ls) &&
1436 (ctx->type == PIPE_SHADER_TESS_EVAL ||
1437 (ctx->type == PIPE_SHADER_VERTEX &&
1438 !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog, &shader->key, ngg_cull_shader)))) {
1439 si_init_exec_from_input(ctx, ctx->merged_wave_info, 0);
1440 } else if (ctx->type == PIPE_SHADER_TESS_CTRL || ctx->type == PIPE_SHADER_GEOMETRY ||
1441 (shader->key.as_ngg && !shader->key.as_es)) {
1442 LLVMValueRef thread_enabled;
1443 bool nested_barrier;
1444
1445 if (!shader->is_monolithic || (ctx->type == PIPE_SHADER_TESS_EVAL && shader->key.as_ngg &&
1446 !shader->key.as_es && !shader->key.opt.ngg_culling))
1447 ac_init_exec_full_mask(&ctx->ac);
1448
1449 if ((ctx->type == PIPE_SHADER_VERTEX || ctx->type == PIPE_SHADER_TESS_EVAL) &&
1450 shader->key.as_ngg && !shader->key.as_es && !shader->key.opt.ngg_culling) {
1451 gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);
1452
1453 /* Build the primitive export at the beginning
1454 * of the shader if possible.
1455 */
1456 if (gfx10_ngg_export_prim_early(shader))
1457 gfx10_ngg_build_export_prim(ctx, NULL, NULL);
1458 }
1459
1460 if (ctx->type == PIPE_SHADER_TESS_CTRL || ctx->type == PIPE_SHADER_GEOMETRY) {
1461 if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
1462 gfx10_ngg_gs_emit_prologue(ctx);
1463 nested_barrier = false;
1464 } else {
1465 nested_barrier = true;
1466 }
1467
1468 thread_enabled = si_is_gs_thread(ctx);
1469 } else {
1470 thread_enabled = si_is_es_thread(ctx);
1471 nested_barrier = false;
1472 }
1473
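/* Wrap the main part in "if (this thread is enabled)". The label is
 * arbitrary; it only has to match the endif emitted after the main part.
 */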
1474 ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
1475 ctx->merged_wrap_if_label = 11500;
1476 ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
1477
1478 if (nested_barrier) {
1479 /* Execute a barrier before the second shader in
1480 * a merged shader.
1481 *
1482 * Execute the barrier inside the conditional block,
1483 * so that empty waves can jump directly to s_endpgm,
1484 * which will also signal the barrier.
1485 *
1486 * This is possible in gfx9, because an empty wave
1487 * for the second shader does not participate in
1488 * the epilogue. With NGG, empty waves may still
1489 * be required to export data (e.g. GS output vertices),
1490 * so we cannot let them exit early.
1491 *
1492 * If the shader is TCS and the TCS epilog is present
1493 * and contains a barrier, it will wait there and then
1494 * reach s_endpgm.
1495 */
1496 si_llvm_emit_barrier(ctx);
1497 }
1498 }
1499 }
1500
1501 bool success = si_nir_build_llvm(ctx, nir);
1502 if (free_nir)
1503 ralloc_free(nir);
1504 if (!success) {
1505 fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
1506 return false;
1507 }
1508
1509 si_llvm_build_ret(ctx, ctx->return_value);
1510 return true;
1511 }
1512
1513 /**
1514 * Compute the VS prolog key, which contains all the information needed to
1515 * build the VS prolog function, and set shader->info bits where needed.
1516 *
1517 * \param info Shader info of the vertex shader.
1518 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
1519 * \param ngg_cull_shader Whether the preceding shader part is the NGG cull shader.
1520 * \param prolog_key Key of the VS prolog
1521 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
1522 * \param key Output shader part key.
1523 */
1524 static void si_get_vs_prolog_key(const struct si_shader_info *info, unsigned num_input_sgprs,
1525 bool ngg_cull_shader, const struct si_vs_prolog_bits *prolog_key,
1526 struct si_shader *shader_out, union si_shader_part_key *key)
1527 {
1528 memset(key, 0, sizeof(*key));
1529 key->vs_prolog.states = *prolog_key;
1530 key->vs_prolog.num_input_sgprs = num_input_sgprs;
1531 key->vs_prolog.num_inputs = info->num_inputs;
1532 key->vs_prolog.as_ls = shader_out->key.as_ls;
1533 key->vs_prolog.as_es = shader_out->key.as_es;
1534 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
1535 key->vs_prolog.as_prim_discard_cs = shader_out->key.opt.vs_as_prim_discard_cs;
1536
1537 if (ngg_cull_shader) {
1538 key->vs_prolog.gs_fast_launch_tri_list =
1539 !!(shader_out->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST);
1540 key->vs_prolog.gs_fast_launch_tri_strip =
1541 !!(shader_out->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP);
1542 } else {
1543 key->vs_prolog.has_ngg_cull_inputs = !!shader_out->key.opt.ngg_culling;
1544 }
1545
1546 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
1547 key->vs_prolog.as_ls = 1;
1548 key->vs_prolog.num_merged_next_stage_vgprs = 2;
1549 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
1550 key->vs_prolog.as_es = 1;
1551 key->vs_prolog.num_merged_next_stage_vgprs = 5;
1552 } else if (shader_out->key.as_ngg) {
1553 key->vs_prolog.num_merged_next_stage_vgprs = 5;
1554 }
1555
1556 /* Only one of these combinations can be set. as_ngg can be set with as_es. */
1557 assert(key->vs_prolog.as_ls + key->vs_prolog.as_ngg +
1558 (key->vs_prolog.as_es && !key->vs_prolog.as_ngg) + key->vs_prolog.as_prim_discard_cs <=
1559 1);
1560
1561 /* Enable loading the InstanceID VGPR. */
1562 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
1563
1564 if ((key->vs_prolog.states.instance_divisor_is_one |
1565 key->vs_prolog.states.instance_divisor_is_fetched) &
1566 input_mask)
1567 shader_out->info.uses_instanceid = true;
1568 }
1569
1570 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
1571 struct si_shader_selector *sel)
1572 {
1573 if (!compiler->low_opt_passes)
1574 return false;
1575
1576 /* Assume a slow CPU. */
1577 assert(!sel->screen->info.has_dedicated_vram && sel->screen->info.chip_class <= GFX8);
1578
1579 /* Reduce optimizations for a pathological dEQP test that contains
1580 * 2597 memory opcodes, mostly buffer stores. */
1581 return sel->type == PIPE_SHADER_COMPUTE && sel->info.num_memory_instructions > 1000;
1582 }
1583
1584 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel, bool *free_nir)
1585 {
1586 *free_nir = false;
1587
1588 if (sel->nir) {
1589 return sel->nir;
1590 } else if (sel->nir_binary) {
1591 struct pipe_screen *screen = &sel->screen->b;
1592 const void *options = screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR, sel->type);
1593
1594 struct blob_reader blob_reader;
1595 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
1596 *free_nir = true;
1597 return nir_deserialize(NULL, options, &blob_reader);
1598 }
1599 return NULL;
1600 }
1601
1602 static bool si_llvm_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
1603 struct si_shader *shader, struct pipe_debug_callback *debug,
1604 struct nir_shader *nir, bool free_nir)
1605 {
1606 struct si_shader_selector *sel = shader->selector;
1607 struct si_shader_context ctx;
1608
1609 si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));
1610
1611 LLVMValueRef ngg_cull_main_fn = NULL;
1612 if (shader->key.opt.ngg_culling) {
1613 if (!si_build_main_function(&ctx, shader, nir, false, true)) {
1614 si_llvm_dispose(&ctx);
1615 return false;
1616 }
1617 ngg_cull_main_fn = ctx.main_fn;
1618 ctx.main_fn = NULL;
1619 }
1620
1621 if (!si_build_main_function(&ctx, shader, nir, free_nir, false)) {
1622 si_llvm_dispose(&ctx);
1623 return false;
1624 }
1625
1626 if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
1627 LLVMValueRef parts[4];
1628 unsigned num_parts = 0;
1629 bool has_prolog = false;
1630 LLVMValueRef main_fn = ctx.main_fn;
1631
1632 if (ngg_cull_main_fn) {
1633 if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog, &shader->key, true)) {
1634 union si_shader_part_key prolog_key;
1635 si_get_vs_prolog_key(&sel->info, shader->info.num_input_sgprs, true,
1636 &shader->key.part.vs.prolog, shader, &prolog_key);
1637 prolog_key.vs_prolog.is_monolithic = true;
1638 si_llvm_build_vs_prolog(&ctx, &prolog_key);
1639 parts[num_parts++] = ctx.main_fn;
1640 has_prolog = true;
1641 }
1642 parts[num_parts++] = ngg_cull_main_fn;
1643 }
1644
1645 if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog, &shader->key, false)) {
1646 union si_shader_part_key prolog_key;
1647 si_get_vs_prolog_key(&sel->info, shader->info.num_input_sgprs, false,
1648 &shader->key.part.vs.prolog, shader, &prolog_key);
1649 prolog_key.vs_prolog.is_monolithic = true;
1650 si_llvm_build_vs_prolog(&ctx, &prolog_key);
1651 parts[num_parts++] = ctx.main_fn;
1652 has_prolog = true;
1653 }
1654 parts[num_parts++] = main_fn;
1655
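/* Part order as executed: optional VS prolog for the cull shader, the
 * NGG cull main part, optional VS prolog for the main shader, then the
 * main VS part.
 */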
1656 si_build_wrapper_function(&ctx, parts, num_parts, has_prolog ? 1 : 0, 0);
1657
1658 if (ctx.shader->key.opt.vs_as_prim_discard_cs)
1659 si_build_prim_discard_compute_shader(&ctx);
1660 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_EVAL && ngg_cull_main_fn) {
1661 LLVMValueRef parts[2];
1662
1663 parts[0] = ngg_cull_main_fn;
1664 parts[1] = ctx.main_fn;
1665
1666 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
1667 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
1668 if (sscreen->info.chip_class >= GFX9) {
1669 struct si_shader_selector *ls = shader->key.part.tcs.ls;
1670 LLVMValueRef parts[4];
1671 bool vs_needs_prolog =
1672 si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog, &shader->key, false);
1673
1674 /* TCS main part */
1675 parts[2] = ctx.main_fn;
1676
1677 /* TCS epilog */
1678 union si_shader_part_key tcs_epilog_key;
1679 memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
1680 tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
1681 si_llvm_build_tcs_epilog(&ctx, &tcs_epilog_key);
1682 parts[3] = ctx.main_fn;
1683
1684 /* VS as LS main part */
1685 nir = get_nir_shader(ls, &free_nir);
1686 struct si_shader shader_ls = {};
1687 shader_ls.selector = ls;
1688 shader_ls.key.as_ls = 1;
1689 shader_ls.key.mono = shader->key.mono;
1690 shader_ls.key.opt = shader->key.opt;
1691 shader_ls.is_monolithic = true;
1692
1693 if (!si_build_main_function(&ctx, &shader_ls, nir, free_nir, false)) {
1694 si_llvm_dispose(&ctx);
1695 return false;
1696 }
1697 shader->info.uses_instanceid |= ls->info.uses_instanceid;
1698 parts[1] = ctx.main_fn;
1699
1700 /* LS prolog */
1701 if (vs_needs_prolog) {
1702 union si_shader_part_key vs_prolog_key;
1703 si_get_vs_prolog_key(&ls->info, shader_ls.info.num_input_sgprs, false,
1704 &shader->key.part.tcs.ls_prolog, shader, &vs_prolog_key);
1705 vs_prolog_key.vs_prolog.is_monolithic = true;
1706 si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
1707 parts[0] = ctx.main_fn;
1708 }
1709
1710 /* Reset the shader context. */
1711 ctx.shader = shader;
1712 ctx.type = PIPE_SHADER_TESS_CTRL;
1713
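/* parts[] as executed: optional LS (VS) prolog, LS main, TCS main,
 * TCS epilog. If the prolog is missing, the array start and the part
 * indices passed below shift by one.
 */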
1714 si_build_wrapper_function(&ctx, parts + !vs_needs_prolog, 4 - !vs_needs_prolog,
1715 vs_needs_prolog, vs_needs_prolog ? 2 : 1);
1716 } else {
1717 LLVMValueRef parts[2];
1718 union si_shader_part_key epilog_key;
1719
1720 parts[0] = ctx.main_fn;
1721
1722 memset(&epilog_key, 0, sizeof(epilog_key));
1723 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
1724 si_llvm_build_tcs_epilog(&ctx, &epilog_key);
1725 parts[1] = ctx.main_fn;
1726
1727 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
1728 }
1729 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
1730 if (ctx.screen->info.chip_class >= GFX9) {
1731 struct si_shader_selector *es = shader->key.part.gs.es;
1732 LLVMValueRef es_prolog = NULL;
1733 LLVMValueRef es_main = NULL;
1734 LLVMValueRef gs_prolog = NULL;
1735 LLVMValueRef gs_main = ctx.main_fn;
1736
1737 /* GS prolog */
1738 union si_shader_part_key gs_prolog_key;
1739 memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
1740 gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
1741 gs_prolog_key.gs_prolog.is_monolithic = true;
1742 gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
1743 si_llvm_build_gs_prolog(&ctx, &gs_prolog_key);
1744 gs_prolog = ctx.main_fn;
1745
1746 /* ES main part */
1747 nir = get_nir_shader(es, &free_nir);
1748 struct si_shader shader_es = {};
1749 shader_es.selector = es;
1750 shader_es.key.as_es = 1;
1751 shader_es.key.as_ngg = shader->key.as_ngg;
1752 shader_es.key.mono = shader->key.mono;
1753 shader_es.key.opt = shader->key.opt;
1754 shader_es.is_monolithic = true;
1755
1756 if (!si_build_main_function(&ctx, &shader_es, nir, free_nir, false)) {
1757 si_llvm_dispose(&ctx);
1758 return false;
1759 }
1760 shader->info.uses_instanceid |= es->info.uses_instanceid;
1761 es_main = ctx.main_fn;
1762
1763 /* ES prolog */
1764 if (es->type == PIPE_SHADER_VERTEX &&
1765 si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog, &shader->key, false)) {
1766 union si_shader_part_key vs_prolog_key;
1767 si_get_vs_prolog_key(&es->info, shader_es.info.num_input_sgprs, false,
1768 &shader->key.part.gs.vs_prolog, shader, &vs_prolog_key);
1769 vs_prolog_key.vs_prolog.is_monolithic = true;
1770 si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
1771 es_prolog = ctx.main_fn;
1772 }
1773
1774 /* Reset the shader context. */
1775 ctx.shader = shader;
1776 ctx.type = PIPE_SHADER_GEOMETRY;
1777
1778 /* Prepare the array of shader parts. */
1779 LLVMValueRef parts[4];
1780 unsigned num_parts = 0, main_part, next_first_part;
1781
1782 if (es_prolog)
1783 parts[num_parts++] = es_prolog;
1784
1785 parts[main_part = num_parts++] = es_main;
1786 parts[next_first_part = num_parts++] = gs_prolog;
1787 parts[num_parts++] = gs_main;
1788
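/* main_part is the index of the ES main part within parts[], and
 * next_first_part is where the GS half (its prolog) begins.
 */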
1789 si_build_wrapper_function(&ctx, parts, num_parts, main_part, next_first_part);
1790 } else {
1791 LLVMValueRef parts[2];
1792 union si_shader_part_key prolog_key;
1793
1794 parts[1] = ctx.main_fn;
1795
1796 memset(&prolog_key, 0, sizeof(prolog_key));
1797 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
1798 si_llvm_build_gs_prolog(&ctx, &prolog_key);
1799 parts[0] = ctx.main_fn;
1800
1801 si_build_wrapper_function(&ctx, parts, 2, 1, 0);
1802 }
1803 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
1804 si_llvm_build_monolithic_ps(&ctx, shader);
1805 }
1806
1807 si_llvm_optimize_module(&ctx);
1808
1809 /* Post-optimization transformations and analysis. */
1810 si_optimize_vs_outputs(&ctx);
1811
1812 if ((debug && debug->debug_message) || si_can_dump_shader(sscreen, ctx.type)) {
1813 ctx.shader->info.private_mem_vgprs = ac_count_scratch_private_memory(ctx.main_fn);
1814 }
1815
1816 /* Make sure the input is a pointer and not integer followed by inttoptr. */
1817 assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) == LLVMPointerTypeKind);
1818
1819 /* Compile to bytecode. */
1820 if (!si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler, &ctx.ac, debug,
1821 ctx.type, si_get_shader_name(shader),
1822 si_should_optimize_less(compiler, shader->selector))) {
1823 si_llvm_dispose(&ctx);
1824 fprintf(stderr, "LLVM failed to compile shader\n");
1825 return false;
1826 }
1827
1828 si_llvm_dispose(&ctx);
1829 return true;
1830 }
1831
1832 bool si_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
1833 struct si_shader *shader, struct pipe_debug_callback *debug)
1834 {
1835 struct si_shader_selector *sel = shader->selector;
1836 bool free_nir;
1837 struct nir_shader *nir = get_nir_shader(sel, &free_nir);
1838
1839 /* Dump NIR before doing NIR->LLVM conversion in case the
1840 * conversion fails. */
1841 if (si_can_dump_shader(sscreen, sel->type) && !(sscreen->debug_flags & DBG(NO_NIR))) {
1842 nir_print_shader(nir, stderr);
1843 si_dump_streamout(&sel->so);
1844 }
1845
1846 memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
1847 sizeof(shader->info.vs_output_param_offset));
1848
1849 shader->info.uses_instanceid = sel->info.uses_instanceid;
1850
1851 /* TODO: ACO could compile non-monolithic shaders here (starting
1852 * with PS and NGG VS), but monolithic shaders should be compiled
1853 * by LLVM because their compilation is more complicated.
1854 */
1855 if (!si_llvm_compile_shader(sscreen, compiler, shader, debug, nir, free_nir))
1856 return false;
1857
1858 /* Validate SGPR and VGPR usage for compute to detect compiler bugs.
1859 * LLVM 3.9svn was known to exceed these limits.
1860 */
1861 if (sel->type == PIPE_SHADER_COMPUTE) {
1862 unsigned wave_size = sscreen->compute_wave_size;
1863 unsigned max_vgprs =
1864 sscreen->info.num_physical_wave64_vgprs_per_simd * (wave_size == 32 ? 2 : 1);
1865 unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
1866 unsigned max_sgprs_per_wave = 128;
1867 unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
1868 unsigned threads_per_tg = si_get_max_workgroup_size(shader);
1869 unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
1870 unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);
1871
1872 max_vgprs = max_vgprs / waves_per_simd;
1873 max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);
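/* Worked example with illustrative numbers: a 1024-thread workgroup at
 * wave64 is 16 waves, i.e. 4 waves per SIMD; with 256 physical wave64
 * VGPRs per SIMD, each wave may then use at most 64 VGPRs.
 */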
1874
1875 if (shader->config.num_sgprs > max_sgprs || shader->config.num_vgprs > max_vgprs) {
1876 fprintf(stderr,
1877 "LLVM failed to compile a shader correctly: "
1878 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
1879 shader->config.num_sgprs, shader->config.num_vgprs, max_sgprs, max_vgprs);
1880
1881 /* Just terminate the process, because dependent
1882 * shaders can hang due to bad input data, but use
1883 * the env var to allow shader-db to work.
1884 */
1885 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
1886 abort();
1887 }
1888 }
1889
1890 /* Add the scratch offset to input SGPRs. */
1891 if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(shader))
1892 shader->info.num_input_sgprs += 1; /* scratch byte offset */
1893
1894 /* Calculate the number of fragment input VGPRs. */
1895 if (sel->type == PIPE_SHADER_FRAGMENT) {
1896 shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(
1897 &shader->config, &shader->info.face_vgpr_index, &shader->info.ancillary_vgpr_index);
1898 }
1899
1900 si_calculate_max_simd_waves(shader);
1901 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
1902 return true;
1903 }
1904
1905 /**
1906 * Create, compile and return a shader part (prolog or epilog).
1907 *
1908 * \param sscreen screen
1909 * \param list list of shader parts of the same category
1910 * \param type shader type
1911 * \param prolog whether the part being requested is a prolog
1912 * \param key shader part key
1913 * \param compiler LLVM compiler
1914 * \param debug debug callback
1915 * \param build the callback responsible for building the shader part
1916 * \return non-NULL on success
1917 */
1918 static struct si_shader_part *
1919 si_get_shader_part(struct si_screen *sscreen, struct si_shader_part **list,
1920 enum pipe_shader_type type, bool prolog, union si_shader_part_key *key,
1921 struct ac_llvm_compiler *compiler, struct pipe_debug_callback *debug,
1922 void (*build)(struct si_shader_context *, union si_shader_part_key *),
1923 const char *name)
1924 {
1925 struct si_shader_part *result;
1926
1927 simple_mtx_lock(&sscreen->shader_parts_mutex);
1928
1929 /* Find existing. */
1930 for (result = *list; result; result = result->next) {
1931 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
1932 simple_mtx_unlock(&sscreen->shader_parts_mutex);
1933 return result;
1934 }
1935 }
1936
1937 /* Compile a new one. */
1938 result = CALLOC_STRUCT(si_shader_part);
1939 result->key = *key;
1940
1941 struct si_shader_selector sel = {};
1942 sel.screen = sscreen;
1943
1944 struct si_shader shader = {};
1945 shader.selector = &sel;
1946
1947 switch (type) {
1948 case PIPE_SHADER_VERTEX:
1949 shader.key.as_ls = key->vs_prolog.as_ls;
1950 shader.key.as_es = key->vs_prolog.as_es;
1951 shader.key.as_ngg = key->vs_prolog.as_ngg;
1952 shader.key.opt.vs_as_prim_discard_cs = key->vs_prolog.as_prim_discard_cs;
1953 break;
1954 case PIPE_SHADER_TESS_CTRL:
1955 assert(!prolog);
1956 shader.key.part.tcs.epilog = key->tcs_epilog.states;
1957 break;
1958 case PIPE_SHADER_GEOMETRY:
1959 assert(prolog);
1960 shader.key.as_ngg = key->gs_prolog.as_ngg;
1961 break;
1962 case PIPE_SHADER_FRAGMENT:
1963 if (prolog)
1964 shader.key.part.ps.prolog = key->ps_prolog.states;
1965 else
1966 shader.key.part.ps.epilog = key->ps_epilog.states;
1967 break;
1968 default:
1969 unreachable("bad shader part");
1970 }
1971
1972 struct si_shader_context ctx;
1973 si_llvm_context_init(&ctx, sscreen, compiler,
1974 si_get_wave_size(sscreen, type, shader.key.as_ngg, shader.key.as_es,
1975 shader.key.opt.vs_as_prim_discard_cs));
1976 ctx.shader = &shader;
1977 ctx.type = type;
1978
1979 build(&ctx, key);
1980
1981 /* Compile. */
1982 si_llvm_optimize_module(&ctx);
1983
1984 if (!si_compile_llvm(sscreen, &result->binary, &result->config, compiler, &ctx.ac, debug,
1985 ctx.type, name, false)) {
1986 FREE(result);
1987 result = NULL;
1988 goto out;
1989 }
1990
1991 result->next = *list;
1992 *list = result;
1993
1994 out:
1995 si_llvm_dispose(&ctx);
1996 simple_mtx_unlock(&sscreen->shader_parts_mutex);
1997 return result;
1998 }
1999
2000 static bool si_get_vs_prolog(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2001 struct si_shader *shader, struct pipe_debug_callback *debug,
2002 struct si_shader *main_part, const struct si_vs_prolog_bits *key)
2003 {
2004 struct si_shader_selector *vs = main_part->selector;
2005
2006 if (!si_vs_needs_prolog(vs, key, &shader->key, false))
2007 return true;
2008
2009 /* Get the prolog. */
2010 union si_shader_part_key prolog_key;
2011 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs, false, key, shader,
2012 &prolog_key);
2013
2014 shader->prolog =
2015 si_get_shader_part(sscreen, &sscreen->vs_prologs, PIPE_SHADER_VERTEX, true, &prolog_key,
2016 compiler, debug, si_llvm_build_vs_prolog, "Vertex Shader Prolog");
2017 return shader->prolog != NULL;
2018 }
2019
2020 /**
2021 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
2022 */
2023 static bool si_shader_select_vs_parts(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2024 struct si_shader *shader, struct pipe_debug_callback *debug)
2025 {
2026 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader, &shader->key.part.vs.prolog);
2027 }
2028
2029 /**
2030 * Select and compile (or reuse) TCS parts (epilog).
2031 */
2032 static bool si_shader_select_tcs_parts(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2033 struct si_shader *shader, struct pipe_debug_callback *debug)
2034 {
2035 if (sscreen->info.chip_class >= GFX9) {
2036 struct si_shader *ls_main_part = shader->key.part.tcs.ls->main_shader_part_ls;
2037
2038 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
2039 &shader->key.part.tcs.ls_prolog))
2040 return false;
2041
2042 shader->previous_stage = ls_main_part;
2043 }
2044
2045 /* Get the epilog. */
2046 union si_shader_part_key epilog_key;
2047 memset(&epilog_key, 0, sizeof(epilog_key));
2048 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
2049
2050 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs, PIPE_SHADER_TESS_CTRL, false,
2051 &epilog_key, compiler, debug, si_llvm_build_tcs_epilog,
2052 "Tessellation Control Shader Epilog");
2053 return shader->epilog != NULL;
2054 }
2055
2056 /**
2057 * Select and compile (or reuse) GS parts (prolog).
2058 */
2059 static bool si_shader_select_gs_parts(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2060 struct si_shader *shader, struct pipe_debug_callback *debug)
2061 {
2062 if (sscreen->info.chip_class >= GFX9) {
2063 struct si_shader *es_main_part;
2064 enum pipe_shader_type es_type = shader->key.part.gs.es->type;
2065
2066 if (shader->key.as_ngg)
2067 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
2068 else
2069 es_main_part = shader->key.part.gs.es->main_shader_part_es;
2070
2071 if (es_type == PIPE_SHADER_VERTEX &&
2072 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
2073 &shader->key.part.gs.vs_prolog))
2074 return false;
2075
2076 shader->previous_stage = es_main_part;
2077 }
2078
2079 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
2080 return true;
2081
2082 union si_shader_part_key prolog_key;
2083 memset(&prolog_key, 0, sizeof(prolog_key));
2084 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
2085 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
2086
2087 shader->prolog2 =
2088 si_get_shader_part(sscreen, &sscreen->gs_prologs, PIPE_SHADER_GEOMETRY, true, &prolog_key,
2089 compiler, debug, si_llvm_build_gs_prolog, "Geometry Shader Prolog");
2090 return shader->prolog2 != NULL;
2091 }
2092
2093 /**
2094 * Compute the PS prolog key, which contains all the information needed to
2095 * build the PS prolog function, and set related bits in shader->config.
2096 */
2097 void si_get_ps_prolog_key(struct si_shader *shader, union si_shader_part_key *key,
2098 bool separate_prolog)
2099 {
2100 struct si_shader_info *info = &shader->selector->info;
2101
2102 memset(key, 0, sizeof(*key));
2103 key->ps_prolog.states = shader->key.part.ps.prolog;
2104 key->ps_prolog.colors_read = info->colors_read;
2105 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
2106 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
2107 key->ps_prolog.wqm =
2108 info->uses_derivatives &&
2109 (key->ps_prolog.colors_read || key->ps_prolog.states.force_persp_sample_interp ||
2110 key->ps_prolog.states.force_linear_sample_interp ||
2111 key->ps_prolog.states.force_persp_center_interp ||
2112 key->ps_prolog.states.force_linear_center_interp ||
2113 key->ps_prolog.states.bc_optimize_for_persp || key->ps_prolog.states.bc_optimize_for_linear);
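/* The prolog must run in whole quad mode (WQM) so that the values it
 * interpolates are also defined in helper lanes, which the main part
 * needs when it computes derivatives from them.
 */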
2114 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
2115
2116 if (info->colors_read) {
2117 unsigned *color = shader->selector->color_attr_index;
2118
2119 if (shader->key.part.ps.prolog.color_two_side) {
2120 /* BCOLORs are stored after the last input. */
2121 key->ps_prolog.num_interp_inputs = info->num_inputs;
2122 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
2123 if (separate_prolog)
2124 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
2125 }
2126
2127 for (unsigned i = 0; i < 2; i++) {
2128 unsigned interp = info->input_interpolate[color[i]];
2129 unsigned location = info->input_interpolate_loc[color[i]];
2130
2131 if (!(info->colors_read & (0xf << i * 4)))
2132 continue;
2133
2134 key->ps_prolog.color_attr_index[i] = color[i];
2135
2136 if (shader->key.part.ps.prolog.flatshade_colors && interp == TGSI_INTERPOLATE_COLOR)
2137 interp = TGSI_INTERPOLATE_CONSTANT;
2138
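/* color_interp_vgpr_index is the VGPR offset of the I,J pair used to
 * interpolate this color: persp sample/center/centroid come first,
 * then (in the monolithic layout) persp pull model, then the linear
 * sets; -1 means a flat color that needs no interpolation.
 */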
2139 switch (interp) {
2140 case TGSI_INTERPOLATE_CONSTANT:
2141 key->ps_prolog.color_interp_vgpr_index[i] = -1;
2142 break;
2143 case TGSI_INTERPOLATE_PERSPECTIVE:
2144 case TGSI_INTERPOLATE_COLOR:
2145 /* Force the interpolation location for colors here. */
2146 if (shader->key.part.ps.prolog.force_persp_sample_interp)
2147 location = TGSI_INTERPOLATE_LOC_SAMPLE;
2148 if (shader->key.part.ps.prolog.force_persp_center_interp)
2149 location = TGSI_INTERPOLATE_LOC_CENTER;
2150
2151 switch (location) {
2152 case TGSI_INTERPOLATE_LOC_SAMPLE:
2153 key->ps_prolog.color_interp_vgpr_index[i] = 0;
2154 if (separate_prolog) {
2155 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
2156 }
2157 break;
2158 case TGSI_INTERPOLATE_LOC_CENTER:
2159 key->ps_prolog.color_interp_vgpr_index[i] = 2;
2160 if (separate_prolog) {
2161 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2162 }
2163 break;
2164 case TGSI_INTERPOLATE_LOC_CENTROID:
2165 key->ps_prolog.color_interp_vgpr_index[i] = 4;
2166 if (separate_prolog) {
2167 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTROID_ENA(1);
2168 }
2169 break;
2170 default:
2171 assert(0);
2172 }
2173 break;
2174 case TGSI_INTERPOLATE_LINEAR:
2175 /* Force the interpolation location for colors here. */
2176 if (shader->key.part.ps.prolog.force_linear_sample_interp)
2177 location = TGSI_INTERPOLATE_LOC_SAMPLE;
2178 if (shader->key.part.ps.prolog.force_linear_center_interp)
2179 location = TGSI_INTERPOLATE_LOC_CENTER;
2180
2181 /* The VGPR assignment for non-monolithic shaders
2182 * works because InitialPSInputAddr is set on the
2183 * main shader and PERSP_PULL_MODEL is never used.
2184 */
2185 switch (location) {
2186 case TGSI_INTERPOLATE_LOC_SAMPLE:
2187 key->ps_prolog.color_interp_vgpr_index[i] = separate_prolog ? 6 : 9;
2188 if (separate_prolog) {
2189 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
2190 }
2191 break;
2192 case TGSI_INTERPOLATE_LOC_CENTER:
2193 key->ps_prolog.color_interp_vgpr_index[i] = separate_prolog ? 8 : 11;
2194 if (separate_prolog) {
2195 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2196 }
2197 break;
2198 case TGSI_INTERPOLATE_LOC_CENTROID:
2199 key->ps_prolog.color_interp_vgpr_index[i] = separate_prolog ? 10 : 13;
2200 if (separate_prolog) {
2201 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTROID_ENA(1);
2202 }
2203 break;
2204 default:
2205 assert(0);
2206 }
2207 break;
2208 default:
2209 assert(0);
2210 }
2211 }
2212 }
2213 }
2214
2215 /**
2216 * Check whether a PS prolog is required based on the key.
2217 */
2218 bool si_need_ps_prolog(const union si_shader_part_key *key)
2219 {
2220 return key->ps_prolog.colors_read || key->ps_prolog.states.force_persp_sample_interp ||
2221 key->ps_prolog.states.force_linear_sample_interp ||
2222 key->ps_prolog.states.force_persp_center_interp ||
2223 key->ps_prolog.states.force_linear_center_interp ||
2224 key->ps_prolog.states.bc_optimize_for_persp ||
2225 key->ps_prolog.states.bc_optimize_for_linear || key->ps_prolog.states.poly_stipple ||
2226 key->ps_prolog.states.samplemask_log_ps_iter;
2227 }
2228
2229 /**
2230 * Compute the PS epilog key, which contains all the information needed to
2231 * build the PS epilog function.
2232 */
2233 void si_get_ps_epilog_key(struct si_shader *shader, union si_shader_part_key *key)
2234 {
2235 struct si_shader_info *info = &shader->selector->info;
2236 memset(key, 0, sizeof(*key));
2237 key->ps_epilog.colors_written = info->colors_written;
2238 key->ps_epilog.writes_z = info->writes_z;
2239 key->ps_epilog.writes_stencil = info->writes_stencil;
2240 key->ps_epilog.writes_samplemask = info->writes_samplemask;
2241 key->ps_epilog.states = shader->key.part.ps.epilog;
2242 }
2243
2244 /**
2245 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
2246 */
2247 static bool si_shader_select_ps_parts(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2248 struct si_shader *shader, struct pipe_debug_callback *debug)
2249 {
2250 union si_shader_part_key prolog_key;
2251 union si_shader_part_key epilog_key;
2252
2253 /* Get the prolog. */
2254 si_get_ps_prolog_key(shader, &prolog_key, true);
2255
2256 /* The prolog is a no-op if these aren't set. */
2257 if (si_need_ps_prolog(&prolog_key)) {
2258 shader->prolog =
2259 si_get_shader_part(sscreen, &sscreen->ps_prologs, PIPE_SHADER_FRAGMENT, true, &prolog_key,
2260 compiler, debug, si_llvm_build_ps_prolog, "Fragment Shader Prolog");
2261 if (!shader->prolog)
2262 return false;
2263 }
2264
2265 /* Get the epilog. */
2266 si_get_ps_epilog_key(shader, &epilog_key);
2267
2268 shader->epilog =
2269 si_get_shader_part(sscreen, &sscreen->ps_epilogs, PIPE_SHADER_FRAGMENT, false, &epilog_key,
2270 compiler, debug, si_llvm_build_ps_epilog, "Fragment Shader Epilog");
2271 if (!shader->epilog)
2272 return false;
2273
2274 /* Enable POS_FIXED_PT if polygon stippling is enabled. */
2275 if (shader->key.part.ps.prolog.poly_stipple) {
2276 shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
2277 assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
2278 }
2279
2280 /* Set up the enable bits for per-sample shading if needed. */
2281 if (shader->key.part.ps.prolog.force_persp_sample_interp &&
2282 (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
2283 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2284 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
2285 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
2286 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
2287 }
2288 if (shader->key.part.ps.prolog.force_linear_sample_interp &&
2289 (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
2290 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2291 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
2292 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
2293 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
2294 }
2295 if (shader->key.part.ps.prolog.force_persp_center_interp &&
2296 (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
2297 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2298 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
2299 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
2300 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2301 }
2302 if (shader->key.part.ps.prolog.force_linear_center_interp &&
2303 (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
2304 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2305 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
2306 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
2307 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2308 }
2309
2310 /* POS_W_FLOAT requires that one of the perspective weights is enabled. */
2311 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
2312 !(shader->config.spi_ps_input_ena & 0xf)) {
2313 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2314 assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
2315 }
2316
2317 /* At least one pair of interpolation weights must be enabled. */
2318 if (!(shader->config.spi_ps_input_ena & 0x7f)) {
2319 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2320 assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
2321 }
2322
2323 /* Samplemask fixup requires the sample ID. */
2324 if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
2325 shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
2326 assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
2327 }
2328
2329 /* The sample mask input is always enabled, because the API shader always
2330 * passes it through to the epilog. Disable it here if it's unused.
2331 */
2332 if (!shader->key.part.ps.epilog.poly_line_smoothing && !shader->selector->info.reads_samplemask)
2333 shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;
2334
2335 return true;
2336 }
2337
2338 void si_multiwave_lds_size_workaround(struct si_screen *sscreen, unsigned *lds_size)
2339 {
2340 /* This workaround is currently disabled: tessellation is all off-chip
2341 * and on-chip GS isn't used, so it is not needed.
2342 */
2343 return;
2344
2345 /* SPI barrier management bug:
2346 * Make sure we have at least 4k of LDS in use to avoid the bug.
2347 * It applies to workgroup sizes of more than one wavefront.
2348 */
2349 if (sscreen->info.family == CHIP_BONAIRE || sscreen->info.family == CHIP_KABINI)
2350 *lds_size = MAX2(*lds_size, 8);
2351 }
2352
2353 void si_fix_resource_usage(struct si_screen *sscreen, struct si_shader *shader)
2354 {
2355 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
2356
2357 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
2358
2359 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
2360 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
2361 si_multiwave_lds_size_workaround(sscreen, &shader->config.lds_size);
2362 }
2363 }
2364
2365 bool si_create_shader_variant(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2366 struct si_shader *shader, struct pipe_debug_callback *debug)
2367 {
2368 struct si_shader_selector *sel = shader->selector;
2369 struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
2370
2371 /* LS, ES, VS are compiled on demand if the main part hasn't been
2372 * compiled for that stage.
2373 *
2374 * GS is compiled on demand if the main part hasn't been compiled
2375 * for the chosen NGG-ness.
2376 *
2377 * Vertex shaders are compiled on demand when a vertex fetch
2378 * workaround must be applied.
2379 */
2380 if (shader->is_monolithic) {
2381 /* Monolithic shader (compiled as a whole, has many variants,
2382 * may take a long time to compile).
2383 */
2384 if (!si_compile_shader(sscreen, compiler, shader, debug))
2385 return false;
2386 } else {
2387 /* The shader consists of several parts:
2388 *
2389 * - the middle part is the user shader; it has only one variant
2390 * and was compiled during the creation of the shader
2391 * selector
2392 * - the prolog part is inserted at the beginning
2393 * - the epilog part is inserted at the end
2394 *
2395 * The prolog and epilog have many (but simple) variants.
2396 *
2397 * Starting with gfx9, geometry and tessellation control
2398 * shaders also contain the prolog and user shader parts of
2399 * the previous shader stage.
2400 */
2401
2402 if (!mainp)
2403 return false;
2404
2405 /* Copy the compiled shader data over. */
2406 shader->is_binary_shared = true;
2407 shader->binary = mainp->binary;
2408 shader->config = mainp->config;
2409 shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
2410 shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
2411 shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
2412 shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
2413 memcpy(shader->info.vs_output_param_offset, mainp->info.vs_output_param_offset,
2414 sizeof(mainp->info.vs_output_param_offset));
2415 shader->info.uses_instanceid = mainp->info.uses_instanceid;
2416 shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
2417 shader->info.nr_param_exports = mainp->info.nr_param_exports;
2418
2419 /* Select prologs and/or epilogs. */
2420 switch (sel->type) {
2421 case PIPE_SHADER_VERTEX:
2422 if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
2423 return false;
2424 break;
2425 case PIPE_SHADER_TESS_CTRL:
2426 if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
2427 return false;
2428 break;
2429 case PIPE_SHADER_TESS_EVAL:
2430 break;
2431 case PIPE_SHADER_GEOMETRY:
2432 if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
2433 return false;
2434 break;
2435 case PIPE_SHADER_FRAGMENT:
2436 if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
2437 return false;
2438
2439 /* Make sure we have at least as many VGPRs as there
2440 * are allocated inputs.
2441 */
2442 shader->config.num_vgprs = MAX2(shader->config.num_vgprs, shader->info.num_input_vgprs);
2443 break;
2444 default:;
2445 }
2446
2447 /* Update SGPR and VGPR counts. */
2448 if (shader->prolog) {
2449 shader->config.num_sgprs =
2450 MAX2(shader->config.num_sgprs, shader->prolog->config.num_sgprs);
2451 shader->config.num_vgprs =
2452 MAX2(shader->config.num_vgprs, shader->prolog->config.num_vgprs);
2453 }
2454 if (shader->previous_stage) {
2455 shader->config.num_sgprs =
2456 MAX2(shader->config.num_sgprs, shader->previous_stage->config.num_sgprs);
2457 shader->config.num_vgprs =
2458 MAX2(shader->config.num_vgprs, shader->previous_stage->config.num_vgprs);
2459 shader->config.spilled_sgprs =
2460 MAX2(shader->config.spilled_sgprs, shader->previous_stage->config.spilled_sgprs);
2461 shader->config.spilled_vgprs =
2462 MAX2(shader->config.spilled_vgprs, shader->previous_stage->config.spilled_vgprs);
2463 shader->info.private_mem_vgprs =
2464 MAX2(shader->info.private_mem_vgprs, shader->previous_stage->info.private_mem_vgprs);
2465 shader->config.scratch_bytes_per_wave =
2466 MAX2(shader->config.scratch_bytes_per_wave,
2467 shader->previous_stage->config.scratch_bytes_per_wave);
2468 shader->info.uses_instanceid |= shader->previous_stage->info.uses_instanceid;
2469 }
2470 if (shader->prolog2) {
2471 shader->config.num_sgprs =
2472 MAX2(shader->config.num_sgprs, shader->prolog2->config.num_sgprs);
2473 shader->config.num_vgprs =
2474 MAX2(shader->config.num_vgprs, shader->prolog2->config.num_vgprs);
2475 }
2476 if (shader->epilog) {
2477 shader->config.num_sgprs =
2478 MAX2(shader->config.num_sgprs, shader->epilog->config.num_sgprs);
2479 shader->config.num_vgprs =
2480 MAX2(shader->config.num_vgprs, shader->epilog->config.num_vgprs);
2481 }
2482 si_calculate_max_simd_waves(shader);
2483 }
2484
2485 if (shader->key.as_ngg) {
2486 assert(!shader->key.as_es && !shader->key.as_ls);
2487 gfx10_ngg_calculate_subgroup_info(shader);
2488 } else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
2489 gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
2490 }
2491
2492 si_fix_resource_usage(sscreen, shader);
2493 si_shader_dump(sscreen, shader, debug, stderr, true);
2494
2495 /* Upload. */
2496 if (!si_shader_binary_upload(sscreen, shader, 0)) {
2497 fprintf(stderr, "Failed to upload shader\n");
2498 return false;
2499 }
2500
2501 return true;
2502 }
2503
2504 void si_shader_binary_clean(struct si_shader_binary *binary)
2505 {
2506 free((void *)binary->elf_buffer);
2507 binary->elf_buffer = NULL;
2508
2509 free(binary->llvm_ir_string);
2510 binary->llvm_ir_string = NULL;
2511 }
2512
2513 void si_shader_destroy(struct si_shader *shader)
2514 {
2515 if (shader->scratch_bo)
2516 si_resource_reference(&shader->scratch_bo, NULL);
2517
2518 si_resource_reference(&shader->bo, NULL);
2519
2520 if (!shader->is_binary_shared)
2521 si_shader_binary_clean(&shader->binary);
2522
2523 free(shader->shader_log);
2524 }