ac,radeonsi: add ac_gpu_info::lds_size_per_cu
[mesa.git] src/gallium/drivers/radeonsi/si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "util/u_memory.h"
26 #include "tgsi/tgsi_strings.h"
27 #include "tgsi/tgsi_from_mesa.h"
28
29 #include "ac_exp_param.h"
30 #include "ac_rtld.h"
31 #include "si_shader_internal.h"
32 #include "si_pipe.h"
33 #include "sid.h"
34
35 #include "compiler/nir/nir.h"
36 #include "compiler/nir/nir_serialize.h"
37
38 static const char scratch_rsrc_dword0_symbol[] =
39 "SCRATCH_RSRC_DWORD0";
40
41 static const char scratch_rsrc_dword1_symbol[] =
42 "SCRATCH_RSRC_DWORD1";
43
44 static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
45
46 /** Whether the shader runs as a combination of multiple API shaders */
47 bool si_is_multi_part_shader(struct si_shader *shader)
48 {
49 if (shader->selector->screen->info.chip_class <= GFX8)
50 return false;
51
52 return shader->key.as_ls ||
53 shader->key.as_es ||
54 shader->selector->type == PIPE_SHADER_TESS_CTRL ||
55 shader->selector->type == PIPE_SHADER_GEOMETRY;
56 }
57
58 /** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
59 bool si_is_merged_shader(struct si_shader *shader)
60 {
61 return shader->key.as_ngg || si_is_multi_part_shader(shader);
62 }
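/*
 * In practice: on GFX9+ a vertex shader compiled with key.as_ls is the
 * first half of the merged LSHS stage (VS feeding TCS), and one compiled
 * with key.as_es is the first half of the merged ESGS stage (VS or TES
 * feeding GS). si_is_merged_shader() also covers NGG, where the last
 * pre-rasterization stage runs on the HW GS stage.
 */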
63
64 /**
65 * Returns a unique index for a per-patch semantic name and index. The index
66 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
67 * can be calculated.
68 */
69 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
70 {
71 switch (semantic_name) {
72 case TGSI_SEMANTIC_TESSOUTER:
73 return 0;
74 case TGSI_SEMANTIC_TESSINNER:
75 return 1;
76 case TGSI_SEMANTIC_PATCH:
77 assert(index < 30);
78 return 2 + index;
79
80 default:
81 assert(!"invalid semantic name");
82 return 0;
83 }
84 }
85
86 /**
87 * Returns a unique index for a semantic name and index. The index must be
88 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
89 * calculated.
90 */
91 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
92 unsigned is_varying)
93 {
94 switch (semantic_name) {
95 case TGSI_SEMANTIC_POSITION:
96 return 0;
97 case TGSI_SEMANTIC_GENERIC:
98 		/* Some shader stages use the highest used IO index
99 		 * to determine the size to allocate for inputs/outputs
100 		 * (in LDS, tess and GS rings), so GENERIC should be placed
101 		 * right after POSITION to make that size as small as possible.
102 		 */
103 if (index < SI_MAX_IO_GENERIC)
104 return 1 + index;
105
106 assert(!"invalid generic index");
107 return 0;
108 case TGSI_SEMANTIC_FOG:
109 return SI_MAX_IO_GENERIC + 1;
110 case TGSI_SEMANTIC_COLOR:
111 assert(index < 2);
112 return SI_MAX_IO_GENERIC + 2 + index;
113 case TGSI_SEMANTIC_BCOLOR:
114 assert(index < 2);
115 /* If it's a varying, COLOR and BCOLOR alias. */
116 if (is_varying)
117 return SI_MAX_IO_GENERIC + 2 + index;
118 else
119 return SI_MAX_IO_GENERIC + 4 + index;
120 case TGSI_SEMANTIC_TEXCOORD:
121 assert(index < 8);
122 return SI_MAX_IO_GENERIC + 6 + index;
123
124 /* These are rarely used between LS and HS or ES and GS. */
125 case TGSI_SEMANTIC_CLIPDIST:
126 assert(index < 2);
127 return SI_MAX_IO_GENERIC + 6 + 8 + index;
128 case TGSI_SEMANTIC_CLIPVERTEX:
129 return SI_MAX_IO_GENERIC + 6 + 8 + 2;
130 case TGSI_SEMANTIC_PSIZE:
131 return SI_MAX_IO_GENERIC + 6 + 8 + 3;
132
133 /* These can't be written by LS, HS, and ES. */
134 case TGSI_SEMANTIC_LAYER:
135 return SI_MAX_IO_GENERIC + 6 + 8 + 4;
136 case TGSI_SEMANTIC_VIEWPORT_INDEX:
137 return SI_MAX_IO_GENERIC + 6 + 8 + 5;
138 case TGSI_SEMANTIC_PRIMID:
139 STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
140 return SI_MAX_IO_GENERIC + 6 + 8 + 6;
141 default:
142 fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
143 assert(!"invalid semantic name");
144 return 0;
145 }
146 }
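/*
 * Illustrative sketch of the intended use (not an actual call site in this
 * file): callers accumulate a 64-bit mask of used slots, e.g.
 *
 *   uint64_t outputs_written = 0;
 *   outputs_written |= 1ull << si_shader_io_get_unique_index(name, index,
 *                                                            false);
 *
 * The STATIC_ASSERT above guarantees every returned index fits in bits
 * 0..63; si_shader_io_get_unique_index_patch() is used the same way with
 * a 32-bit mask for per-patch slots.
 */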
147
148 static void si_dump_streamout(struct pipe_stream_output_info *so)
149 {
150 unsigned i;
151
152 if (so->num_outputs)
153 fprintf(stderr, "STREAMOUT\n");
154
155 for (i = 0; i < so->num_outputs; i++) {
156 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
157 so->output[i].start_component;
158 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
159 i, so->output[i].output_buffer,
160 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
161 so->output[i].register_index,
162 mask & 1 ? "x" : "",
163 mask & 2 ? "y" : "",
164 mask & 4 ? "z" : "",
165 mask & 8 ? "w" : "");
166 }
167 }
168
169 static void declare_streamout_params(struct si_shader_context *ctx,
170 struct pipe_stream_output_info *so)
171 {
172 if (ctx->screen->use_ngg_streamout) {
173 if (ctx->type == PIPE_SHADER_TESS_EVAL)
174 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
175 return;
176 }
177
178 /* Streamout SGPRs. */
179 if (so->num_outputs) {
180 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
181 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
182 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
183 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
184 }
185
186 /* A streamout buffer offset is loaded if the stride is non-zero. */
187 for (int i = 0; i < 4; i++) {
188 if (!so->stride[i])
189 continue;
190
191 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
192 }
193 }
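/*
 * Illustrative example of the resulting signature: for a legacy (non-NGG
 * streamout) VS with so->num_outputs > 0 and only stride[0] non-zero,
 * this declares exactly three SGPR args: streamout_config,
 * streamout_write_index and streamout_offset[0].
 */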
194
195 unsigned si_get_max_workgroup_size(const struct si_shader *shader)
196 {
197 switch (shader->selector->type) {
198 case PIPE_SHADER_VERTEX:
199 case PIPE_SHADER_TESS_EVAL:
200 return shader->key.as_ngg ? 128 : 0;
201
202 case PIPE_SHADER_TESS_CTRL:
203 /* Return this so that LLVM doesn't remove s_barrier
204 * instructions on chips where we use s_barrier. */
205 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
206
207 case PIPE_SHADER_GEOMETRY:
208 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
209
210 case PIPE_SHADER_COMPUTE:
211 break; /* see below */
212
213 default:
214 return 0;
215 }
216
217 const unsigned *properties = shader->selector->info.properties;
218 unsigned max_work_group_size =
219 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
220 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
221 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
222
223 if (!max_work_group_size) {
224 		/* This is a variable group size compute shader;
225 		 * compile it for the maximum possible group size.
226 		 */
227 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
228 }
229 return max_work_group_size;
230 }
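/*
 * Example (values are illustrative): a compute shader with a fixed
 * 8x8x1 block returns 8 * 8 * 1 = 64 here, while a variable-group-size
 * shader (all three block properties are 0) is compiled for
 * SI_MAX_VARIABLE_THREADS_PER_BLOCK threads.
 */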
231
232 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
233 bool assign_params)
234 {
235 enum ac_arg_type const_shader_buf_type;
236
237 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
238 ctx->shader->selector->info.shader_buffers_declared == 0)
239 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
240 else
241 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
242
243 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
244 assign_params ? &ctx->const_and_shader_buffers :
245 &ctx->other_const_and_shader_buffers);
246 }
247
248 static void declare_samplers_and_images(struct si_shader_context *ctx,
249 bool assign_params)
250 {
251 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
252 assign_params ? &ctx->samplers_and_images :
253 &ctx->other_samplers_and_images);
254 }
255
256 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
257 bool assign_params)
258 {
259 declare_const_and_shader_buffers(ctx, assign_params);
260 declare_samplers_and_images(ctx, assign_params);
261 }
262
263 static void declare_global_desc_pointers(struct si_shader_context *ctx)
264 {
265 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
266 &ctx->rw_buffers);
267 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
268 &ctx->bindless_samplers_and_images);
269 }
270
271 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
272 {
273 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
274 if (!ctx->shader->is_gs_copy_shader) {
275 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
276 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
277 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
278 }
279 }
280
281 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
282 {
283 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
284
285 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
286 if (num_vbos_in_user_sgprs) {
287 unsigned user_sgprs = ctx->args.num_sgprs_used;
288
289 if (si_is_merged_shader(ctx->shader))
290 user_sgprs -= 8;
291 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
292
293 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
294 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
295 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
296
297 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
298 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
299 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
300 }
301 }
302
303 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
304 unsigned *num_prolog_vgprs,
305 bool ngg_cull_shader)
306 {
307 struct si_shader *shader = ctx->shader;
308
309 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
310 if (shader->key.as_ls) {
311 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
312 if (ctx->screen->info.chip_class >= GFX10) {
313 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
314 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
315 } else {
316 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
317 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
318 }
319 } else if (ctx->screen->info.chip_class >= GFX10) {
320 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
321 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
322 &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
323 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
324 } else {
325 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
326 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
327 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
328 }
329
330 if (!shader->is_gs_copy_shader) {
331 if (shader->key.opt.ngg_culling && !ngg_cull_shader) {
332 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
333 &ctx->ngg_old_thread_id);
334 }
335
336 /* Vertex load indices. */
337 if (shader->selector->info.num_inputs) {
338 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
339 &ctx->vertex_index0);
340 for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
341 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
342 }
343 *num_prolog_vgprs += shader->selector->info.num_inputs;
344 }
345 }
346
347 static void declare_vs_blit_inputs(struct si_shader_context *ctx,
348 unsigned vs_blit_property)
349 {
350 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
351 &ctx->vs_blit_inputs); /* i16 x1, y1 */
352 	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x2, y2 */
353 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
354
355 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
356 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
357 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
358 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
359 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
360 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
361 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
362 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
363 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
364 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
365 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
366 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
367 }
368 }
369
370 static void declare_tes_input_vgprs(struct si_shader_context *ctx, bool ngg_cull_shader)
371 {
372 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
373 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
374 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
375 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
376
377 if (ctx->shader->key.opt.ngg_culling && !ngg_cull_shader) {
378 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
379 &ctx->ngg_old_thread_id);
380 }
381 }
382
383 enum {
384 /* Convenient merged shader definitions. */
385 SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
386 SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
387 };
388
389 void si_add_arg_checked(struct ac_shader_args *args,
390 enum ac_arg_regfile file,
391 unsigned registers, enum ac_arg_type type,
392 struct ac_arg *arg,
393 unsigned idx)
394 {
395 assert(args->arg_count == idx);
396 ac_add_arg(args, file, registers, type, arg);
397 }
398
399 void si_create_function(struct si_shader_context *ctx, bool ngg_cull_shader)
400 {
401 struct si_shader *shader = ctx->shader;
402 LLVMTypeRef returns[AC_MAX_ARGS];
403 unsigned i, num_return_sgprs;
404 unsigned num_returns = 0;
405 unsigned num_prolog_vgprs = 0;
406 unsigned type = ctx->type;
407 unsigned vs_blit_property =
408 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
409
410 memset(&ctx->args, 0, sizeof(ctx->args));
411
412 /* Set MERGED shaders. */
413 if (ctx->screen->info.chip_class >= GFX9) {
414 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
415 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
416 else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
417 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
418 }
419
420 switch (type) {
421 case PIPE_SHADER_VERTEX:
422 declare_global_desc_pointers(ctx);
423
424 if (vs_blit_property) {
425 declare_vs_blit_inputs(ctx, vs_blit_property);
426
427 /* VGPRs */
428 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
429 break;
430 }
431
432 declare_per_stage_desc_pointers(ctx, true);
433 declare_vs_specific_input_sgprs(ctx);
434 if (!shader->is_gs_copy_shader)
435 declare_vb_descriptor_input_sgprs(ctx);
436
437 if (shader->key.as_es) {
438 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
439 &ctx->es2gs_offset);
440 } else if (shader->key.as_ls) {
441 /* no extra parameters */
442 } else {
443 /* The locations of the other parameters are assigned dynamically. */
444 declare_streamout_params(ctx, &shader->selector->so);
445 }
446
447 /* VGPRs */
448 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
449
450 /* Return values */
451 if (shader->key.opt.vs_as_prim_discard_cs) {
452 for (i = 0; i < 4; i++)
453 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
454 }
455 break;
456
457 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
458 declare_global_desc_pointers(ctx);
459 declare_per_stage_desc_pointers(ctx, true);
460 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
461 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
462 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
463 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
464 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
465 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
466
467 /* VGPRs */
468 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
469 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
470
471 /* param_tcs_offchip_offset and param_tcs_factor_offset are
472 * placed after the user SGPRs.
473 */
474 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
475 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
476 for (i = 0; i < 11; i++)
477 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
478 break;
479
480 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
481 /* Merged stages have 8 system SGPRs at the beginning. */
482 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
483 declare_per_stage_desc_pointers(ctx,
484 ctx->type == PIPE_SHADER_TESS_CTRL);
485 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
486 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
487 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
488 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
489 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
490 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
491
492 declare_global_desc_pointers(ctx);
493 declare_per_stage_desc_pointers(ctx,
494 ctx->type == PIPE_SHADER_VERTEX);
495 declare_vs_specific_input_sgprs(ctx);
496
497 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
498 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
499 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
500 declare_vb_descriptor_input_sgprs(ctx);
501
502 /* VGPRs (first TCS, then VS) */
503 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
504 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
505
506 if (ctx->type == PIPE_SHADER_VERTEX) {
507 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
508
509 /* LS return values are inputs to the TCS main shader part. */
510 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
511 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
512 for (i = 0; i < 2; i++)
513 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
514 } else {
515 /* TCS return values are inputs to the TCS epilog.
516 *
517 * param_tcs_offchip_offset, param_tcs_factor_offset,
518 * param_tcs_offchip_layout, and param_rw_buffers
519 * should be passed to the epilog.
520 */
521 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
522 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
523 for (i = 0; i < 11; i++)
524 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
525 }
526 break;
527
528 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
529 /* Merged stages have 8 system SGPRs at the beginning. */
530 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
531 declare_per_stage_desc_pointers(ctx,
532 ctx->type == PIPE_SHADER_GEOMETRY);
533
534 if (ctx->shader->key.as_ngg)
535 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
536 else
537 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
538
539 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
540 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
541 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
542 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
543 &ctx->small_prim_cull_info); /* SPI_SHADER_PGM_LO_GS << 8 */
544 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
545
546 declare_global_desc_pointers(ctx);
547 if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
548 declare_per_stage_desc_pointers(ctx,
549 (ctx->type == PIPE_SHADER_VERTEX ||
550 ctx->type == PIPE_SHADER_TESS_EVAL));
551 }
552
553 if (ctx->type == PIPE_SHADER_VERTEX) {
554 if (vs_blit_property)
555 declare_vs_blit_inputs(ctx, vs_blit_property);
556 else
557 declare_vs_specific_input_sgprs(ctx);
558 } else {
559 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
560 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
561 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
562 /* Declare as many input SGPRs as the VS has. */
563 }
564
565 if (ctx->type == PIPE_SHADER_VERTEX)
566 declare_vb_descriptor_input_sgprs(ctx);
567
568 /* VGPRs (first GS, then VS/TES) */
569 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
570 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
571 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
572 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
573 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
574
575 if (ctx->type == PIPE_SHADER_VERTEX) {
576 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
577 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
578 declare_tes_input_vgprs(ctx, ngg_cull_shader);
579 }
580
581 if ((ctx->shader->key.as_es || ngg_cull_shader) &&
582 (ctx->type == PIPE_SHADER_VERTEX ||
583 ctx->type == PIPE_SHADER_TESS_EVAL)) {
584 unsigned num_user_sgprs, num_vgprs;
585
586 if (ctx->type == PIPE_SHADER_VERTEX) {
587 /* For the NGG cull shader, add 1 SGPR to hold
588 * the vertex buffer pointer.
589 */
590 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR + ngg_cull_shader;
591
592 if (ngg_cull_shader && shader->selector->num_vbos_in_user_sgprs) {
593 assert(num_user_sgprs <= 8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
594 num_user_sgprs = SI_SGPR_VS_VB_DESCRIPTOR_FIRST +
595 shader->selector->num_vbos_in_user_sgprs * 4;
596 }
597 } else {
598 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
599 }
600
601 /* The NGG cull shader has to return all 9 VGPRs + the old thread ID.
602 *
603 * The normal merged ESGS shader only has to return the 5 VGPRs
604 * for the GS stage.
605 */
606 num_vgprs = ngg_cull_shader ? 10 : 5;
607
608 /* ES return values are inputs to GS. */
609 for (i = 0; i < 8 + num_user_sgprs; i++)
610 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
611 for (i = 0; i < num_vgprs; i++)
612 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
613 }
614 break;
615
616 case PIPE_SHADER_TESS_EVAL:
617 declare_global_desc_pointers(ctx);
618 declare_per_stage_desc_pointers(ctx, true);
619 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
620 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
621 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
622
623 if (shader->key.as_es) {
624 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
625 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
626 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
627 } else {
628 declare_streamout_params(ctx, &shader->selector->so);
629 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
630 }
631
632 /* VGPRs */
633 declare_tes_input_vgprs(ctx, ngg_cull_shader);
634 break;
635
636 case PIPE_SHADER_GEOMETRY:
637 declare_global_desc_pointers(ctx);
638 declare_per_stage_desc_pointers(ctx, true);
639 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
640 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
641
642 /* VGPRs */
643 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
644 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
645 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
646 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
647 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
648 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
649 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
650 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
651 break;
652
653 case PIPE_SHADER_FRAGMENT:
654 declare_global_desc_pointers(ctx);
655 declare_per_stage_desc_pointers(ctx, true);
656 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL,
657 SI_PARAM_ALPHA_REF);
658 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
659 &ctx->args.prim_mask, SI_PARAM_PRIM_MASK);
660
661 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
662 SI_PARAM_PERSP_SAMPLE);
663 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
664 &ctx->args.persp_center, SI_PARAM_PERSP_CENTER);
665 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
666 &ctx->args.persp_centroid, SI_PARAM_PERSP_CENTROID);
667 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
668 NULL, SI_PARAM_PERSP_PULL_MODEL);
669 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
670 &ctx->args.linear_sample, SI_PARAM_LINEAR_SAMPLE);
671 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
672 &ctx->args.linear_center, SI_PARAM_LINEAR_CENTER);
673 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
674 &ctx->args.linear_centroid, SI_PARAM_LINEAR_CENTROID);
675 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT,
676 NULL, SI_PARAM_LINE_STIPPLE_TEX);
677 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
678 &ctx->args.frag_pos[0], SI_PARAM_POS_X_FLOAT);
679 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
680 &ctx->args.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
681 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
682 &ctx->args.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
683 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
684 &ctx->args.frag_pos[3], SI_PARAM_POS_W_FLOAT);
685 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
686 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
687 &ctx->args.front_face, SI_PARAM_FRONT_FACE);
688 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
689 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
690 &ctx->args.ancillary, SI_PARAM_ANCILLARY);
691 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
692 &ctx->args.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
693 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
694 &ctx->pos_fixed_pt, SI_PARAM_POS_FIXED_PT);
695
696 /* Color inputs from the prolog. */
697 if (shader->selector->info.colors_read) {
698 unsigned num_color_elements =
699 util_bitcount(shader->selector->info.colors_read);
700
701 for (i = 0; i < num_color_elements; i++)
702 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
703
704 num_prolog_vgprs += num_color_elements;
705 }
706
707 /* Outputs for the epilog. */
708 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
709 num_returns =
710 num_return_sgprs +
711 util_bitcount(shader->selector->info.colors_written) * 4 +
712 shader->selector->info.writes_z +
713 shader->selector->info.writes_stencil +
714 shader->selector->info.writes_samplemask +
715 1 /* SampleMaskIn */;
716
717 num_returns = MAX2(num_returns,
718 num_return_sgprs +
719 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
720
721 for (i = 0; i < num_return_sgprs; i++)
722 returns[i] = ctx->ac.i32;
723 for (; i < num_returns; i++)
724 returns[i] = ctx->ac.f32;
725 break;
726
727 case PIPE_SHADER_COMPUTE:
728 declare_global_desc_pointers(ctx);
729 declare_per_stage_desc_pointers(ctx, true);
730 if (shader->selector->info.uses_grid_size)
731 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT,
732 &ctx->args.num_work_groups);
733 if (shader->selector->info.uses_block_size &&
734 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
735 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
736
737 unsigned cs_user_data_dwords =
738 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
739 if (cs_user_data_dwords) {
740 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT,
741 &ctx->cs_user_data);
742 }
743
744 /* Hardware SGPRs. */
745 for (i = 0; i < 3; i++) {
746 if (shader->selector->info.uses_block_id[i]) {
747 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
748 &ctx->args.workgroup_ids[i]);
749 }
750 }
751 if (shader->selector->info.uses_subgroup_info)
752 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
753
754 /* Hardware VGPRs. */
755 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
756 &ctx->args.local_invocation_ids);
757 break;
758 default:
759 assert(0 && "unimplemented shader");
760 return;
761 }
762
763 si_llvm_create_func(ctx, ngg_cull_shader ? "ngg_cull_main" : "main",
764 returns, num_returns, si_get_max_workgroup_size(shader));
765
766 /* Reserve register locations for VGPR inputs the PS prolog may need. */
767 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
768 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
769 "InitialPSInputAddr",
770 S_0286D0_PERSP_SAMPLE_ENA(1) |
771 S_0286D0_PERSP_CENTER_ENA(1) |
772 S_0286D0_PERSP_CENTROID_ENA(1) |
773 S_0286D0_LINEAR_SAMPLE_ENA(1) |
774 S_0286D0_LINEAR_CENTER_ENA(1) |
775 S_0286D0_LINEAR_CENTROID_ENA(1) |
776 S_0286D0_FRONT_FACE_ENA(1) |
777 S_0286D0_ANCILLARY_ENA(1) |
778 S_0286D0_POS_FIXED_PT_ENA(1));
779 }
780
781 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
782 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
783
784 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
785 shader->info.num_input_vgprs -= num_prolog_vgprs;
786
787 if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
788 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
789 /* The LSHS size is not known until draw time, so we append it
790 * at the end of whatever LDS use there may be in the rest of
791 * the shader (currently none, unless LLVM decides to do its
792 * own LDS-based lowering).
793 */
794 ctx->ac.lds = LLVMAddGlobalInAddressSpace(
795 ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
796 "__lds_end", AC_ADDR_SPACE_LDS);
797 LLVMSetAlignment(ctx->ac.lds, 256);
798 } else {
799 ac_declare_lds_as_pointer(&ctx->ac);
800 }
801 }
802
803 	/* Unlike radv, we override these arguments in the prolog, so they
804 	 * appear to the API shader as normal arguments.
805 	 */
806 if (ctx->type == PIPE_SHADER_VERTEX) {
807 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
808 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
809 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
810 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
811 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
812 }
813 }
814
815 /* For the UMR disassembler. */
816 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
817 #define DEBUGGER_NUM_MARKERS 5
818
819 static bool si_shader_binary_open(struct si_screen *screen,
820 struct si_shader *shader,
821 struct ac_rtld_binary *rtld)
822 {
823 const struct si_shader_selector *sel = shader->selector;
824 const char *part_elfs[5];
825 size_t part_sizes[5];
826 unsigned num_parts = 0;
827
828 #define add_part(shader_or_part) \
829 if (shader_or_part) { \
830 part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
831 part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
832 num_parts++; \
833 }
834
835 add_part(shader->prolog);
836 add_part(shader->previous_stage);
837 add_part(shader->prolog2);
838 add_part(shader);
839 add_part(shader->epilog);
840
841 #undef add_part
842
843 struct ac_rtld_symbol lds_symbols[2];
844 unsigned num_lds_symbols = 0;
845
846 if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
847 (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
848 /* We add this symbol even on LLVM <= 8 to ensure that
849 * shader->config.lds_size is set correctly below.
850 */
851 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
852 sym->name = "esgs_ring";
853 sym->size = shader->gs_info.esgs_ring_size;
854 sym->align = 64 * 1024;
855 }
856
857 if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
858 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
859 sym->name = "ngg_emit";
860 sym->size = shader->ngg.ngg_emit_size * 4;
861 sym->align = 4;
862 }
863
864 bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
865 .info = &screen->info,
866 .options = {
867 .halt_at_entry = screen->options.halt_shaders,
868 },
869 .shader_type = tgsi_processor_to_shader_stage(sel->type),
870 .wave_size = si_get_shader_wave_size(shader),
871 .num_parts = num_parts,
872 .elf_ptrs = part_elfs,
873 .elf_sizes = part_sizes,
874 .num_shared_lds_symbols = num_lds_symbols,
875 .shared_lds_symbols = lds_symbols });
876
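/*
 * config.lds_size below is stored in allocation granules, not bytes.
 * Illustrative example (sizes assumed): with the GFX7+ granule of 512
 * bytes, rtld->lds_size = 5000 bytes becomes
 * align(5000, 512) / 512 = 5120 / 512 = 10 granules.
 */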
877 if (rtld->lds_size > 0) {
878 unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
879 shader->config.lds_size =
880 align(rtld->lds_size, alloc_granularity) / alloc_granularity;
881 }
882
883 return ok;
884 }
885
886 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
887 {
888 struct ac_rtld_binary rtld;
889 si_shader_binary_open(screen, shader, &rtld);
890 return rtld.exec_size;
891 }
892
893 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
894 {
895 uint64_t *scratch_va = data;
896
897 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
898 *value = (uint32_t)*scratch_va;
899 return true;
900 }
901 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
902 /* Enable scratch coalescing. */
903 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) |
904 S_008F04_SWIZZLE_ENABLE(1);
905 return true;
906 }
907
908 return false;
909 }
910
911 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
912 uint64_t scratch_va)
913 {
914 struct ac_rtld_binary binary;
915 if (!si_shader_binary_open(sscreen, shader, &binary))
916 return false;
917
918 si_resource_reference(&shader->bo, NULL);
919 shader->bo = si_aligned_buffer_create(&sscreen->b,
920 sscreen->info.cpdma_prefetch_writes_memory ?
921 0 : SI_RESOURCE_FLAG_READ_ONLY,
922 PIPE_USAGE_IMMUTABLE,
923 align(binary.rx_size, SI_CPDMA_ALIGNMENT),
924 256);
925 if (!shader->bo)
926 return false;
927
928 /* Upload. */
929 struct ac_rtld_upload_info u = {};
930 u.binary = &binary;
931 u.get_external_symbol = si_get_external_symbol;
932 u.cb_data = &scratch_va;
933 u.rx_va = shader->bo->gpu_address;
934 u.rx_ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
935 PIPE_TRANSFER_READ_WRITE |
936 PIPE_TRANSFER_UNSYNCHRONIZED |
937 RADEON_TRANSFER_TEMPORARY);
938 if (!u.rx_ptr)
939 return false;
940
941 bool ok = ac_rtld_upload(&u);
942
943 sscreen->ws->buffer_unmap(shader->bo->buf);
944 ac_rtld_close(&binary);
945
946 return ok;
947 }
948
949 static void si_shader_dump_disassembly(struct si_screen *screen,
950 const struct si_shader_binary *binary,
951 enum pipe_shader_type shader_type,
952 unsigned wave_size,
953 struct pipe_debug_callback *debug,
954 const char *name, FILE *file)
955 {
956 struct ac_rtld_binary rtld_binary;
957
958 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
959 .info = &screen->info,
960 .shader_type = tgsi_processor_to_shader_stage(shader_type),
961 .wave_size = wave_size,
962 .num_parts = 1,
963 .elf_ptrs = &binary->elf_buffer,
964 .elf_sizes = &binary->elf_size }))
965 return;
966
967 const char *disasm;
968 size_t nbytes;
969
970 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
971 goto out;
972
973 if (nbytes > INT_MAX)
974 goto out;
975
976 if (debug && debug->debug_message) {
977 /* Very long debug messages are cut off, so send the
978 * disassembly one line at a time. This causes more
979 * overhead, but on the plus side it simplifies
980 * parsing of resulting logs.
981 */
982 pipe_debug_message(debug, SHADER_INFO,
983 "Shader Disassembly Begin");
984
985 uint64_t line = 0;
986 while (line < nbytes) {
987 int count = nbytes - line;
988 const char *nl = memchr(disasm + line, '\n', nbytes - line);
989 if (nl)
990 count = nl - (disasm + line);
991
992 if (count) {
993 pipe_debug_message(debug, SHADER_INFO,
994 "%.*s", count, disasm + line);
995 }
996
997 line += count + 1;
998 }
999
1000 pipe_debug_message(debug, SHADER_INFO,
1001 "Shader Disassembly End");
1002 }
1003
1004 if (file) {
1005 fprintf(file, "Shader %s disassembly:\n", name);
1006 fprintf(file, "%*s", (int)nbytes, disasm);
1007 }
1008
1009 out:
1010 ac_rtld_close(&rtld_binary);
1011 }
1012
1013 static void si_calculate_max_simd_waves(struct si_shader *shader)
1014 {
1015 struct si_screen *sscreen = shader->selector->screen;
1016 struct ac_shader_config *conf = &shader->config;
1017 unsigned num_inputs = shader->selector->info.num_inputs;
1018 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
1019 unsigned lds_per_wave = 0;
1020 unsigned max_simd_waves;
1021
1022 max_simd_waves = sscreen->info.max_wave64_per_simd;
1023
1024 /* Compute LDS usage for PS. */
1025 switch (shader->selector->type) {
1026 case PIPE_SHADER_FRAGMENT:
1027 /* The minimum usage per wave is (num_inputs * 48). The maximum
1028 * usage is (num_inputs * 48 * 16).
1029 * We can get anything in between and it varies between waves.
1030 *
1031 * The 48 bytes per input for a single primitive is equal to
1032 * 4 bytes/component * 4 components/input * 3 points.
1033 *
1034 * Other stages don't know the size at compile time or don't
1035 * allocate LDS per wave, but instead they do it per thread group.
1036 */
1037 lds_per_wave = conf->lds_size * lds_increment +
1038 align(num_inputs * 48, lds_increment);
1039 break;
1040 case PIPE_SHADER_COMPUTE:
1041 if (shader->selector) {
1042 unsigned max_workgroup_size =
1043 si_get_max_workgroup_size(shader);
1044 lds_per_wave = (conf->lds_size * lds_increment) /
1045 DIV_ROUND_UP(max_workgroup_size,
1046 sscreen->compute_wave_size);
1047 }
1048 break;
1049 default:;
1050 }
1051
1052 /* Compute the per-SIMD wave counts. */
1053 if (conf->num_sgprs) {
1054 max_simd_waves =
1055 MIN2(max_simd_waves,
1056 sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
1057 }
1058
1059 if (conf->num_vgprs) {
1060 /* Always print wave limits as Wave64, so that we can compare
1061 * Wave32 and Wave64 with shader-db fairly. */
1062 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
1063 max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
1064 }
1065
1066 unsigned max_lds_per_simd = sscreen->info.lds_size_per_cu / 4;
1067 if (lds_per_wave)
1068 max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
1069
1070 shader->info.max_simd_waves = max_simd_waves;
1071 }
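/*
 * Illustrative walk-through of the limits above (all numbers are
 * assumptions chosen for the example, not taken from a specific chip):
 * a GFX7+ PS with num_inputs = 8, 40 SGPRs, 64 VGPRs, lds_size = 0,
 * on a SIMD offering 10 Wave64 slots, 800 physical SGPRs, 256 Wave64
 * VGPRs and 64 KB of LDS per CU:
 *
 *   lds_per_wave   = 0 * 512 + align(8 * 48, 512)   = 512 bytes
 *   SGPR limit     = 800 / 40                        = 20 waves
 *   VGPR limit     = 256 / 64                        = 4 waves
 *   LDS limit      = (65536 / 4) / 512               = 32 waves
 *   max_simd_waves = MIN2 of all of the above and 10 = 4 waves
 *
 * i.e. this hypothetical shader would be VGPR-limited.
 */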
1072
1073 void si_shader_dump_stats_for_shader_db(struct si_screen *screen,
1074 struct si_shader *shader,
1075 struct pipe_debug_callback *debug)
1076 {
1077 const struct ac_shader_config *conf = &shader->config;
1078
1079 if (screen->options.debug_disassembly)
1080 si_shader_dump_disassembly(screen, &shader->binary,
1081 shader->selector->type,
1082 si_get_shader_wave_size(shader),
1083 debug, "main", NULL);
1084
1085 pipe_debug_message(debug, SHADER_INFO,
1086 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
1087 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
1088 "Spilled VGPRs: %d PrivMem VGPRs: %d",
1089 conf->num_sgprs, conf->num_vgprs,
1090 si_get_shader_binary_size(screen, shader),
1091 conf->lds_size, conf->scratch_bytes_per_wave,
1092 shader->info.max_simd_waves, conf->spilled_sgprs,
1093 conf->spilled_vgprs, shader->info.private_mem_vgprs);
1094 }
1095
1096 static void si_shader_dump_stats(struct si_screen *sscreen,
1097 struct si_shader *shader,
1098 FILE *file,
1099 bool check_debug_option)
1100 {
1101 const struct ac_shader_config *conf = &shader->config;
1102
1103 if (!check_debug_option ||
1104 si_can_dump_shader(sscreen, shader->selector->type)) {
1105 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
1106 fprintf(file, "*** SHADER CONFIG ***\n"
1107 "SPI_PS_INPUT_ADDR = 0x%04x\n"
1108 "SPI_PS_INPUT_ENA = 0x%04x\n",
1109 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
1110 }
1111
1112 fprintf(file, "*** SHADER STATS ***\n"
1113 "SGPRS: %d\n"
1114 "VGPRS: %d\n"
1115 "Spilled SGPRs: %d\n"
1116 "Spilled VGPRs: %d\n"
1117 "Private memory VGPRs: %d\n"
1118 "Code Size: %d bytes\n"
1119 "LDS: %d blocks\n"
1120 "Scratch: %d bytes per wave\n"
1121 "Max Waves: %d\n"
1122 "********************\n\n\n",
1123 conf->num_sgprs, conf->num_vgprs,
1124 conf->spilled_sgprs, conf->spilled_vgprs,
1125 shader->info.private_mem_vgprs,
1126 si_get_shader_binary_size(sscreen, shader),
1127 conf->lds_size, conf->scratch_bytes_per_wave,
1128 shader->info.max_simd_waves);
1129 }
1130 }
1131
1132 const char *si_get_shader_name(const struct si_shader *shader)
1133 {
1134 switch (shader->selector->type) {
1135 case PIPE_SHADER_VERTEX:
1136 if (shader->key.as_es)
1137 return "Vertex Shader as ES";
1138 else if (shader->key.as_ls)
1139 return "Vertex Shader as LS";
1140 else if (shader->key.opt.vs_as_prim_discard_cs)
1141 return "Vertex Shader as Primitive Discard CS";
1142 else if (shader->key.as_ngg)
1143 return "Vertex Shader as ESGS";
1144 else
1145 return "Vertex Shader as VS";
1146 case PIPE_SHADER_TESS_CTRL:
1147 return "Tessellation Control Shader";
1148 case PIPE_SHADER_TESS_EVAL:
1149 if (shader->key.as_es)
1150 return "Tessellation Evaluation Shader as ES";
1151 else if (shader->key.as_ngg)
1152 return "Tessellation Evaluation Shader as ESGS";
1153 else
1154 return "Tessellation Evaluation Shader as VS";
1155 case PIPE_SHADER_GEOMETRY:
1156 if (shader->is_gs_copy_shader)
1157 return "GS Copy Shader as VS";
1158 else
1159 return "Geometry Shader";
1160 case PIPE_SHADER_FRAGMENT:
1161 return "Pixel Shader";
1162 case PIPE_SHADER_COMPUTE:
1163 return "Compute Shader";
1164 default:
1165 return "Unknown Shader";
1166 }
1167 }
1168
1169 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
1170 struct pipe_debug_callback *debug,
1171 FILE *file, bool check_debug_option)
1172 {
1173 enum pipe_shader_type shader_type = shader->selector->type;
1174
1175 if (!check_debug_option ||
1176 si_can_dump_shader(sscreen, shader_type))
1177 si_dump_shader_key(shader, file);
1178
1179 if (!check_debug_option && shader->binary.llvm_ir_string) {
1180 if (shader->previous_stage &&
1181 shader->previous_stage->binary.llvm_ir_string) {
1182 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
1183 si_get_shader_name(shader));
1184 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
1185 }
1186
1187 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
1188 si_get_shader_name(shader));
1189 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
1190 }
1191
1192 if (!check_debug_option ||
1193 (si_can_dump_shader(sscreen, shader_type) &&
1194 !(sscreen->debug_flags & DBG(NO_ASM)))) {
1195 unsigned wave_size = si_get_shader_wave_size(shader);
1196
1197 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
1198
1199 if (shader->prolog)
1200 si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
1201 shader_type, wave_size, debug, "prolog", file);
1202 if (shader->previous_stage)
1203 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
1204 shader_type, wave_size, debug, "previous stage", file);
1205 if (shader->prolog2)
1206 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
1207 shader_type, wave_size, debug, "prolog2", file);
1208
1209 si_shader_dump_disassembly(sscreen, &shader->binary, shader_type,
1210 wave_size, debug, "main", file);
1211
1212 if (shader->epilog)
1213 si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
1214 shader_type, wave_size, debug, "epilog", file);
1215 fprintf(file, "\n");
1216 }
1217
1218 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
1219 }
1220
1221 static void si_dump_shader_key_vs(const struct si_shader_key *key,
1222 const struct si_vs_prolog_bits *prolog,
1223 const char *prefix, FILE *f)
1224 {
1225 fprintf(f, " %s.instance_divisor_is_one = %u\n",
1226 prefix, prolog->instance_divisor_is_one);
1227 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
1228 prefix, prolog->instance_divisor_is_fetched);
1229 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
1230 prefix, prolog->unpack_instance_id_from_vertex_id);
1231 fprintf(f, " %s.ls_vgpr_fix = %u\n",
1232 prefix, prolog->ls_vgpr_fix);
1233
1234 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
1235 fprintf(f, " mono.vs.fix_fetch = {");
1236 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
1237 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
1238 if (i)
1239 fprintf(f, ", ");
1240 if (!fix.bits)
1241 fprintf(f, "0");
1242 else
1243 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
1244 fix.u.num_channels_m1, fix.u.format);
1245 }
1246 fprintf(f, "}\n");
1247 }
1248
1249 static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
1250 {
1251 const struct si_shader_key *key = &shader->key;
1252 enum pipe_shader_type shader_type = shader->selector->type;
1253
1254 fprintf(f, "SHADER KEY\n");
1255
1256 switch (shader_type) {
1257 case PIPE_SHADER_VERTEX:
1258 si_dump_shader_key_vs(key, &key->part.vs.prolog,
1259 "part.vs.prolog", f);
1260 fprintf(f, " as_es = %u\n", key->as_es);
1261 fprintf(f, " as_ls = %u\n", key->as_ls);
1262 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1263 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
1264 key->mono.u.vs_export_prim_id);
1265 fprintf(f, " opt.vs_as_prim_discard_cs = %u\n",
1266 key->opt.vs_as_prim_discard_cs);
1267 fprintf(f, " opt.cs_prim_type = %s\n",
1268 tgsi_primitive_names[key->opt.cs_prim_type]);
1269 fprintf(f, " opt.cs_indexed = %u\n",
1270 key->opt.cs_indexed);
1271 fprintf(f, " opt.cs_instancing = %u\n",
1272 key->opt.cs_instancing);
1273 fprintf(f, " opt.cs_primitive_restart = %u\n",
1274 key->opt.cs_primitive_restart);
1275 fprintf(f, " opt.cs_provoking_vertex_first = %u\n",
1276 key->opt.cs_provoking_vertex_first);
1277 fprintf(f, " opt.cs_need_correct_orientation = %u\n",
1278 key->opt.cs_need_correct_orientation);
1279 fprintf(f, " opt.cs_cull_front = %u\n",
1280 key->opt.cs_cull_front);
1281 fprintf(f, " opt.cs_cull_back = %u\n",
1282 key->opt.cs_cull_back);
1283 fprintf(f, " opt.cs_cull_z = %u\n",
1284 key->opt.cs_cull_z);
1285 fprintf(f, " opt.cs_halfz_clip_space = %u\n",
1286 key->opt.cs_halfz_clip_space);
1287 break;
1288
1289 case PIPE_SHADER_TESS_CTRL:
1290 if (shader->selector->screen->info.chip_class >= GFX9) {
1291 si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
1292 "part.tcs.ls_prolog", f);
1293 }
1294 fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
1295 fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
1296 break;
1297
1298 case PIPE_SHADER_TESS_EVAL:
1299 fprintf(f, " as_es = %u\n", key->as_es);
1300 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1301 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
1302 key->mono.u.vs_export_prim_id);
1303 break;
1304
1305 case PIPE_SHADER_GEOMETRY:
1306 if (shader->is_gs_copy_shader)
1307 break;
1308
1309 if (shader->selector->screen->info.chip_class >= GFX9 &&
1310 key->part.gs.es->type == PIPE_SHADER_VERTEX) {
1311 si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
1312 "part.gs.vs_prolog", f);
1313 }
1314 fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
1315 fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
1316 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1317 break;
1318
1319 case PIPE_SHADER_COMPUTE:
1320 break;
1321
1322 case PIPE_SHADER_FRAGMENT:
1323 fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
1324 fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
1325 fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
1326 fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
1327 fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
1328 fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
1329 fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
1330 fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
1331 fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
1332 fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n", key->part.ps.prolog.samplemask_log_ps_iter);
1333 fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
1334 fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
1335 fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
1336 fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
1337 fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
1338 fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
1339 fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
1340 fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
1341 fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n", key->mono.u.ps.interpolate_at_sample_force_center);
1342 fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
1343 fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
1344 fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
1345 break;
1346
1347 default:
1348 assert(0);
1349 }
1350
1351 if ((shader_type == PIPE_SHADER_GEOMETRY ||
1352 shader_type == PIPE_SHADER_TESS_EVAL ||
1353 shader_type == PIPE_SHADER_VERTEX) &&
1354 !key->as_es && !key->as_ls) {
1355 fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
1356 fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
1357 if (shader_type != PIPE_SHADER_GEOMETRY)
1358 fprintf(f, " opt.ngg_culling = 0x%x\n", key->opt.ngg_culling);
1359 }
1360 }
1361
1362 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
1363 {
1364 struct si_shader *shader = ctx->shader;
1365 struct si_shader_info *info = &shader->selector->info;
1366
1367 if ((ctx->type != PIPE_SHADER_VERTEX &&
1368 ctx->type != PIPE_SHADER_TESS_EVAL) ||
1369 shader->key.as_ls ||
1370 shader->key.as_es)
1371 return;
1372
1373 ac_optimize_vs_outputs(&ctx->ac,
1374 ctx->main_fn,
1375 shader->info.vs_output_param_offset,
1376 info->num_outputs,
1377 &shader->info.nr_param_exports);
1378 }
1379
1380 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
1381 const struct si_vs_prolog_bits *prolog_key,
1382 const struct si_shader_key *key,
1383 bool ngg_cull_shader)
1384 {
1385 /* VGPR initialization fixup for Vega10 and Raven is always done in the
1386 * VS prolog. */
1387 return sel->vs_needs_prolog ||
1388 prolog_key->ls_vgpr_fix ||
1389 prolog_key->unpack_instance_id_from_vertex_id ||
1390 (ngg_cull_shader && key->opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL);
1391 }
1392
1393 static bool si_build_main_function(struct si_shader_context *ctx,
1394 struct si_shader *shader,
1395 struct nir_shader *nir, bool free_nir,
1396 bool ngg_cull_shader)
1397 {
1398 struct si_shader_selector *sel = shader->selector;
1399 const struct si_shader_info *info = &sel->info;
1400
1401 ctx->shader = shader;
1402 ctx->type = sel->type;
1403
1404 ctx->num_const_buffers = util_last_bit(info->const_buffers_declared);
1405 ctx->num_shader_buffers = util_last_bit(info->shader_buffers_declared);
1406
1407 ctx->num_samplers = util_last_bit(info->samplers_declared);
1408 ctx->num_images = util_last_bit(info->images_declared);
1409
1410 si_llvm_init_resource_callbacks(ctx);
1411
1412 switch (ctx->type) {
1413 case PIPE_SHADER_VERTEX:
1414 si_llvm_init_vs_callbacks(ctx, ngg_cull_shader);
1415 break;
1416 case PIPE_SHADER_TESS_CTRL:
1417 si_llvm_init_tcs_callbacks(ctx);
1418 break;
1419 case PIPE_SHADER_TESS_EVAL:
1420 si_llvm_init_tes_callbacks(ctx, ngg_cull_shader);
1421 break;
1422 case PIPE_SHADER_GEOMETRY:
1423 si_llvm_init_gs_callbacks(ctx);
1424 break;
1425 case PIPE_SHADER_FRAGMENT:
1426 si_llvm_init_ps_callbacks(ctx);
1427 break;
1428 case PIPE_SHADER_COMPUTE:
1429 ctx->abi.load_local_group_size = si_llvm_get_block_size;
1430 break;
1431 default:
1432 assert(!"Unsupported shader type");
1433 return false;
1434 }
1435
1436 si_create_function(ctx, ngg_cull_shader);
1437
1438 if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)
1439 si_preload_esgs_ring(ctx);
1440
1441 if (ctx->type == PIPE_SHADER_GEOMETRY)
1442 si_preload_gs_rings(ctx);
1443 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
1444 si_llvm_preload_tes_rings(ctx);
1445
1446 if (ctx->type == PIPE_SHADER_TESS_CTRL &&
1447 sel->info.tessfactors_are_def_in_all_invocs) {
1448 for (unsigned i = 0; i < 6; i++) {
1449 ctx->invoc0_tess_factors[i] =
1450 ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
1451 }
1452 }
1453
1454 if (ctx->type == PIPE_SHADER_GEOMETRY) {
1455 for (unsigned i = 0; i < 4; i++) {
1456 ctx->gs_next_vertex[i] =
1457 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1458 }
1459 if (shader->key.as_ngg) {
1460 for (unsigned i = 0; i < 4; ++i) {
1461 ctx->gs_curprim_verts[i] =
1462 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1463 ctx->gs_generated_prims[i] =
1464 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1465 }
1466
1467 unsigned scratch_size = 8;
1468 if (sel->so.num_outputs)
1469 scratch_size = 44;
1470
1471 assert(!ctx->gs_ngg_scratch);
1472 LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, scratch_size);
1473 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1474 ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
1475 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
1476 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
1477
1478 ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1479 LLVMArrayType(ctx->ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
1480 LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
1481 LLVMSetAlignment(ctx->gs_ngg_emit, 4);
1482 }
1483 }
1484
1485 if (ctx->type != PIPE_SHADER_GEOMETRY &&
1486 (shader->key.as_ngg && !shader->key.as_es)) {
1487 /* Unconditionally declare scratch space base for streamout and
1488 * vertex compaction. Whether space is actually allocated is
1489 * determined during linking / PM4 creation.
1490 *
1491 * Add an extra dword per vertex to ensure an odd stride, which
1492 * avoids bank conflicts for SoA accesses.
1493 */
1494 if (!gfx10_is_ngg_passthrough(shader))
1495 si_llvm_declare_esgs_ring(ctx);
1496
1497 		/* This is really only needed when streamout and/or vertex
1498 * compaction is enabled.
1499 */
1500 if (!ctx->gs_ngg_scratch &&
1501 (sel->so.num_outputs || shader->key.opt.ngg_culling)) {
1502 LLVMTypeRef asi32 = LLVMArrayType(ctx->ac.i32, 8);
1503 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1504 asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
1505 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
1506 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
1507 }
1508 }
1509
1510 /* For GFX9 merged shaders:
1511 * - Set EXEC for the first shader. If the prolog is present, set
1512 * EXEC there instead.
1513 * - Add a barrier before the second shader.
1514 * - In the second shader, reset EXEC to ~0 and wrap the main part in
1515 * an if-statement. This is required for correctness in geometry
1516 * shaders, to ensure that empty GS waves do not send GS_EMIT and
1517 * GS_CUT messages.
1518 *
1519 * For monolithic merged shaders, the first shader is wrapped in an
1520 * if-block together with its prolog in si_build_wrapper_function.
1521 *
1522 * NGG vertex and tess eval shaders running as the last
1523 * vertex/geometry stage handle execution explicitly using
1524 * if-statements.
1525 */
1526 if (ctx->screen->info.chip_class >= GFX9) {
1527 if (!shader->is_monolithic &&
1528 (shader->key.as_es || shader->key.as_ls) &&
1529 (ctx->type == PIPE_SHADER_TESS_EVAL ||
1530 (ctx->type == PIPE_SHADER_VERTEX &&
1531 !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog,
1532 &shader->key, ngg_cull_shader)))) {
1533 si_init_exec_from_input(ctx,
1534 ctx->merged_wave_info, 0);
1535 } else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
1536 ctx->type == PIPE_SHADER_GEOMETRY ||
1537 (shader->key.as_ngg && !shader->key.as_es)) {
1538 LLVMValueRef thread_enabled;
1539 bool nested_barrier;
1540
1541 if (!shader->is_monolithic ||
1542 (ctx->type == PIPE_SHADER_TESS_EVAL &&
1543 shader->key.as_ngg && !shader->key.as_es &&
1544 !shader->key.opt.ngg_culling))
1545 ac_init_exec_full_mask(&ctx->ac);
1546
1547 if ((ctx->type == PIPE_SHADER_VERTEX ||
1548 ctx->type == PIPE_SHADER_TESS_EVAL) &&
1549 shader->key.as_ngg && !shader->key.as_es &&
1550 !shader->key.opt.ngg_culling) {
1551 gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);
1552
1553 /* Build the primitive export at the beginning
1554 * of the shader if possible.
1555 */
1556 if (gfx10_ngg_export_prim_early(shader))
1557 gfx10_ngg_build_export_prim(ctx, NULL, NULL);
1558 }
1559
1560 if (ctx->type == PIPE_SHADER_TESS_CTRL ||
1561 ctx->type == PIPE_SHADER_GEOMETRY) {
1562 if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
1563 gfx10_ngg_gs_emit_prologue(ctx);
1564 nested_barrier = false;
1565 } else {
1566 nested_barrier = true;
1567 }
1568
1569 thread_enabled = si_is_gs_thread(ctx);
1570 } else {
1571 thread_enabled = si_is_es_thread(ctx);
1572 nested_barrier = false;
1573 }
1574
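/* Wrap the main part in an if-block. The label is an arbitrary value
 * that only has to match the corresponding ac_build_endif.
 */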
1575 ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
1576 ctx->merged_wrap_if_label = 11500;
1577 ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
1578
1579 if (nested_barrier) {
1580 /* Execute a barrier before the second shader in
1581 * a merged shader.
1582 *
1583 * Execute the barrier inside the conditional block,
1584 * so that empty waves can jump directly to s_endpgm,
1585 * which will also signal the barrier.
1586 *
1587 * This is possible in gfx9, because an empty wave
1588 * for the second shader does not participate in
1589 * the epilogue. With NGG, empty waves may still
1590 * be required to export data (e.g. GS output vertices),
1591 * so we cannot let them exit early.
1592 *
1593 * If the shader is TCS and the TCS epilog is present
1594 * and contains a barrier, it will wait there and then
1595 * reach s_endpgm.
1596 */
1597 si_llvm_emit_barrier(ctx);
1598 }
1599 }
1600 }
1601
1602 if (sel->force_correct_derivs_after_kill) {
1603 ctx->postponed_kill = ac_build_alloca_undef(&ctx->ac, ctx->ac.i1, "");
1604 /* true = don't kill. */
1605 LLVMBuildStore(ctx->ac.builder, ctx->ac.i1true,
1606 ctx->postponed_kill);
1607 }
1608
1609 bool success = si_nir_build_llvm(ctx, nir);
1610 if (free_nir)
1611 ralloc_free(nir);
1612 if (!success) {
1613 fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
1614 return false;
1615 }
1616
1617 si_llvm_build_ret(ctx, ctx->return_value);
1618 return true;
1619 }
1620
1621 /**
1622 * Compute the VS prolog key, which contains all the information needed to
1623 * build the VS prolog function, and set shader->info bits where needed.
1624 *
1625 * \param info Shader info of the vertex shader.
1626 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
1627 * \param ngg_cull_shader Whether the prolog is built for the NGG cull shader part.
1628 * \param prolog_key Key of the VS prolog.
1629 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
1630 * \param key Output shader part key.
1631 */
1632 static void si_get_vs_prolog_key(const struct si_shader_info *info,
1633 unsigned num_input_sgprs,
1634 bool ngg_cull_shader,
1635 const struct si_vs_prolog_bits *prolog_key,
1636 struct si_shader *shader_out,
1637 union si_shader_part_key *key)
1638 {
1639 memset(key, 0, sizeof(*key));
1640 key->vs_prolog.states = *prolog_key;
1641 key->vs_prolog.num_input_sgprs = num_input_sgprs;
1642 key->vs_prolog.num_inputs = info->num_inputs;
1643 key->vs_prolog.as_ls = shader_out->key.as_ls;
1644 key->vs_prolog.as_es = shader_out->key.as_es;
1645 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
1646
1647 if (ngg_cull_shader) {
1648 key->vs_prolog.gs_fast_launch_tri_list = !!(shader_out->key.opt.ngg_culling &
1649 SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST);
1650 key->vs_prolog.gs_fast_launch_tri_strip = !!(shader_out->key.opt.ngg_culling &
1651 SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP);
1652 } else {
1653 key->vs_prolog.has_ngg_cull_inputs = !!shader_out->key.opt.ngg_culling;
1654 }
1655
1656 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
1657 key->vs_prolog.as_ls = 1;
1658 key->vs_prolog.num_merged_next_stage_vgprs = 2;
1659 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
1660 key->vs_prolog.as_es = 1;
1661 key->vs_prolog.num_merged_next_stage_vgprs = 5;
1662 } else if (shader_out->key.as_ngg) {
1663 key->vs_prolog.num_merged_next_stage_vgprs = 5;
1664 }
1665
1666 /* Enable loading the InstanceID VGPR. */
1667 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
1668
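/* instance_divisor_is_one and instance_divisor_is_fetched are
 * per-attribute bitmasks, so InstanceID is needed if any used vertex
 * attribute has a non-default divisor.
 */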
1669 if ((key->vs_prolog.states.instance_divisor_is_one |
1670 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
1671 shader_out->info.uses_instanceid = true;
1672 }
1673
1674 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
1675 struct si_shader_selector *sel)
1676 {
1677 if (!compiler->low_opt_passes)
1678 return false;
1679
1680 /* Assume a slow CPU. */
1681 assert(!sel->screen->info.has_dedicated_vram &&
1682 sel->screen->info.chip_class <= GFX8);
1683
1684 /* For a crazy dEQP test containing 2597 memory opcodes, mostly
1685 * buffer stores. */
1686 return sel->type == PIPE_SHADER_COMPUTE &&
1687 sel->info.num_memory_instructions > 1000;
1688 }
1689
1690 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel,
1691 bool *free_nir)
1692 {
1693 *free_nir = false;
1694
1695 if (sel->nir) {
1696 return sel->nir;
1697 } else if (sel->nir_binary) {
1698 struct pipe_screen *screen = &sel->screen->b;
1699 const void *options =
1700 screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
1701 sel->type);
1702
1703 struct blob_reader blob_reader;
1704 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
1705 *free_nir = true;
1706 return nir_deserialize(NULL, options, &blob_reader);
1707 }
1708 return NULL;
1709 }
1710
1711 static bool si_llvm_compile_shader(struct si_screen *sscreen,
1712 struct ac_llvm_compiler *compiler,
1713 struct si_shader *shader,
1714 struct pipe_debug_callback *debug,
1715 struct nir_shader *nir,
1716 bool free_nir)
1717 {
1718 struct si_shader_selector *sel = shader->selector;
1719 struct si_shader_context ctx;
1720
1721 si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));
1722
1723 LLVMValueRef ngg_cull_main_fn = NULL;
1724 if (shader->key.opt.ngg_culling) {
1725 if (!si_build_main_function(&ctx, shader, nir, false, true)) {
1726 si_llvm_dispose(&ctx);
1727 return false;
1728 }
1729 ngg_cull_main_fn = ctx.main_fn;
1730 ctx.main_fn = NULL;
1731 }
1732
1733 if (!si_build_main_function(&ctx, shader, nir, free_nir, false)) {
1734 si_llvm_dispose(&ctx);
1735 return false;
1736 }
1737
1738 if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
1739 LLVMValueRef parts[4];
1740 unsigned num_parts = 0;
1741 bool has_prolog = false;
1742 LLVMValueRef main_fn = ctx.main_fn;
1743
1744 if (ngg_cull_main_fn) {
1745 if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog,
1746 &shader->key, true)) {
1747 union si_shader_part_key prolog_key;
1748 si_get_vs_prolog_key(&sel->info,
1749 shader->info.num_input_sgprs,
1750 true,
1751 &shader->key.part.vs.prolog,
1752 shader, &prolog_key);
1753 prolog_key.vs_prolog.is_monolithic = true;
1754 si_llvm_build_vs_prolog(&ctx, &prolog_key);
1755 parts[num_parts++] = ctx.main_fn;
1756 has_prolog = true;
1757 }
1758 parts[num_parts++] = ngg_cull_main_fn;
1759 }
1760
1761 if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog,
1762 &shader->key, false)) {
1763 union si_shader_part_key prolog_key;
1764 si_get_vs_prolog_key(&sel->info,
1765 shader->info.num_input_sgprs,
1766 false,
1767 &shader->key.part.vs.prolog,
1768 shader, &prolog_key);
1769 prolog_key.vs_prolog.is_monolithic = true;
1770 si_llvm_build_vs_prolog(&ctx, &prolog_key);
1771 parts[num_parts++] = ctx.main_fn;
1772 has_prolog = true;
1773 }
1774 parts[num_parts++] = main_fn;
1775
1776 si_build_wrapper_function(&ctx, parts, num_parts,
1777 has_prolog ? 1 : 0, 0);
1778
1779 if (ctx.shader->key.opt.vs_as_prim_discard_cs)
1780 si_build_prim_discard_compute_shader(&ctx);
1781 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_EVAL &&
1782 ngg_cull_main_fn) {
1783 LLVMValueRef parts[2];
1784
1785 parts[0] = ngg_cull_main_fn;
1786 parts[1] = ctx.main_fn;
1787
1788 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
1789 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
1790 if (sscreen->info.chip_class >= GFX9) {
1791 struct si_shader_selector *ls = shader->key.part.tcs.ls;
1792 LLVMValueRef parts[4];
1793 bool vs_needs_prolog =
1794 si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog,
1795 &shader->key, false);
1796
1797 /* TCS main part */
1798 parts[2] = ctx.main_fn;
1799
1800 /* TCS epilog */
1801 union si_shader_part_key tcs_epilog_key;
1802 memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
1803 tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
1804 si_llvm_build_tcs_epilog(&ctx, &tcs_epilog_key);
1805 parts[3] = ctx.main_fn;
1806
1807 /* VS as LS main part */
1808 nir = get_nir_shader(ls, &free_nir);
1809 struct si_shader shader_ls = {};
1810 shader_ls.selector = ls;
1811 shader_ls.key.as_ls = 1;
1812 shader_ls.key.mono = shader->key.mono;
1813 shader_ls.key.opt = shader->key.opt;
1814 shader_ls.is_monolithic = true;
1815
1816 if (!si_build_main_function(&ctx, &shader_ls, nir, free_nir, false)) {
1817 si_llvm_dispose(&ctx);
1818 return false;
1819 }
1820 shader->info.uses_instanceid |= ls->info.uses_instanceid;
1821 parts[1] = ctx.main_fn;
1822
1823 /* LS prolog */
1824 if (vs_needs_prolog) {
1825 union si_shader_part_key vs_prolog_key;
1826 si_get_vs_prolog_key(&ls->info,
1827 shader_ls.info.num_input_sgprs,
1828 false,
1829 &shader->key.part.tcs.ls_prolog,
1830 shader, &vs_prolog_key);
1831 vs_prolog_key.vs_prolog.is_monolithic = true;
1832 si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
1833 parts[0] = ctx.main_fn;
1834 }
1835
1836 /* Reset the shader context. */
1837 ctx.shader = shader;
1838 ctx.type = PIPE_SHADER_TESS_CTRL;
1839
1840 si_build_wrapper_function(&ctx,
1841 parts + !vs_needs_prolog,
1842 4 - !vs_needs_prolog, vs_needs_prolog,
1843 vs_needs_prolog ? 2 : 1);
1844 } else {
1845 LLVMValueRef parts[2];
1846 union si_shader_part_key epilog_key;
1847
1848 parts[0] = ctx.main_fn;
1849
1850 memset(&epilog_key, 0, sizeof(epilog_key));
1851 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
1852 si_llvm_build_tcs_epilog(&ctx, &epilog_key);
1853 parts[1] = ctx.main_fn;
1854
1855 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
1856 }
1857 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
1858 if (ctx.screen->info.chip_class >= GFX9) {
1859 struct si_shader_selector *es = shader->key.part.gs.es;
1860 LLVMValueRef es_prolog = NULL;
1861 LLVMValueRef es_main = NULL;
1862 LLVMValueRef gs_prolog = NULL;
1863 LLVMValueRef gs_main = ctx.main_fn;
1864
1865 /* GS prolog */
1866 union si_shader_part_key gs_prolog_key;
1867 memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
1868 gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
1869 gs_prolog_key.gs_prolog.is_monolithic = true;
1870 gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
1871 si_llvm_build_gs_prolog(&ctx, &gs_prolog_key);
1872 gs_prolog = ctx.main_fn;
1873
1874 /* ES main part */
1875 nir = get_nir_shader(es, &free_nir);
1876 struct si_shader shader_es = {};
1877 shader_es.selector = es;
1878 shader_es.key.as_es = 1;
1879 shader_es.key.as_ngg = shader->key.as_ngg;
1880 shader_es.key.mono = shader->key.mono;
1881 shader_es.key.opt = shader->key.opt;
1882 shader_es.is_monolithic = true;
1883
1884 if (!si_build_main_function(&ctx, &shader_es, nir, free_nir, false)) {
1885 si_llvm_dispose(&ctx);
1886 return false;
1887 }
1888 shader->info.uses_instanceid |= es->info.uses_instanceid;
1889 es_main = ctx.main_fn;
1890
1891 /* ES prolog */
1892 if (es->type == PIPE_SHADER_VERTEX &&
1893 si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog,
1894 &shader->key, false)) {
1895 union si_shader_part_key vs_prolog_key;
1896 si_get_vs_prolog_key(&es->info,
1897 shader_es.info.num_input_sgprs,
1898 false,
1899 &shader->key.part.gs.vs_prolog,
1900 shader, &vs_prolog_key);
1901 vs_prolog_key.vs_prolog.is_monolithic = true;
1902 si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
1903 es_prolog = ctx.main_fn;
1904 }
1905
1906 /* Reset the shader context. */
1907 ctx.shader = shader;
1908 ctx.type = PIPE_SHADER_GEOMETRY;
1909
1910 /* Prepare the array of shader parts. */
1911 LLVMValueRef parts[4];
1912 unsigned num_parts = 0, main_part, next_first_part;
1913
1914 if (es_prolog)
1915 parts[num_parts++] = es_prolog;
1916
1917 parts[main_part = num_parts++] = es_main;
1918 parts[next_first_part = num_parts++] = gs_prolog;
1919 parts[num_parts++] = gs_main;
1920
1921 si_build_wrapper_function(&ctx, parts, num_parts,
1922 main_part, next_first_part);
1923 } else {
1924 LLVMValueRef parts[2];
1925 union si_shader_part_key prolog_key;
1926
1927 parts[1] = ctx.main_fn;
1928
1929 memset(&prolog_key, 0, sizeof(prolog_key));
1930 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
1931 si_llvm_build_gs_prolog(&ctx, &prolog_key);
1932 parts[0] = ctx.main_fn;
1933
1934 si_build_wrapper_function(&ctx, parts, 2, 1, 0);
1935 }
1936 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
1937 si_llvm_build_monolithic_ps(&ctx, shader);
1938 }
1939
1940 si_llvm_optimize_module(&ctx);
1941
1942 /* Post-optimization transformations and analysis. */
1943 si_optimize_vs_outputs(&ctx);
1944
1945 if ((debug && debug->debug_message) ||
1946 si_can_dump_shader(sscreen, ctx.type)) {
1947 ctx.shader->info.private_mem_vgprs =
1948 ac_count_scratch_private_memory(ctx.main_fn);
1949 }
1950
1951 /* Make sure the input is a pointer and not an integer followed by inttoptr. */
1952 assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) ==
1953 LLVMPointerTypeKind);
1954
1955 /* Compile to bytecode. */
1956 if (!si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
1957 &ctx.ac, debug, ctx.type, si_get_shader_name(shader),
1958 si_should_optimize_less(compiler, shader->selector))) {
1959 si_llvm_dispose(&ctx);
1960 fprintf(stderr, "LLVM failed to compile shader\n");
1961 return false;
1962 }
1963
1964 si_llvm_dispose(&ctx);
1965 return true;
1966 }
1967
1968 bool si_compile_shader(struct si_screen *sscreen,
1969 struct ac_llvm_compiler *compiler,
1970 struct si_shader *shader,
1971 struct pipe_debug_callback *debug)
1972 {
1973 struct si_shader_selector *sel = shader->selector;
1974 bool free_nir;
1975 struct nir_shader *nir = get_nir_shader(sel, &free_nir);
1976
1977 /* Dump NIR before doing NIR->LLVM conversion in case the
1978 * conversion fails. */
1979 if (si_can_dump_shader(sscreen, sel->type) &&
1980 !(sscreen->debug_flags & DBG(NO_NIR))) {
1981 nir_print_shader(nir, stderr);
1982 si_dump_streamout(&sel->so);
1983 }
1984
1985 memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
1986 sizeof(shader->info.vs_output_param_offset));
1987
1988 shader->info.uses_instanceid = sel->info.uses_instanceid;
1989
1990 /* TODO: ACO could compile non-monolithic shaders here (starting
1991 * with PS and NGG VS), but monolithic shaders should be compiled
1992 * by LLVM due to more complicated compilation.
1993 */
1994 if (!si_llvm_compile_shader(sscreen, compiler, shader, debug, nir, free_nir))
1995 return false;
1996
1997 /* Validate SGPR and VGPR usage for compute to detect compiler bugs.
1998 * LLVM 3.9svn had such a bug.
1999 */
2000 if (sel->type == PIPE_SHADER_COMPUTE) {
2001 unsigned wave_size = sscreen->compute_wave_size;
2002 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd *
2003 (wave_size == 32 ? 2 : 1);
2004 unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
2005 unsigned max_sgprs_per_wave = 128;
2006 unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
2007 unsigned threads_per_tg = si_get_max_workgroup_size(shader);
2008 unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
2009 unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);
2010
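/* Example with wave64 and a 1024-thread workgroup: 16 waves spread
 * over 4 SIMDs gives 4 waves per SIMD, so one wave may use at most a
 * quarter of a SIMD's physical registers (and at most 128 SGPRs).
 */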
2011 max_vgprs = max_vgprs / waves_per_simd;
2012 max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);
2013
2014 if (shader->config.num_sgprs > max_sgprs ||
2015 shader->config.num_vgprs > max_vgprs) {
2016 fprintf(stderr, "LLVM failed to compile a shader correctly: "
2017 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
2018 shader->config.num_sgprs, shader->config.num_vgprs,
2019 max_sgprs, max_vgprs);
2020
2021 /* Just terminate the process, because dependent
2022 * shaders can hang due to bad input data, but use
2023 * the env var to allow shader-db to work.
2024 */
2025 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
2026 abort();
2027 }
2028 }
2029
2030 /* Add the scratch offset to input SGPRs. */
2031 if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(shader))
2032 shader->info.num_input_sgprs += 1; /* scratch byte offset */
2033
2034 /* Calculate the number of fragment input VGPRs. */
2035 if (sel->type == PIPE_SHADER_FRAGMENT) {
2036 shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(&shader->config,
2037 &shader->info.face_vgpr_index,
2038 &shader->info.ancillary_vgpr_index);
2039 }
2040
2041 si_calculate_max_simd_waves(shader);
2042 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
2043 return true;
2044 }
2045
2046 /**
2047 * Create, compile and return a shader part (prolog or epilog).
2048 *
2049 * \param sscreen screen
2050 * \param list list of shader parts of the same category
2051 * \param type shader type
2052 * \param prolog whether the part being requested is a prolog
2053 * \param key shader part key
2054 * \param compiler LLVM compiler
2055 * \param debug debug callback
2056 * \param build the callback responsible for building the main function
2057 * \return non-NULL on success
2058 */
2059 static struct si_shader_part *
2060 si_get_shader_part(struct si_screen *sscreen,
2061 struct si_shader_part **list,
2062 enum pipe_shader_type type,
2063 bool prolog,
2064 union si_shader_part_key *key,
2065 struct ac_llvm_compiler *compiler,
2066 struct pipe_debug_callback *debug,
2067 void (*build)(struct si_shader_context *,
2068 union si_shader_part_key *),
2069 const char *name)
2070 {
2071 struct si_shader_part *result;
2072
2073 simple_mtx_lock(&sscreen->shader_parts_mutex);
2074
2075 /* Find existing. */
2076 for (result = *list; result; result = result->next) {
2077 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
2078 simple_mtx_unlock(&sscreen->shader_parts_mutex);
2079 return result;
2080 }
2081 }
2082
2083 /* Compile a new one. */
2084 result = CALLOC_STRUCT(si_shader_part);
2085 result->key = *key;
2086
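/* Shader parts are built with a dummy selector/shader that carries just
 * enough key state for the part builders below.
 */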
2087 struct si_shader_selector sel = {};
2088 sel.screen = sscreen;
2089
2090 struct si_shader shader = {};
2091 shader.selector = &sel;
2092
2093 switch (type) {
2094 case PIPE_SHADER_VERTEX:
2095 shader.key.as_ls = key->vs_prolog.as_ls;
2096 shader.key.as_es = key->vs_prolog.as_es;
2097 shader.key.as_ngg = key->vs_prolog.as_ngg;
2098 break;
2099 case PIPE_SHADER_TESS_CTRL:
2100 assert(!prolog);
2101 shader.key.part.tcs.epilog = key->tcs_epilog.states;
2102 break;
2103 case PIPE_SHADER_GEOMETRY:
2104 assert(prolog);
2105 shader.key.as_ngg = key->gs_prolog.as_ngg;
2106 break;
2107 case PIPE_SHADER_FRAGMENT:
2108 if (prolog)
2109 shader.key.part.ps.prolog = key->ps_prolog.states;
2110 else
2111 shader.key.part.ps.epilog = key->ps_epilog.states;
2112 break;
2113 default:
2114 unreachable("bad shader part");
2115 }
2116
2117 struct si_shader_context ctx;
2118 si_llvm_context_init(&ctx, sscreen, compiler,
2119 si_get_wave_size(sscreen, type, shader.key.as_ngg,
2120 shader.key.as_es));
2121 ctx.shader = &shader;
2122 ctx.type = type;
2123
2124 build(&ctx, key);
2125
2126 /* Compile. */
2127 si_llvm_optimize_module(&ctx);
2128
2129 if (!si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
2130 &ctx.ac, debug, ctx.type, name, false)) {
2131 FREE(result);
2132 result = NULL;
2133 goto out;
2134 }
2135
2136 result->next = *list;
2137 *list = result;
2138
2139 out:
2140 si_llvm_dispose(&ctx);
2141 simple_mtx_unlock(&sscreen->shader_parts_mutex);
2142 return result;
2143 }
2144
2145 static bool si_get_vs_prolog(struct si_screen *sscreen,
2146 struct ac_llvm_compiler *compiler,
2147 struct si_shader *shader,
2148 struct pipe_debug_callback *debug,
2149 struct si_shader *main_part,
2150 const struct si_vs_prolog_bits *key)
2151 {
2152 struct si_shader_selector *vs = main_part->selector;
2153
2154 if (!si_vs_needs_prolog(vs, key, &shader->key, false))
2155 return true;
2156
2157 /* Get the prolog. */
2158 union si_shader_part_key prolog_key;
2159 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs, false,
2160 key, shader, &prolog_key);
2161
2162 shader->prolog =
2163 si_get_shader_part(sscreen, &sscreen->vs_prologs,
2164 PIPE_SHADER_VERTEX, true, &prolog_key, compiler,
2165 debug, si_llvm_build_vs_prolog,
2166 "Vertex Shader Prolog");
2167 return shader->prolog != NULL;
2168 }
2169
2170 /**
2171 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
2172 */
2173 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
2174 struct ac_llvm_compiler *compiler,
2175 struct si_shader *shader,
2176 struct pipe_debug_callback *debug)
2177 {
2178 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader,
2179 &shader->key.part.vs.prolog);
2180 }
2181
2182 /**
2183 * Select and compile (or reuse) TCS parts (epilog).
2184 */
2185 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
2186 struct ac_llvm_compiler *compiler,
2187 struct si_shader *shader,
2188 struct pipe_debug_callback *debug)
2189 {
2190 if (sscreen->info.chip_class >= GFX9) {
2191 struct si_shader *ls_main_part =
2192 shader->key.part.tcs.ls->main_shader_part_ls;
2193
2194 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
2195 &shader->key.part.tcs.ls_prolog))
2196 return false;
2197
2198 shader->previous_stage = ls_main_part;
2199 }
2200
2201 /* Get the epilog. */
2202 union si_shader_part_key epilog_key;
2203 memset(&epilog_key, 0, sizeof(epilog_key));
2204 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
2205
2206 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
2207 PIPE_SHADER_TESS_CTRL, false,
2208 &epilog_key, compiler, debug,
2209 si_llvm_build_tcs_epilog,
2210 "Tessellation Control Shader Epilog");
2211 return shader->epilog != NULL;
2212 }
2213
2214 /**
2215 * Select and compile (or reuse) GS parts (prolog).
2216 */
2217 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
2218 struct ac_llvm_compiler *compiler,
2219 struct si_shader *shader,
2220 struct pipe_debug_callback *debug)
2221 {
2222 if (sscreen->info.chip_class >= GFX9) {
2223 struct si_shader *es_main_part;
2224 enum pipe_shader_type es_type = shader->key.part.gs.es->type;
2225
2226 if (shader->key.as_ngg)
2227 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
2228 else
2229 es_main_part = shader->key.part.gs.es->main_shader_part_es;
2230
2231 if (es_type == PIPE_SHADER_VERTEX &&
2232 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
2233 &shader->key.part.gs.vs_prolog))
2234 return false;
2235
2236 shader->previous_stage = es_main_part;
2237 }
2238
2239 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
2240 return true;
2241
2242 union si_shader_part_key prolog_key;
2243 memset(&prolog_key, 0, sizeof(prolog_key));
2244 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
2245 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
2246
2247 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
2248 PIPE_SHADER_GEOMETRY, true,
2249 &prolog_key, compiler, debug,
2250 si_llvm_build_gs_prolog,
2251 "Geometry Shader Prolog");
2252 return shader->prolog2 != NULL;
2253 }
2254
2255 /**
2256 * Compute the PS prolog key, which contains all the information needed to
2257 * build the PS prolog function, and set related bits in shader->config.
2258 */
2259 void si_get_ps_prolog_key(struct si_shader *shader,
2260 union si_shader_part_key *key,
2261 bool separate_prolog)
2262 {
2263 struct si_shader_info *info = &shader->selector->info;
2264
2265 memset(key, 0, sizeof(*key));
2266 key->ps_prolog.states = shader->key.part.ps.prolog;
2267 key->ps_prolog.colors_read = info->colors_read;
2268 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
2269 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
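/* The prolog must run in WQM (whole quad mode) if the main part uses
 * derivatives of values the prolog interpolates or overrides.
 */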
2270 key->ps_prolog.wqm = info->uses_derivatives &&
2271 (key->ps_prolog.colors_read ||
2272 key->ps_prolog.states.force_persp_sample_interp ||
2273 key->ps_prolog.states.force_linear_sample_interp ||
2274 key->ps_prolog.states.force_persp_center_interp ||
2275 key->ps_prolog.states.force_linear_center_interp ||
2276 key->ps_prolog.states.bc_optimize_for_persp ||
2277 key->ps_prolog.states.bc_optimize_for_linear);
2278 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
2279
2280 if (info->colors_read) {
2281 unsigned *color = shader->selector->color_attr_index;
2282
2283 if (shader->key.part.ps.prolog.color_two_side) {
2284 /* BCOLORs are stored after the last input. */
2285 key->ps_prolog.num_interp_inputs = info->num_inputs;
2286 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
2287 if (separate_prolog)
2288 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
2289 }
2290
2291 for (unsigned i = 0; i < 2; i++) {
2292 unsigned interp = info->input_interpolate[color[i]];
2293 unsigned location = info->input_interpolate_loc[color[i]];
2294
2295 if (!(info->colors_read & (0xf << i*4)))
2296 continue;
2297
2298 key->ps_prolog.color_attr_index[i] = color[i];
2299
2300 if (shader->key.part.ps.prolog.flatshade_colors &&
2301 interp == TGSI_INTERPOLATE_COLOR)
2302 interp = TGSI_INTERPOLATE_CONSTANT;
2303
2304 switch (interp) {
2305 case TGSI_INTERPOLATE_CONSTANT:
2306 key->ps_prolog.color_interp_vgpr_index[i] = -1;
2307 break;
2308 case TGSI_INTERPOLATE_PERSPECTIVE:
2309 case TGSI_INTERPOLATE_COLOR:
2310 /* Force the interpolation location for colors here. */
2311 if (shader->key.part.ps.prolog.force_persp_sample_interp)
2312 location = TGSI_INTERPOLATE_LOC_SAMPLE;
2313 if (shader->key.part.ps.prolog.force_persp_center_interp)
2314 location = TGSI_INTERPOLATE_LOC_CENTER;
2315
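/* color_interp_vgpr_index is the VGPR offset of the selected
 * barycentric pair: PERSP_SAMPLE = 0, PERSP_CENTER = 2,
 * PERSP_CENTROID = 4 (each i,j pair occupies two VGPRs).
 */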
2316 switch (location) {
2317 case TGSI_INTERPOLATE_LOC_SAMPLE:
2318 key->ps_prolog.color_interp_vgpr_index[i] = 0;
2319 if (separate_prolog) {
2320 shader->config.spi_ps_input_ena |=
2321 S_0286CC_PERSP_SAMPLE_ENA(1);
2322 }
2323 break;
2324 case TGSI_INTERPOLATE_LOC_CENTER:
2325 key->ps_prolog.color_interp_vgpr_index[i] = 2;
2326 if (separate_prolog) {
2327 shader->config.spi_ps_input_ena |=
2328 S_0286CC_PERSP_CENTER_ENA(1);
2329 }
2330 break;
2331 case TGSI_INTERPOLATE_LOC_CENTROID:
2332 key->ps_prolog.color_interp_vgpr_index[i] = 4;
2333 if (separate_prolog) {
2334 shader->config.spi_ps_input_ena |=
2335 S_0286CC_PERSP_CENTROID_ENA(1);
2336 }
2337 break;
2338 default:
2339 assert(0);
2340 }
2341 break;
2342 case TGSI_INTERPOLATE_LINEAR:
2343 /* Force the interpolation location for colors here. */
2344 if (shader->key.part.ps.prolog.force_linear_sample_interp)
2345 location = TGSI_INTERPOLATE_LOC_SAMPLE;
2346 if (shader->key.part.ps.prolog.force_linear_center_interp)
2347 location = TGSI_INTERPOLATE_LOC_CENTER;
2348
2349 /* The VGPR assignment for non-monolithic shaders
2350 * works because InitialPSInputAddr is set on the
2351 * main shader and PERSP_PULL_MODEL is never used.
2352 */
2353 switch (location) {
2354 case TGSI_INTERPOLATE_LOC_SAMPLE:
2355 key->ps_prolog.color_interp_vgpr_index[i] =
2356 separate_prolog ? 6 : 9;
2357 if (separate_prolog) {
2358 shader->config.spi_ps_input_ena |=
2359 S_0286CC_LINEAR_SAMPLE_ENA(1);
2360 }
2361 break;
2362 case TGSI_INTERPOLATE_LOC_CENTER:
2363 key->ps_prolog.color_interp_vgpr_index[i] =
2364 separate_prolog ? 8 : 11;
2365 if (separate_prolog) {
2366 shader->config.spi_ps_input_ena |=
2367 S_0286CC_LINEAR_CENTER_ENA(1);
2368 }
2369 break;
2370 case TGSI_INTERPOLATE_LOC_CENTROID:
2371 key->ps_prolog.color_interp_vgpr_index[i] =
2372 separate_prolog ? 10 : 13;
2373 if (separate_prolog) {
2374 shader->config.spi_ps_input_ena |=
2375 S_0286CC_LINEAR_CENTROID_ENA(1);
2376 }
2377 break;
2378 default:
2379 assert(0);
2380 }
2381 break;
2382 default:
2383 assert(0);
2384 }
2385 }
2386 }
2387 }
2388
2389 /**
2390 * Check whether a PS prolog is required based on the key.
2391 */
2392 bool si_need_ps_prolog(const union si_shader_part_key *key)
2393 {
2394 return key->ps_prolog.colors_read ||
2395 key->ps_prolog.states.force_persp_sample_interp ||
2396 key->ps_prolog.states.force_linear_sample_interp ||
2397 key->ps_prolog.states.force_persp_center_interp ||
2398 key->ps_prolog.states.force_linear_center_interp ||
2399 key->ps_prolog.states.bc_optimize_for_persp ||
2400 key->ps_prolog.states.bc_optimize_for_linear ||
2401 key->ps_prolog.states.poly_stipple ||
2402 key->ps_prolog.states.samplemask_log_ps_iter;
2403 }
2404
2405 /**
2406 * Compute the PS epilog key, which contains all the information needed to
2407 * build the PS epilog function.
2408 */
2409 void si_get_ps_epilog_key(struct si_shader *shader,
2410 union si_shader_part_key *key)
2411 {
2412 struct si_shader_info *info = &shader->selector->info;
2413 memset(key, 0, sizeof(*key));
2414 key->ps_epilog.colors_written = info->colors_written;
2415 key->ps_epilog.writes_z = info->writes_z;
2416 key->ps_epilog.writes_stencil = info->writes_stencil;
2417 key->ps_epilog.writes_samplemask = info->writes_samplemask;
2418 key->ps_epilog.states = shader->key.part.ps.epilog;
2419 }
2420
2421 /**
2422 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
2423 */
2424 static bool si_shader_select_ps_parts(struct si_screen *sscreen,
2425 struct ac_llvm_compiler *compiler,
2426 struct si_shader *shader,
2427 struct pipe_debug_callback *debug)
2428 {
2429 union si_shader_part_key prolog_key;
2430 union si_shader_part_key epilog_key;
2431
2432 /* Get the prolog. */
2433 si_get_ps_prolog_key(shader, &prolog_key, true);
2434
2435 /* The prolog is a no-op if these aren't set. */
2436 if (si_need_ps_prolog(&prolog_key)) {
2437 shader->prolog =
2438 si_get_shader_part(sscreen, &sscreen->ps_prologs,
2439 PIPE_SHADER_FRAGMENT, true,
2440 &prolog_key, compiler, debug,
2441 si_llvm_build_ps_prolog,
2442 "Fragment Shader Prolog");
2443 if (!shader->prolog)
2444 return false;
2445 }
2446
2447 /* Get the epilog. */
2448 si_get_ps_epilog_key(shader, &epilog_key);
2449
2450 shader->epilog =
2451 si_get_shader_part(sscreen, &sscreen->ps_epilogs,
2452 PIPE_SHADER_FRAGMENT, false,
2453 &epilog_key, compiler, debug,
2454 si_llvm_build_ps_epilog,
2455 "Fragment Shader Epilog");
2456 if (!shader->epilog)
2457 return false;
2458
2459 /* Enable POS_FIXED_PT if polygon stippling is enabled. */
2460 if (shader->key.part.ps.prolog.poly_stipple) {
2461 shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
2462 assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
2463 }
2464
2465 /* Set up the enable bits for per-sample shading if needed. */
2466 if (shader->key.part.ps.prolog.force_persp_sample_interp &&
2467 (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
2468 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2469 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
2470 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
2471 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
2472 }
2473 if (shader->key.part.ps.prolog.force_linear_sample_interp &&
2474 (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
2475 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2476 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
2477 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
2478 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
2479 }
2480 if (shader->key.part.ps.prolog.force_persp_center_interp &&
2481 (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
2482 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2483 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
2484 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
2485 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2486 }
2487 if (shader->key.part.ps.prolog.force_linear_center_interp &&
2488 (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
2489 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2490 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
2491 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
2492 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2493 }
2494
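/* In SPI_PS_INPUT_ENA, bits 0-3 are the PERSP_* enables and bits 4-6
 * the LINEAR_* enables, hence the 0xf and 0x7f masks below.
 */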
2495 /* POS_W_FLOAT requires that one of the perspective weights is enabled. */
2496 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
2497 !(shader->config.spi_ps_input_ena & 0xf)) {
2498 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2499 assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
2500 }
2501
2502 /* At least one pair of interpolation weights must be enabled. */
2503 if (!(shader->config.spi_ps_input_ena & 0x7f)) {
2504 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2505 assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
2506 }
2507
2508 /* Samplemask fixup requires the sample ID. */
2509 if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
2510 shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
2511 assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
2512 }
2513
2514 /* The sample mask input is always enabled, because the API shader always
2515 * passes it through to the epilog. Disable it here if it's unused.
2516 */
2517 if (!shader->key.part.ps.epilog.poly_line_smoothing &&
2518 !shader->selector->info.reads_samplemask)
2519 shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;
2520
2521 return true;
2522 }
2523
2524 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
2525 unsigned *lds_size)
2526 {
2527 /* If tessellation is all offchip and on-chip GS isn't used, this
2528 * workaround is not needed.
2529 */
2530 return;
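/* NOTE: the early return above intentionally disables the workaround
 * below; it is kept for reference.
 */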
2531
2532 /* SPI barrier management bug:
2533 * Make sure we have at least 4k of LDS in use to avoid the bug.
2534 * It applies to workgroup sizes of more than one wavefront.
2535 */
2536 if (sscreen->info.family == CHIP_BONAIRE ||
2537 sscreen->info.family == CHIP_KABINI)
2538 *lds_size = MAX2(*lds_size, 8);
2539 }
2540
2541 void si_fix_resource_usage(struct si_screen *sscreen, struct si_shader *shader)
2542 {
2543 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
2544
2545 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
2546
2547 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
2548 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
2549 si_multiwave_lds_size_workaround(sscreen,
2550 &shader->config.lds_size);
2551 }
2552 }
2553
2554 bool si_create_shader_variant(struct si_screen *sscreen,
2555 struct ac_llvm_compiler *compiler,
2556 struct si_shader *shader,
2557 struct pipe_debug_callback *debug)
2558 {
2559 struct si_shader_selector *sel = shader->selector;
2560 struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
2561
2562 /* LS, ES, VS are compiled on demand if the main part hasn't been
2563 * compiled for that stage.
2564 *
2565 * GS is compiled on demand if the main part hasn't been compiled
2566 * for the chosen NGG-ness.
2567 *
2568 * Vertex shaders are compiled on demand when a vertex fetch
2569 * workaround must be applied.
2570 */
2571 if (shader->is_monolithic) {
2572 /* Monolithic shader (compiled as a whole, has many variants,
2573 * may take a long time to compile).
2574 */
2575 if (!si_compile_shader(sscreen, compiler, shader, debug))
2576 return false;
2577 } else {
2578 /* The shader consists of several parts:
2579 *
2580 * - the middle part is the user shader; it has only one variant
2581 * and was compiled during the creation of the shader
2582 * selector
2583 * - the prolog part is inserted at the beginning
2584 * - the epilog part is inserted at the end
2585 *
2586 * The prolog and epilog have many (but simple) variants.
2587 *
2588 * Starting with gfx9, geometry and tessellation control
2589 * shaders also contain the prolog and user shader parts of
2590 * the previous shader stage.
2591 */
2592
2593 if (!mainp)
2594 return false;
2595
2596 /* Copy the compiled shader data over. */
2597 shader->is_binary_shared = true;
2598 shader->binary = mainp->binary;
2599 shader->config = mainp->config;
2600 shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
2601 shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
2602 shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
2603 shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
2604 memcpy(shader->info.vs_output_param_offset,
2605 mainp->info.vs_output_param_offset,
2606 sizeof(mainp->info.vs_output_param_offset));
2607 shader->info.uses_instanceid = mainp->info.uses_instanceid;
2608 shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
2609 shader->info.nr_param_exports = mainp->info.nr_param_exports;
2610
2611 /* Select prologs and/or epilogs. */
2612 switch (sel->type) {
2613 case PIPE_SHADER_VERTEX:
2614 if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
2615 return false;
2616 break;
2617 case PIPE_SHADER_TESS_CTRL:
2618 if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
2619 return false;
2620 break;
2621 case PIPE_SHADER_TESS_EVAL:
2622 break;
2623 case PIPE_SHADER_GEOMETRY:
2624 if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
2625 return false;
2626 break;
2627 case PIPE_SHADER_FRAGMENT:
2628 if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
2629 return false;
2630
2631 /* Make sure we have at least as many VGPRs as there
2632 * are allocated inputs.
2633 */
2634 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2635 shader->info.num_input_vgprs);
2636 break;
2637 default:;
2638 }
2639
2640 /* Update SGPR and VGPR counts. */
2641 if (shader->prolog) {
2642 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2643 shader->prolog->config.num_sgprs);
2644 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2645 shader->prolog->config.num_vgprs);
2646 }
2647 if (shader->previous_stage) {
2648 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2649 shader->previous_stage->config.num_sgprs);
2650 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2651 shader->previous_stage->config.num_vgprs);
2652 shader->config.spilled_sgprs =
2653 MAX2(shader->config.spilled_sgprs,
2654 shader->previous_stage->config.spilled_sgprs);
2655 shader->config.spilled_vgprs =
2656 MAX2(shader->config.spilled_vgprs,
2657 shader->previous_stage->config.spilled_vgprs);
2658 shader->info.private_mem_vgprs =
2659 MAX2(shader->info.private_mem_vgprs,
2660 shader->previous_stage->info.private_mem_vgprs);
2661 shader->config.scratch_bytes_per_wave =
2662 MAX2(shader->config.scratch_bytes_per_wave,
2663 shader->previous_stage->config.scratch_bytes_per_wave);
2664 shader->info.uses_instanceid |=
2665 shader->previous_stage->info.uses_instanceid;
2666 }
2667 if (shader->prolog2) {
2668 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2669 shader->prolog2->config.num_sgprs);
2670 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2671 shader->prolog2->config.num_vgprs);
2672 }
2673 if (shader->epilog) {
2674 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2675 shader->epilog->config.num_sgprs);
2676 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2677 shader->epilog->config.num_vgprs);
2678 }
2679 si_calculate_max_simd_waves(shader);
2680 }
2681
2682 if (shader->key.as_ngg) {
2683 assert(!shader->key.as_es && !shader->key.as_ls);
2684 gfx10_ngg_calculate_subgroup_info(shader);
2685 } else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
2686 gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
2687 }
2688
2689 si_fix_resource_usage(sscreen, shader);
2690 si_shader_dump(sscreen, shader, debug, stderr, true);
2691
2692 /* Upload. */
2693 if (!si_shader_binary_upload(sscreen, shader, 0)) {
2694 fprintf(stderr, "Failed to upload shader binary\n");
2695 return false;
2696 }
2697
2698 return true;
2699 }
2700
2701 void si_shader_binary_clean(struct si_shader_binary *binary)
2702 {
2703 free((void *)binary->elf_buffer);
2704 binary->elf_buffer = NULL;
2705
2706 free(binary->llvm_ir_string);
2707 binary->llvm_ir_string = NULL;
2708 }
2709
2710 void si_shader_destroy(struct si_shader *shader)
2711 {
2712 if (shader->scratch_bo)
2713 si_resource_reference(&shader->scratch_bo, NULL);
2714
2715 si_resource_reference(&shader->bo, NULL);
2716
2717 if (!shader->is_binary_shared)
2718 si_shader_binary_clean(&shader->binary);
2719
2720 free(shader->shader_log);
2721 }