radeonsi/gfx10: fix the wave size for compute-based culling
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "util/u_memory.h"
26 #include "tgsi/tgsi_strings.h"
27 #include "tgsi/tgsi_from_mesa.h"
28
29 #include "ac_exp_param.h"
30 #include "ac_rtld.h"
31 #include "si_shader_internal.h"
32 #include "si_pipe.h"
33 #include "sid.h"
34
35 #include "compiler/nir/nir.h"
36 #include "compiler/nir/nir_serialize.h"
37
38 static const char scratch_rsrc_dword0_symbol[] =
39 "SCRATCH_RSRC_DWORD0";
40
41 static const char scratch_rsrc_dword1_symbol[] =
42 "SCRATCH_RSRC_DWORD1";
43
44 static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
45
46 /** Whether the shader runs as a combination of multiple API shaders */
47 bool si_is_multi_part_shader(struct si_shader *shader)
48 {
49 if (shader->selector->screen->info.chip_class <= GFX8)
50 return false;
51
52 return shader->key.as_ls ||
53 shader->key.as_es ||
54 shader->selector->type == PIPE_SHADER_TESS_CTRL ||
55 shader->selector->type == PIPE_SHADER_GEOMETRY;
56 }
57
58 /** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
59 bool si_is_merged_shader(struct si_shader *shader)
60 {
61 return shader->key.as_ngg || si_is_multi_part_shader(shader);
62 }
63
64 /**
65 * Returns a unique index for a per-patch semantic name and index. The index
66 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
67 * can be calculated.
68 */
69 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
70 {
71 switch (semantic_name) {
72 case TGSI_SEMANTIC_TESSOUTER:
73 return 0;
74 case TGSI_SEMANTIC_TESSINNER:
75 return 1;
76 case TGSI_SEMANTIC_PATCH:
77 assert(index < 30);
78 return 2 + index;
79
80 default:
81 assert(!"invalid semantic name");
82 return 0;
83 }
84 }
85
86 /**
87 * Returns a unique index for a semantic name and index. The index must be
88 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
89 * calculated.
90 */
91 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
92 unsigned is_varying)
93 {
94 switch (semantic_name) {
95 case TGSI_SEMANTIC_POSITION:
96 return 0;
97 case TGSI_SEMANTIC_GENERIC:
 98 		/* Some shader stages use the highest used IO index to determine
 99 		 * the size to allocate for inputs/outputs (in LDS, tess and GS
 100 		 * rings), so GENERIC should be placed right after POSITION to
 101 		 * make that size as small as possible.
 102 		 */
103 if (index < SI_MAX_IO_GENERIC)
104 return 1 + index;
105
106 assert(!"invalid generic index");
107 return 0;
108 case TGSI_SEMANTIC_FOG:
109 return SI_MAX_IO_GENERIC + 1;
110 case TGSI_SEMANTIC_COLOR:
111 assert(index < 2);
112 return SI_MAX_IO_GENERIC + 2 + index;
113 case TGSI_SEMANTIC_BCOLOR:
114 assert(index < 2);
115 /* If it's a varying, COLOR and BCOLOR alias. */
116 if (is_varying)
117 return SI_MAX_IO_GENERIC + 2 + index;
118 else
119 return SI_MAX_IO_GENERIC + 4 + index;
120 case TGSI_SEMANTIC_TEXCOORD:
121 assert(index < 8);
122 return SI_MAX_IO_GENERIC + 6 + index;
123
124 /* These are rarely used between LS and HS or ES and GS. */
125 case TGSI_SEMANTIC_CLIPDIST:
126 assert(index < 2);
127 return SI_MAX_IO_GENERIC + 6 + 8 + index;
128 case TGSI_SEMANTIC_CLIPVERTEX:
129 return SI_MAX_IO_GENERIC + 6 + 8 + 2;
130 case TGSI_SEMANTIC_PSIZE:
131 return SI_MAX_IO_GENERIC + 6 + 8 + 3;
132
 133 	/* These can't be written by LS, HS, or ES. */
134 case TGSI_SEMANTIC_LAYER:
135 return SI_MAX_IO_GENERIC + 6 + 8 + 4;
136 case TGSI_SEMANTIC_VIEWPORT_INDEX:
137 return SI_MAX_IO_GENERIC + 6 + 8 + 5;
138 case TGSI_SEMANTIC_PRIMID:
139 STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
140 return SI_MAX_IO_GENERIC + 6 + 8 + 6;
141 default:
142 fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
143 assert(!"invalid semantic name");
144 return 0;
145 }
146 }
147
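/** Print the stream output (transform feedback) mapping to stderr for debugging. */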
148 static void si_dump_streamout(struct pipe_stream_output_info *so)
149 {
150 unsigned i;
151
152 if (so->num_outputs)
153 fprintf(stderr, "STREAMOUT\n");
154
155 for (i = 0; i < so->num_outputs; i++) {
156 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
157 so->output[i].start_component;
158 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
159 i, so->output[i].output_buffer,
160 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
161 so->output[i].register_index,
162 mask & 1 ? "x" : "",
163 mask & 2 ? "y" : "",
164 mask & 4 ? "z" : "",
165 mask & 8 ? "w" : "");
166 }
167 }
168
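/**
 * Declare the SGPR arguments used by streamout: the config and write index
 * for legacy streamout, plus one buffer offset per buffer with a non-zero stride.
 */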
169 static void declare_streamout_params(struct si_shader_context *ctx,
170 struct pipe_stream_output_info *so)
171 {
172 if (ctx->screen->use_ngg_streamout) {
173 if (ctx->type == PIPE_SHADER_TESS_EVAL)
174 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
175 return;
176 }
177
178 /* Streamout SGPRs. */
179 if (so->num_outputs) {
180 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
181 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
182 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
183 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
184 }
185
186 /* A streamout buffer offset is loaded if the stride is non-zero. */
187 for (int i = 0; i < 4; i++) {
188 if (!so->stride[i])
189 continue;
190
191 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
192 }
193 }
194
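/**
 * Return the maximum workgroup size in threads for the given shader;
 * 0 means no specific maximum is required.
 */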
195 unsigned si_get_max_workgroup_size(const struct si_shader *shader)
196 {
197 switch (shader->selector->type) {
198 case PIPE_SHADER_VERTEX:
199 case PIPE_SHADER_TESS_EVAL:
200 return shader->key.as_ngg ? 128 : 0;
201
202 case PIPE_SHADER_TESS_CTRL:
 203 		/* Return a size larger than one wave so that LLVM doesn't remove
 204 		 * s_barrier instructions on chips where we use s_barrier. */
205 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
206
207 case PIPE_SHADER_GEOMETRY:
208 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
209
210 case PIPE_SHADER_COMPUTE:
211 break; /* see below */
212
213 default:
214 return 0;
215 }
216
217 const unsigned *properties = shader->selector->info.properties;
218 unsigned max_work_group_size =
219 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
220 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
221 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
222
223 if (!max_work_group_size) {
224 /* This is a variable group size compute shader,
225 * compile it for the maximum possible group size.
226 */
227 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
228 }
229 return max_work_group_size;
230 }
231
232 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
233 bool assign_params)
234 {
235 enum ac_arg_type const_shader_buf_type;
236
237 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
238 ctx->shader->selector->info.shader_buffers_declared == 0)
239 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
240 else
241 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
242
243 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
244 assign_params ? &ctx->const_and_shader_buffers :
245 &ctx->other_const_and_shader_buffers);
246 }
247
248 static void declare_samplers_and_images(struct si_shader_context *ctx,
249 bool assign_params)
250 {
251 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
252 assign_params ? &ctx->samplers_and_images :
253 &ctx->other_samplers_and_images);
254 }
255
256 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
257 bool assign_params)
258 {
259 declare_const_and_shader_buffers(ctx, assign_params);
260 declare_samplers_and_images(ctx, assign_params);
261 }
262
263 static void declare_global_desc_pointers(struct si_shader_context *ctx)
264 {
265 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
266 &ctx->rw_buffers);
267 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
268 &ctx->bindless_samplers_and_images);
269 }
270
271 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
272 {
273 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
274 if (!ctx->shader->is_gs_copy_shader) {
275 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
276 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
277 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
278 }
279 }
280
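/**
 * Declare the vertex buffer descriptor list pointer and, optionally, the
 * VB descriptors passed inline in user SGPRs (aligned to 4 SGPRs).
 */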
281 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
282 {
283 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
284
285 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
286 if (num_vbos_in_user_sgprs) {
287 unsigned user_sgprs = ctx->args.num_sgprs_used;
288
289 if (si_is_merged_shader(ctx->shader))
290 user_sgprs -= 8;
291 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
292
293 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
294 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
295 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
296
297 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
298 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
299 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
300 }
301 }
302
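/**
 * Declare the VS input VGPRs (VertexID, InstanceID, etc.), whose layout
 * depends on the chip generation and on whether the shader runs as LS,
 * plus the vertex load indices added by the VS prolog.
 */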
303 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
304 unsigned *num_prolog_vgprs,
305 bool ngg_cull_shader)
306 {
307 struct si_shader *shader = ctx->shader;
308
309 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
310 if (shader->key.as_ls) {
311 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
312 if (ctx->screen->info.chip_class >= GFX10) {
313 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
314 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
315 } else {
316 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
317 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
318 }
319 } else if (ctx->screen->info.chip_class >= GFX10) {
320 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
321 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
322 &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
323 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
324 } else {
325 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
326 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
327 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
328 }
329
330 if (!shader->is_gs_copy_shader) {
331 if (shader->key.opt.ngg_culling && !ngg_cull_shader) {
332 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
333 &ctx->ngg_old_thread_id);
334 }
335
336 /* Vertex load indices. */
337 if (shader->selector->info.num_inputs) {
338 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
339 &ctx->vertex_index0);
340 for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
341 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
342 }
343 *num_prolog_vgprs += shader->selector->info.num_inputs;
344 }
345 }
346
347 static void declare_vs_blit_inputs(struct si_shader_context *ctx,
348 unsigned vs_blit_property)
349 {
350 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
351 &ctx->vs_blit_inputs); /* i16 x1, y1 */
 352 	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x2, y2 */
353 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
354
355 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
356 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
357 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
358 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
359 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
360 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
361 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
362 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
363 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
364 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
365 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
366 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
367 }
368 }
369
370 static void declare_tes_input_vgprs(struct si_shader_context *ctx, bool ngg_cull_shader)
371 {
372 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
373 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
374 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
375 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
376
377 if (ctx->shader->key.opt.ngg_culling && !ngg_cull_shader) {
378 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
379 &ctx->ngg_old_thread_id);
380 }
381 }
382
383 enum {
384 /* Convenient merged shader definitions. */
385 SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
386 SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
387 };
388
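/** Add an argument and assert that it was assigned the expected argument index. */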
389 void si_add_arg_checked(struct ac_shader_args *args,
390 enum ac_arg_regfile file,
391 unsigned registers, enum ac_arg_type type,
392 struct ac_arg *arg,
393 unsigned idx)
394 {
395 assert(args->arg_count == idx);
396 ac_add_arg(args, file, registers, type, arg);
397 }
398
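/**
 * Declare all input SGPRs and VGPRs and all return values for the current
 * shader stage, then create the LLVM function itself.
 */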
399 void si_create_function(struct si_shader_context *ctx, bool ngg_cull_shader)
400 {
401 struct si_shader *shader = ctx->shader;
402 LLVMTypeRef returns[AC_MAX_ARGS];
403 unsigned i, num_return_sgprs;
404 unsigned num_returns = 0;
405 unsigned num_prolog_vgprs = 0;
406 unsigned type = ctx->type;
407 unsigned vs_blit_property =
408 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
409
410 memset(&ctx->args, 0, sizeof(ctx->args));
411
412 /* Set MERGED shaders. */
413 if (ctx->screen->info.chip_class >= GFX9) {
414 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
415 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
416 else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
417 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
418 }
419
420 switch (type) {
421 case PIPE_SHADER_VERTEX:
422 declare_global_desc_pointers(ctx);
423
424 if (vs_blit_property) {
425 declare_vs_blit_inputs(ctx, vs_blit_property);
426
427 /* VGPRs */
428 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
429 break;
430 }
431
432 declare_per_stage_desc_pointers(ctx, true);
433 declare_vs_specific_input_sgprs(ctx);
434 if (!shader->is_gs_copy_shader)
435 declare_vb_descriptor_input_sgprs(ctx);
436
437 if (shader->key.as_es) {
438 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
439 &ctx->es2gs_offset);
440 } else if (shader->key.as_ls) {
441 /* no extra parameters */
442 } else {
443 /* The locations of the other parameters are assigned dynamically. */
444 declare_streamout_params(ctx, &shader->selector->so);
445 }
446
447 /* VGPRs */
448 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
449
450 /* Return values */
451 if (shader->key.opt.vs_as_prim_discard_cs) {
452 for (i = 0; i < 4; i++)
453 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
454 }
455 break;
456
457 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
458 declare_global_desc_pointers(ctx);
459 declare_per_stage_desc_pointers(ctx, true);
460 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
461 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
462 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
463 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
464 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
465 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
466
467 /* VGPRs */
468 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
469 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
470
471 /* param_tcs_offchip_offset and param_tcs_factor_offset are
472 * placed after the user SGPRs.
473 */
474 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
475 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
476 for (i = 0; i < 11; i++)
477 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
478 break;
479
480 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
481 /* Merged stages have 8 system SGPRs at the beginning. */
482 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
483 declare_per_stage_desc_pointers(ctx,
484 ctx->type == PIPE_SHADER_TESS_CTRL);
485 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
486 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
487 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
488 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
489 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
490 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
491
492 declare_global_desc_pointers(ctx);
493 declare_per_stage_desc_pointers(ctx,
494 ctx->type == PIPE_SHADER_VERTEX);
495 declare_vs_specific_input_sgprs(ctx);
496
497 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
498 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
499 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
500 declare_vb_descriptor_input_sgprs(ctx);
501
502 /* VGPRs (first TCS, then VS) */
503 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
504 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
505
506 if (ctx->type == PIPE_SHADER_VERTEX) {
507 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
508
509 /* LS return values are inputs to the TCS main shader part. */
510 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
511 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
512 for (i = 0; i < 2; i++)
513 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
514 } else {
515 /* TCS return values are inputs to the TCS epilog.
516 *
517 * param_tcs_offchip_offset, param_tcs_factor_offset,
518 * param_tcs_offchip_layout, and param_rw_buffers
519 * should be passed to the epilog.
520 */
521 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
522 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
523 for (i = 0; i < 11; i++)
524 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
525 }
526 break;
527
528 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
529 /* Merged stages have 8 system SGPRs at the beginning. */
530 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
531 declare_per_stage_desc_pointers(ctx,
532 ctx->type == PIPE_SHADER_GEOMETRY);
533
534 if (ctx->shader->key.as_ngg)
535 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
536 else
537 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
538
539 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
540 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
541 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
542 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
543 &ctx->small_prim_cull_info); /* SPI_SHADER_PGM_LO_GS << 8 */
544 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
545
546 declare_global_desc_pointers(ctx);
547 if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
548 declare_per_stage_desc_pointers(ctx,
549 (ctx->type == PIPE_SHADER_VERTEX ||
550 ctx->type == PIPE_SHADER_TESS_EVAL));
551 }
552
553 if (ctx->type == PIPE_SHADER_VERTEX) {
554 if (vs_blit_property)
555 declare_vs_blit_inputs(ctx, vs_blit_property);
556 else
557 declare_vs_specific_input_sgprs(ctx);
558 } else {
559 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
560 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
561 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
562 /* Declare as many input SGPRs as the VS has. */
563 }
564
565 if (ctx->type == PIPE_SHADER_VERTEX)
566 declare_vb_descriptor_input_sgprs(ctx);
567
568 /* VGPRs (first GS, then VS/TES) */
569 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
570 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
571 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
572 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
573 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
574
575 if (ctx->type == PIPE_SHADER_VERTEX) {
576 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
577 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
578 declare_tes_input_vgprs(ctx, ngg_cull_shader);
579 }
580
581 if ((ctx->shader->key.as_es || ngg_cull_shader) &&
582 (ctx->type == PIPE_SHADER_VERTEX ||
583 ctx->type == PIPE_SHADER_TESS_EVAL)) {
584 unsigned num_user_sgprs, num_vgprs;
585
586 if (ctx->type == PIPE_SHADER_VERTEX) {
587 /* For the NGG cull shader, add 1 SGPR to hold
588 * the vertex buffer pointer.
589 */
590 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR + ngg_cull_shader;
591
592 if (ngg_cull_shader && shader->selector->num_vbos_in_user_sgprs) {
593 assert(num_user_sgprs <= 8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
594 num_user_sgprs = SI_SGPR_VS_VB_DESCRIPTOR_FIRST +
595 shader->selector->num_vbos_in_user_sgprs * 4;
596 }
597 } else {
598 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
599 }
600
601 /* The NGG cull shader has to return all 9 VGPRs + the old thread ID.
602 *
603 * The normal merged ESGS shader only has to return the 5 VGPRs
604 * for the GS stage.
605 */
606 num_vgprs = ngg_cull_shader ? 10 : 5;
607
608 /* ES return values are inputs to GS. */
609 for (i = 0; i < 8 + num_user_sgprs; i++)
610 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
611 for (i = 0; i < num_vgprs; i++)
612 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
613 }
614 break;
615
616 case PIPE_SHADER_TESS_EVAL:
617 declare_global_desc_pointers(ctx);
618 declare_per_stage_desc_pointers(ctx, true);
619 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
620 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
621 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
622
623 if (shader->key.as_es) {
624 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
625 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
626 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
627 } else {
628 declare_streamout_params(ctx, &shader->selector->so);
629 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
630 }
631
632 /* VGPRs */
633 declare_tes_input_vgprs(ctx, ngg_cull_shader);
634 break;
635
636 case PIPE_SHADER_GEOMETRY:
637 declare_global_desc_pointers(ctx);
638 declare_per_stage_desc_pointers(ctx, true);
639 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
640 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
641
642 /* VGPRs */
643 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
644 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
645 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
646 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
647 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
648 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
649 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
650 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
651 break;
652
653 case PIPE_SHADER_FRAGMENT:
654 declare_global_desc_pointers(ctx);
655 declare_per_stage_desc_pointers(ctx, true);
656 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL,
657 SI_PARAM_ALPHA_REF);
658 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
659 &ctx->args.prim_mask, SI_PARAM_PRIM_MASK);
660
661 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
662 SI_PARAM_PERSP_SAMPLE);
663 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
664 &ctx->args.persp_center, SI_PARAM_PERSP_CENTER);
665 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
666 &ctx->args.persp_centroid, SI_PARAM_PERSP_CENTROID);
667 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
668 NULL, SI_PARAM_PERSP_PULL_MODEL);
669 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
670 &ctx->args.linear_sample, SI_PARAM_LINEAR_SAMPLE);
671 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
672 &ctx->args.linear_center, SI_PARAM_LINEAR_CENTER);
673 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
674 &ctx->args.linear_centroid, SI_PARAM_LINEAR_CENTROID);
675 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT,
676 NULL, SI_PARAM_LINE_STIPPLE_TEX);
677 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
678 &ctx->args.frag_pos[0], SI_PARAM_POS_X_FLOAT);
679 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
680 &ctx->args.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
681 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
682 &ctx->args.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
683 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
684 &ctx->args.frag_pos[3], SI_PARAM_POS_W_FLOAT);
685 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
686 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
687 &ctx->args.front_face, SI_PARAM_FRONT_FACE);
688 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
689 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
690 &ctx->args.ancillary, SI_PARAM_ANCILLARY);
691 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
692 &ctx->args.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
693 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
694 &ctx->pos_fixed_pt, SI_PARAM_POS_FIXED_PT);
695
696 /* Color inputs from the prolog. */
697 if (shader->selector->info.colors_read) {
698 unsigned num_color_elements =
699 util_bitcount(shader->selector->info.colors_read);
700
701 for (i = 0; i < num_color_elements; i++)
702 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
703
704 num_prolog_vgprs += num_color_elements;
705 }
706
707 /* Outputs for the epilog. */
708 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
709 num_returns =
710 num_return_sgprs +
711 util_bitcount(shader->selector->info.colors_written) * 4 +
712 shader->selector->info.writes_z +
713 shader->selector->info.writes_stencil +
714 shader->selector->info.writes_samplemask +
715 1 /* SampleMaskIn */;
716
717 num_returns = MAX2(num_returns,
718 num_return_sgprs +
719 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
720
721 for (i = 0; i < num_return_sgprs; i++)
722 returns[i] = ctx->ac.i32;
723 for (; i < num_returns; i++)
724 returns[i] = ctx->ac.f32;
725 break;
726
727 case PIPE_SHADER_COMPUTE:
728 declare_global_desc_pointers(ctx);
729 declare_per_stage_desc_pointers(ctx, true);
730 if (shader->selector->info.uses_grid_size)
731 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT,
732 &ctx->args.num_work_groups);
733 if (shader->selector->info.uses_block_size &&
734 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
735 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
736
737 unsigned cs_user_data_dwords =
738 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
739 if (cs_user_data_dwords) {
740 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT,
741 &ctx->cs_user_data);
742 }
743
744 /* Hardware SGPRs. */
745 for (i = 0; i < 3; i++) {
746 if (shader->selector->info.uses_block_id[i]) {
747 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
748 &ctx->args.workgroup_ids[i]);
749 }
750 }
751 if (shader->selector->info.uses_subgroup_info)
752 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
753
754 /* Hardware VGPRs. */
755 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
756 &ctx->args.local_invocation_ids);
757 break;
758 default:
759 assert(0 && "unimplemented shader");
760 return;
761 }
762
763 si_llvm_create_func(ctx, ngg_cull_shader ? "ngg_cull_main" : "main",
764 returns, num_returns, si_get_max_workgroup_size(shader));
765
766 /* Reserve register locations for VGPR inputs the PS prolog may need. */
767 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
768 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
769 "InitialPSInputAddr",
770 S_0286D0_PERSP_SAMPLE_ENA(1) |
771 S_0286D0_PERSP_CENTER_ENA(1) |
772 S_0286D0_PERSP_CENTROID_ENA(1) |
773 S_0286D0_LINEAR_SAMPLE_ENA(1) |
774 S_0286D0_LINEAR_CENTER_ENA(1) |
775 S_0286D0_LINEAR_CENTROID_ENA(1) |
776 S_0286D0_FRONT_FACE_ENA(1) |
777 S_0286D0_ANCILLARY_ENA(1) |
778 S_0286D0_POS_FIXED_PT_ENA(1));
779 }
780
781 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
782 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
783
784 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
785 shader->info.num_input_vgprs -= num_prolog_vgprs;
786
787 if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
788 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
789 /* The LSHS size is not known until draw time, so we append it
790 * at the end of whatever LDS use there may be in the rest of
791 * the shader (currently none, unless LLVM decides to do its
792 * own LDS-based lowering).
793 */
794 ctx->ac.lds = LLVMAddGlobalInAddressSpace(
795 ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
796 "__lds_end", AC_ADDR_SPACE_LDS);
797 LLVMSetAlignment(ctx->ac.lds, 256);
798 } else {
799 ac_declare_lds_as_pointer(&ctx->ac);
800 }
801 }
802
 803 	/* Unlike radv, we override these arguments in the prolog, so they
 804 	 * appear as normal arguments to the API shader.
 805 	 */
806 if (ctx->type == PIPE_SHADER_VERTEX) {
807 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
808 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
809 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
810 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
811 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
812 }
813 }
814
815 /* For the UMR disassembler. */
816 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
817 #define DEBUGGER_NUM_MARKERS 5
818
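/**
 * Open all ELF binary parts of the shader (prolog, previous stage, main
 * part, epilog) with the runtime linker, declaring shared LDS symbols where
 * needed. Also computes shader->config.lds_size in allocation units.
 */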
819 static bool si_shader_binary_open(struct si_screen *screen,
820 struct si_shader *shader,
821 struct ac_rtld_binary *rtld)
822 {
823 const struct si_shader_selector *sel = shader->selector;
824 const char *part_elfs[5];
825 size_t part_sizes[5];
826 unsigned num_parts = 0;
827
828 #define add_part(shader_or_part) \
829 if (shader_or_part) { \
830 part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
831 part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
832 num_parts++; \
833 }
834
835 add_part(shader->prolog);
836 add_part(shader->previous_stage);
837 add_part(shader->prolog2);
838 add_part(shader);
839 add_part(shader->epilog);
840
841 #undef add_part
842
843 struct ac_rtld_symbol lds_symbols[2];
844 unsigned num_lds_symbols = 0;
845
846 if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
847 (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
848 /* We add this symbol even on LLVM <= 8 to ensure that
849 * shader->config.lds_size is set correctly below.
850 */
851 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
852 sym->name = "esgs_ring";
853 sym->size = shader->gs_info.esgs_ring_size;
854 sym->align = 64 * 1024;
855 }
856
857 if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
858 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
859 sym->name = "ngg_emit";
860 sym->size = shader->ngg.ngg_emit_size * 4;
861 sym->align = 4;
862 }
863
864 bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
865 .info = &screen->info,
866 .options = {
867 .halt_at_entry = screen->options.halt_shaders,
868 },
869 .shader_type = tgsi_processor_to_shader_stage(sel->type),
870 .wave_size = si_get_shader_wave_size(shader),
871 .num_parts = num_parts,
872 .elf_ptrs = part_elfs,
873 .elf_sizes = part_sizes,
874 .num_shared_lds_symbols = num_lds_symbols,
875 .shared_lds_symbols = lds_symbols });
876
877 if (rtld->lds_size > 0) {
878 unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
879 shader->config.lds_size =
880 align(rtld->lds_size, alloc_granularity) / alloc_granularity;
881 }
882
883 return ok;
884 }
885
886 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
887 {
888 struct ac_rtld_binary rtld;
889 si_shader_binary_open(screen, shader, &rtld);
890 return rtld.exec_size;
891 }
892
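/* Runtime-linker callback that resolves the scratch resource descriptor
 * dwords from the scratch buffer virtual address. */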
893 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
894 {
895 uint64_t *scratch_va = data;
896
897 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
898 *value = (uint32_t)*scratch_va;
899 return true;
900 }
901 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
902 /* Enable scratch coalescing. */
903 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) |
904 S_008F04_SWIZZLE_ENABLE(1);
905 return true;
906 }
907
908 return false;
909 }
910
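/** Link all shader parts and upload the resulting code to a GPU buffer. */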
911 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
912 uint64_t scratch_va)
913 {
914 struct ac_rtld_binary binary;
915 if (!si_shader_binary_open(sscreen, shader, &binary))
916 return false;
917
918 si_resource_reference(&shader->bo, NULL);
919 shader->bo = si_aligned_buffer_create(&sscreen->b,
920 sscreen->info.cpdma_prefetch_writes_memory ?
921 0 : SI_RESOURCE_FLAG_READ_ONLY,
922 PIPE_USAGE_IMMUTABLE,
923 align(binary.rx_size, SI_CPDMA_ALIGNMENT),
924 256);
925 if (!shader->bo)
926 return false;
927
928 /* Upload. */
929 struct ac_rtld_upload_info u = {};
930 u.binary = &binary;
931 u.get_external_symbol = si_get_external_symbol;
932 u.cb_data = &scratch_va;
933 u.rx_va = shader->bo->gpu_address;
934 u.rx_ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
935 PIPE_TRANSFER_READ_WRITE |
936 PIPE_TRANSFER_UNSYNCHRONIZED |
937 RADEON_TRANSFER_TEMPORARY);
938 if (!u.rx_ptr)
939 return false;
940
941 bool ok = ac_rtld_upload(&u);
942
943 sscreen->ws->buffer_unmap(shader->bo->buf);
944 ac_rtld_close(&binary);
945
946 return ok;
947 }
948
949 static void si_shader_dump_disassembly(struct si_screen *screen,
950 const struct si_shader_binary *binary,
951 enum pipe_shader_type shader_type,
952 unsigned wave_size,
953 struct pipe_debug_callback *debug,
954 const char *name, FILE *file)
955 {
956 struct ac_rtld_binary rtld_binary;
957
958 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
959 .info = &screen->info,
960 .shader_type = tgsi_processor_to_shader_stage(shader_type),
961 .wave_size = wave_size,
962 .num_parts = 1,
963 .elf_ptrs = &binary->elf_buffer,
964 .elf_sizes = &binary->elf_size }))
965 return;
966
967 const char *disasm;
968 size_t nbytes;
969
970 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
971 goto out;
972
973 if (nbytes > INT_MAX)
974 goto out;
975
976 if (debug && debug->debug_message) {
977 /* Very long debug messages are cut off, so send the
978 * disassembly one line at a time. This causes more
979 * overhead, but on the plus side it simplifies
980 * parsing of resulting logs.
981 */
982 pipe_debug_message(debug, SHADER_INFO,
983 "Shader Disassembly Begin");
984
985 uint64_t line = 0;
986 while (line < nbytes) {
987 int count = nbytes - line;
988 const char *nl = memchr(disasm + line, '\n', nbytes - line);
989 if (nl)
990 count = nl - (disasm + line);
991
992 if (count) {
993 pipe_debug_message(debug, SHADER_INFO,
994 "%.*s", count, disasm + line);
995 }
996
997 line += count + 1;
998 }
999
1000 pipe_debug_message(debug, SHADER_INFO,
1001 "Shader Disassembly End");
1002 }
1003
1004 if (file) {
1005 fprintf(file, "Shader %s disassembly:\n", name);
1006 fprintf(file, "%*s", (int)nbytes, disasm);
1007 }
1008
1009 out:
1010 ac_rtld_close(&rtld_binary);
1011 }
1012
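/**
 * Estimate the maximum number of waves of this shader that can run on one
 * SIMD, based on SGPR, VGPR, and LDS usage (reported in Wave64 units).
 */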
1013 static void si_calculate_max_simd_waves(struct si_shader *shader)
1014 {
1015 struct si_screen *sscreen = shader->selector->screen;
1016 struct ac_shader_config *conf = &shader->config;
1017 unsigned num_inputs = shader->selector->info.num_inputs;
1018 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
1019 unsigned lds_per_wave = 0;
1020 unsigned max_simd_waves;
1021
1022 max_simd_waves = sscreen->info.max_wave64_per_simd;
1023
1024 /* Compute LDS usage for PS. */
1025 switch (shader->selector->type) {
1026 case PIPE_SHADER_FRAGMENT:
1027 /* The minimum usage per wave is (num_inputs * 48). The maximum
1028 * usage is (num_inputs * 48 * 16).
1029 * We can get anything in between and it varies between waves.
1030 *
1031 * The 48 bytes per input for a single primitive is equal to
1032 * 4 bytes/component * 4 components/input * 3 points.
1033 *
1034 * Other stages don't know the size at compile time or don't
1035 * allocate LDS per wave, but instead they do it per thread group.
1036 */
1037 lds_per_wave = conf->lds_size * lds_increment +
1038 align(num_inputs * 48, lds_increment);
1039 break;
1040 case PIPE_SHADER_COMPUTE:
1041 if (shader->selector) {
1042 unsigned max_workgroup_size =
1043 si_get_max_workgroup_size(shader);
1044 lds_per_wave = (conf->lds_size * lds_increment) /
1045 DIV_ROUND_UP(max_workgroup_size,
1046 sscreen->compute_wave_size);
1047 }
1048 break;
1049 default:;
1050 }
1051
1052 /* Compute the per-SIMD wave counts. */
1053 if (conf->num_sgprs) {
1054 max_simd_waves =
1055 MIN2(max_simd_waves,
1056 sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
1057 }
1058
1059 if (conf->num_vgprs) {
1060 /* Always print wave limits as Wave64, so that we can compare
1061 * Wave32 and Wave64 with shader-db fairly. */
1062 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
1063 max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
1064 }
1065
1066 unsigned max_lds_per_simd = sscreen->info.lds_size_per_workgroup / 4;
1067 if (lds_per_wave)
1068 max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
1069
1070 shader->info.max_simd_waves = max_simd_waves;
1071 }
1072
1073 void si_shader_dump_stats_for_shader_db(struct si_screen *screen,
1074 struct si_shader *shader,
1075 struct pipe_debug_callback *debug)
1076 {
1077 const struct ac_shader_config *conf = &shader->config;
1078
1079 if (screen->options.debug_disassembly)
1080 si_shader_dump_disassembly(screen, &shader->binary,
1081 shader->selector->type,
1082 si_get_shader_wave_size(shader),
1083 debug, "main", NULL);
1084
1085 pipe_debug_message(debug, SHADER_INFO,
1086 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
1087 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
1088 "Spilled VGPRs: %d PrivMem VGPRs: %d",
1089 conf->num_sgprs, conf->num_vgprs,
1090 si_get_shader_binary_size(screen, shader),
1091 conf->lds_size, conf->scratch_bytes_per_wave,
1092 shader->info.max_simd_waves, conf->spilled_sgprs,
1093 conf->spilled_vgprs, shader->info.private_mem_vgprs);
1094 }
1095
1096 static void si_shader_dump_stats(struct si_screen *sscreen,
1097 struct si_shader *shader,
1098 FILE *file,
1099 bool check_debug_option)
1100 {
1101 const struct ac_shader_config *conf = &shader->config;
1102
1103 if (!check_debug_option ||
1104 si_can_dump_shader(sscreen, shader->selector->type)) {
1105 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
1106 fprintf(file, "*** SHADER CONFIG ***\n"
1107 "SPI_PS_INPUT_ADDR = 0x%04x\n"
1108 "SPI_PS_INPUT_ENA = 0x%04x\n",
1109 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
1110 }
1111
1112 fprintf(file, "*** SHADER STATS ***\n"
1113 "SGPRS: %d\n"
1114 "VGPRS: %d\n"
1115 "Spilled SGPRs: %d\n"
1116 "Spilled VGPRs: %d\n"
1117 "Private memory VGPRs: %d\n"
1118 "Code Size: %d bytes\n"
1119 "LDS: %d blocks\n"
1120 "Scratch: %d bytes per wave\n"
1121 "Max Waves: %d\n"
1122 "********************\n\n\n",
1123 conf->num_sgprs, conf->num_vgprs,
1124 conf->spilled_sgprs, conf->spilled_vgprs,
1125 shader->info.private_mem_vgprs,
1126 si_get_shader_binary_size(sscreen, shader),
1127 conf->lds_size, conf->scratch_bytes_per_wave,
1128 shader->info.max_simd_waves);
1129 }
1130 }
1131
1132 const char *si_get_shader_name(const struct si_shader *shader)
1133 {
1134 switch (shader->selector->type) {
1135 case PIPE_SHADER_VERTEX:
1136 if (shader->key.as_es)
1137 return "Vertex Shader as ES";
1138 else if (shader->key.as_ls)
1139 return "Vertex Shader as LS";
1140 else if (shader->key.opt.vs_as_prim_discard_cs)
1141 return "Vertex Shader as Primitive Discard CS";
1142 else if (shader->key.as_ngg)
1143 return "Vertex Shader as ESGS";
1144 else
1145 return "Vertex Shader as VS";
1146 case PIPE_SHADER_TESS_CTRL:
1147 return "Tessellation Control Shader";
1148 case PIPE_SHADER_TESS_EVAL:
1149 if (shader->key.as_es)
1150 return "Tessellation Evaluation Shader as ES";
1151 else if (shader->key.as_ngg)
1152 return "Tessellation Evaluation Shader as ESGS";
1153 else
1154 return "Tessellation Evaluation Shader as VS";
1155 case PIPE_SHADER_GEOMETRY:
1156 if (shader->is_gs_copy_shader)
1157 return "GS Copy Shader as VS";
1158 else
1159 return "Geometry Shader";
1160 case PIPE_SHADER_FRAGMENT:
1161 return "Pixel Shader";
1162 case PIPE_SHADER_COMPUTE:
1163 return "Compute Shader";
1164 default:
1165 return "Unknown Shader";
1166 }
1167 }
1168
1169 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
1170 struct pipe_debug_callback *debug,
1171 FILE *file, bool check_debug_option)
1172 {
1173 enum pipe_shader_type shader_type = shader->selector->type;
1174
1175 if (!check_debug_option ||
1176 si_can_dump_shader(sscreen, shader_type))
1177 si_dump_shader_key(shader, file);
1178
1179 if (!check_debug_option && shader->binary.llvm_ir_string) {
1180 if (shader->previous_stage &&
1181 shader->previous_stage->binary.llvm_ir_string) {
1182 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
1183 si_get_shader_name(shader));
1184 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
1185 }
1186
1187 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
1188 si_get_shader_name(shader));
1189 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
1190 }
1191
1192 if (!check_debug_option ||
1193 (si_can_dump_shader(sscreen, shader_type) &&
1194 !(sscreen->debug_flags & DBG(NO_ASM)))) {
1195 unsigned wave_size = si_get_shader_wave_size(shader);
1196
1197 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
1198
1199 if (shader->prolog)
1200 si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
1201 shader_type, wave_size, debug, "prolog", file);
1202 if (shader->previous_stage)
1203 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
1204 shader_type, wave_size, debug, "previous stage", file);
1205 if (shader->prolog2)
1206 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
1207 shader_type, wave_size, debug, "prolog2", file);
1208
1209 si_shader_dump_disassembly(sscreen, &shader->binary, shader_type,
1210 wave_size, debug, "main", file);
1211
1212 if (shader->epilog)
1213 si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
1214 shader_type, wave_size, debug, "epilog", file);
1215 fprintf(file, "\n");
1216 }
1217
1218 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
1219 }
1220
1221 static void si_dump_shader_key_vs(const struct si_shader_key *key,
1222 const struct si_vs_prolog_bits *prolog,
1223 const char *prefix, FILE *f)
1224 {
1225 fprintf(f, " %s.instance_divisor_is_one = %u\n",
1226 prefix, prolog->instance_divisor_is_one);
1227 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
1228 prefix, prolog->instance_divisor_is_fetched);
1229 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
1230 prefix, prolog->unpack_instance_id_from_vertex_id);
1231 fprintf(f, " %s.ls_vgpr_fix = %u\n",
1232 prefix, prolog->ls_vgpr_fix);
1233
1234 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
1235 fprintf(f, " mono.vs.fix_fetch = {");
1236 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
1237 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
1238 if (i)
1239 fprintf(f, ", ");
1240 if (!fix.bits)
1241 fprintf(f, "0");
1242 else
1243 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
1244 fix.u.num_channels_m1, fix.u.format);
1245 }
1246 fprintf(f, "}\n");
1247 }
1248
1249 static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
1250 {
1251 const struct si_shader_key *key = &shader->key;
1252 enum pipe_shader_type shader_type = shader->selector->type;
1253
1254 fprintf(f, "SHADER KEY\n");
1255
1256 switch (shader_type) {
1257 case PIPE_SHADER_VERTEX:
1258 si_dump_shader_key_vs(key, &key->part.vs.prolog,
1259 "part.vs.prolog", f);
1260 fprintf(f, " as_es = %u\n", key->as_es);
1261 fprintf(f, " as_ls = %u\n", key->as_ls);
1262 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1263 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
1264 key->mono.u.vs_export_prim_id);
1265 fprintf(f, " opt.vs_as_prim_discard_cs = %u\n",
1266 key->opt.vs_as_prim_discard_cs);
1267 fprintf(f, " opt.cs_prim_type = %s\n",
1268 tgsi_primitive_names[key->opt.cs_prim_type]);
1269 fprintf(f, " opt.cs_indexed = %u\n",
1270 key->opt.cs_indexed);
1271 fprintf(f, " opt.cs_instancing = %u\n",
1272 key->opt.cs_instancing);
1273 fprintf(f, " opt.cs_primitive_restart = %u\n",
1274 key->opt.cs_primitive_restart);
1275 fprintf(f, " opt.cs_provoking_vertex_first = %u\n",
1276 key->opt.cs_provoking_vertex_first);
1277 fprintf(f, " opt.cs_need_correct_orientation = %u\n",
1278 key->opt.cs_need_correct_orientation);
1279 fprintf(f, " opt.cs_cull_front = %u\n",
1280 key->opt.cs_cull_front);
1281 fprintf(f, " opt.cs_cull_back = %u\n",
1282 key->opt.cs_cull_back);
1283 fprintf(f, " opt.cs_cull_z = %u\n",
1284 key->opt.cs_cull_z);
1285 fprintf(f, " opt.cs_halfz_clip_space = %u\n",
1286 key->opt.cs_halfz_clip_space);
1287 break;
1288
1289 case PIPE_SHADER_TESS_CTRL:
1290 if (shader->selector->screen->info.chip_class >= GFX9) {
1291 si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
1292 "part.tcs.ls_prolog", f);
1293 }
1294 fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
1295 fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
1296 break;
1297
1298 case PIPE_SHADER_TESS_EVAL:
1299 fprintf(f, " as_es = %u\n", key->as_es);
1300 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1301 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
1302 key->mono.u.vs_export_prim_id);
1303 break;
1304
1305 case PIPE_SHADER_GEOMETRY:
1306 if (shader->is_gs_copy_shader)
1307 break;
1308
1309 if (shader->selector->screen->info.chip_class >= GFX9 &&
1310 key->part.gs.es->type == PIPE_SHADER_VERTEX) {
1311 si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
1312 "part.gs.vs_prolog", f);
1313 }
1314 fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
1315 fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
1316 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1317 break;
1318
1319 case PIPE_SHADER_COMPUTE:
1320 break;
1321
1322 case PIPE_SHADER_FRAGMENT:
1323 fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
1324 fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
1325 fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
1326 fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
1327 fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
1328 fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
1329 fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
1330 fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
1331 fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
1332 fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n", key->part.ps.prolog.samplemask_log_ps_iter);
1333 fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
1334 fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
1335 fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
1336 fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
1337 fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
1338 fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
1339 fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
1340 fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
1341 fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n", key->mono.u.ps.interpolate_at_sample_force_center);
1342 fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
1343 fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
1344 fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
1345 break;
1346
1347 default:
1348 assert(0);
1349 }
1350
1351 if ((shader_type == PIPE_SHADER_GEOMETRY ||
1352 shader_type == PIPE_SHADER_TESS_EVAL ||
1353 shader_type == PIPE_SHADER_VERTEX) &&
1354 !key->as_es && !key->as_ls) {
1355 fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
1356 fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
1357 if (shader_type != PIPE_SHADER_GEOMETRY)
1358 fprintf(f, " opt.ngg_culling = 0x%x\n", key->opt.ngg_culling);
1359 }
1360 }
1361
1362 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
1363 {
1364 struct si_shader *shader = ctx->shader;
1365 struct si_shader_info *info = &shader->selector->info;
1366
1367 if ((ctx->type != PIPE_SHADER_VERTEX &&
1368 ctx->type != PIPE_SHADER_TESS_EVAL) ||
1369 shader->key.as_ls ||
1370 shader->key.as_es)
1371 return;
1372
1373 ac_optimize_vs_outputs(&ctx->ac,
1374 ctx->main_fn,
1375 shader->info.vs_output_param_offset,
1376 info->num_outputs,
1377 &shader->info.nr_param_exports);
1378 }
1379
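/** Return whether the vertex shader needs a separate prolog shader part. */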
1380 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
1381 const struct si_vs_prolog_bits *prolog_key,
1382 const struct si_shader_key *key,
1383 bool ngg_cull_shader)
1384 {
1385 /* VGPR initialization fixup for Vega10 and Raven is always done in the
1386 * VS prolog. */
1387 return sel->vs_needs_prolog ||
1388 prolog_key->ls_vgpr_fix ||
1389 prolog_key->unpack_instance_id_from_vertex_id ||
1390 (ngg_cull_shader && key->opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL);
1391 }
1392
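/**
 * Build the main LLVM function: declare the arguments, set up stage-specific
 * state (rings, LDS, EXEC handling for merged shaders), and translate the
 * NIR shader into LLVM IR.
 */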
1393 static bool si_build_main_function(struct si_shader_context *ctx,
1394 struct si_shader *shader,
1395 struct nir_shader *nir, bool free_nir,
1396 bool ngg_cull_shader)
1397 {
1398 struct si_shader_selector *sel = shader->selector;
1399 const struct si_shader_info *info = &sel->info;
1400
1401 ctx->shader = shader;
1402 ctx->type = sel->type;
1403
1404 ctx->num_const_buffers = util_last_bit(info->const_buffers_declared);
1405 ctx->num_shader_buffers = util_last_bit(info->shader_buffers_declared);
1406
1407 ctx->num_samplers = util_last_bit(info->samplers_declared);
1408 ctx->num_images = util_last_bit(info->images_declared);
1409
1410 si_llvm_init_resource_callbacks(ctx);
1411
1412 switch (ctx->type) {
1413 case PIPE_SHADER_VERTEX:
1414 si_llvm_init_vs_callbacks(ctx, ngg_cull_shader);
1415 break;
1416 case PIPE_SHADER_TESS_CTRL:
1417 si_llvm_init_tcs_callbacks(ctx);
1418 break;
1419 case PIPE_SHADER_TESS_EVAL:
1420 si_llvm_init_tes_callbacks(ctx, ngg_cull_shader);
1421 break;
1422 case PIPE_SHADER_GEOMETRY:
1423 si_llvm_init_gs_callbacks(ctx);
1424 break;
1425 case PIPE_SHADER_FRAGMENT:
1426 si_llvm_init_ps_callbacks(ctx);
1427 break;
1428 case PIPE_SHADER_COMPUTE:
1429 ctx->abi.load_local_group_size = si_llvm_get_block_size;
1430 break;
1431 default:
1432 assert(!"Unsupported shader type");
1433 return false;
1434 }
1435
1436 si_create_function(ctx, ngg_cull_shader);
1437
1438 if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)
1439 si_preload_esgs_ring(ctx);
1440
1441 if (ctx->type == PIPE_SHADER_GEOMETRY)
1442 si_preload_gs_rings(ctx);
1443 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
1444 si_llvm_preload_tes_rings(ctx);
1445
1446 if (ctx->type == PIPE_SHADER_TESS_CTRL &&
1447 sel->info.tessfactors_are_def_in_all_invocs) {
1448 for (unsigned i = 0; i < 6; i++) {
1449 ctx->invoc0_tess_factors[i] =
1450 ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
1451 }
1452 }
1453
1454 if (ctx->type == PIPE_SHADER_GEOMETRY) {
1455 for (unsigned i = 0; i < 4; i++) {
1456 ctx->gs_next_vertex[i] =
1457 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1458 }
1459 if (shader->key.as_ngg) {
1460 for (unsigned i = 0; i < 4; ++i) {
1461 ctx->gs_curprim_verts[i] =
1462 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1463 ctx->gs_generated_prims[i] =
1464 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1465 }
1466
1467 unsigned scratch_size = 8;
1468 if (sel->so.num_outputs)
1469 scratch_size = 44;
1470
1471 assert(!ctx->gs_ngg_scratch);
1472 LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, scratch_size);
1473 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1474 ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
1475 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
1476 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
1477
1478 ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1479 LLVMArrayType(ctx->ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
1480 LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
1481 LLVMSetAlignment(ctx->gs_ngg_emit, 4);
1482 }
1483 }
1484
1485 if (ctx->type != PIPE_SHADER_GEOMETRY &&
1486 (shader->key.as_ngg && !shader->key.as_es)) {
1487 /* Unconditionally declare scratch space base for streamout and
1488 * vertex compaction. Whether space is actually allocated is
1489 * determined during linking / PM4 creation.
1490 *
1491 * Add an extra dword per vertex to ensure an odd stride, which
1492 * avoids bank conflicts for SoA accesses.
1493 */
1494 if (!gfx10_is_ngg_passthrough(shader))
1495 si_llvm_declare_esgs_ring(ctx);
1496
 1497 		/* This is really only needed when streamout and/or vertex
 1498 		 * compaction is enabled.
 1499 		 */
1500 if (!ctx->gs_ngg_scratch &&
1501 (sel->so.num_outputs || shader->key.opt.ngg_culling)) {
1502 LLVMTypeRef asi32 = LLVMArrayType(ctx->ac.i32, 8);
1503 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1504 asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
1505 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
1506 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
1507 }
1508 }
1509
1510 /* For GFX9 merged shaders:
1511 * - Set EXEC for the first shader. If the prolog is present, set
1512 * EXEC there instead.
1513 * - Add a barrier before the second shader.
1514 * - In the second shader, reset EXEC to ~0 and wrap the main part in
1515 * an if-statement. This is required for correctness in geometry
1516 * shaders, to ensure that empty GS waves do not send GS_EMIT and
1517 * GS_CUT messages.
1518 *
1519 * For monolithic merged shaders, the first shader is wrapped in an
1520 * if-block together with its prolog in si_build_wrapper_function.
1521 *
1522 * NGG vertex and tess eval shaders running as the last
1523 * vertex/geometry stage handle execution explicitly using
1524 * if-statements.
1525 */
1526 if (ctx->screen->info.chip_class >= GFX9) {
1527 if (!shader->is_monolithic &&
1528 (shader->key.as_es || shader->key.as_ls) &&
1529 (ctx->type == PIPE_SHADER_TESS_EVAL ||
1530 (ctx->type == PIPE_SHADER_VERTEX &&
1531 !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog,
1532 &shader->key, ngg_cull_shader)))) {
1533 si_init_exec_from_input(ctx,
1534 ctx->merged_wave_info, 0);
1535 } else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
1536 ctx->type == PIPE_SHADER_GEOMETRY ||
1537 (shader->key.as_ngg && !shader->key.as_es)) {
1538 LLVMValueRef thread_enabled;
1539 bool nested_barrier;
1540
1541 if (!shader->is_monolithic ||
1542 (ctx->type == PIPE_SHADER_TESS_EVAL &&
1543 shader->key.as_ngg && !shader->key.as_es &&
1544 !shader->key.opt.ngg_culling))
1545 ac_init_exec_full_mask(&ctx->ac);
1546
1547 if ((ctx->type == PIPE_SHADER_VERTEX ||
1548 ctx->type == PIPE_SHADER_TESS_EVAL) &&
1549 shader->key.as_ngg && !shader->key.as_es &&
1550 !shader->key.opt.ngg_culling) {
1551 gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);
1552
1553 /* Build the primitive export at the beginning
1554 * of the shader if possible.
1555 */
1556 if (gfx10_ngg_export_prim_early(shader))
1557 gfx10_ngg_build_export_prim(ctx, NULL, NULL);
1558 }
1559
1560 if (ctx->type == PIPE_SHADER_TESS_CTRL ||
1561 ctx->type == PIPE_SHADER_GEOMETRY) {
1562 if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
1563 gfx10_ngg_gs_emit_prologue(ctx);
1564 nested_barrier = false;
1565 } else {
1566 nested_barrier = true;
1567 }
1568
1569 thread_enabled = si_is_gs_thread(ctx);
1570 } else {
1571 thread_enabled = si_is_es_thread(ctx);
1572 nested_barrier = false;
1573 }
1574
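/* Wrap the main part in an if (thread_enabled) block. The label id below
 * (11500) is arbitrary; it only has to match the endif emitted later when
 * the merged part finishes.
 */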
1575 ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
1576 ctx->merged_wrap_if_label = 11500;
1577 ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
1578
1579 if (nested_barrier) {
1580 /* Execute a barrier before the second shader in
1581 * a merged shader.
1582 *
1583 * Execute the barrier inside the conditional block,
1584 * so that empty waves can jump directly to s_endpgm,
1585 * which will also signal the barrier.
1586 *
1587 * This is possible in gfx9, because an empty wave
1588 * for the second shader does not participate in
1589 * the epilogue. With NGG, empty waves may still
1590 * be required to export data (e.g. GS output vertices),
1591 * so we cannot let them exit early.
1592 *
1593 * If the shader is TCS and the TCS epilog is present
1594 * and contains a barrier, it will wait there and then
1595 * reach s_endpgm.
1596 */
1597 si_llvm_emit_barrier(ctx);
1598 }
1599 }
1600 }
1601
1602 bool success = si_nir_build_llvm(ctx, nir);
1603 if (free_nir)
1604 ralloc_free(nir);
1605 if (!success) {
1606 fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
1607 return false;
1608 }
1609
1610 si_llvm_build_ret(ctx, ctx->return_value);
1611 return true;
1612 }
1613
1614 /**
1615 * Compute the VS prolog key, which contains all the information needed to
1616 * build the VS prolog function, and set shader->info bits where needed.
1617 *
1618 * \param info Shader info of the vertex shader.
1619 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
1620 * \param ngg_cull_shader Whether the key is for the prolog of the NGG cull shader.
1621 * \param prolog_key Key of the VS prolog
1622 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
1623 * \param key Output shader part key.
1624 */
1625 static void si_get_vs_prolog_key(const struct si_shader_info *info,
1626 unsigned num_input_sgprs,
1627 bool ngg_cull_shader,
1628 const struct si_vs_prolog_bits *prolog_key,
1629 struct si_shader *shader_out,
1630 union si_shader_part_key *key)
1631 {
1632 memset(key, 0, sizeof(*key));
1633 key->vs_prolog.states = *prolog_key;
1634 key->vs_prolog.num_input_sgprs = num_input_sgprs;
1635 key->vs_prolog.num_inputs = info->num_inputs;
1636 key->vs_prolog.as_ls = shader_out->key.as_ls;
1637 key->vs_prolog.as_es = shader_out->key.as_es;
1638 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
1639 key->vs_prolog.as_prim_discard_cs = shader_out->key.opt.vs_as_prim_discard_cs;
1640
1641 if (ngg_cull_shader) {
1642 key->vs_prolog.gs_fast_launch_tri_list = !!(shader_out->key.opt.ngg_culling &
1643 SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST);
1644 key->vs_prolog.gs_fast_launch_tri_strip = !!(shader_out->key.opt.ngg_culling &
1645 SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP);
1646 } else {
1647 key->vs_prolog.has_ngg_cull_inputs = !!shader_out->key.opt.ngg_culling;
1648 }
1649
1650 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
1651 key->vs_prolog.as_ls = 1;
1652 key->vs_prolog.num_merged_next_stage_vgprs = 2;
1653 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
1654 key->vs_prolog.as_es = 1;
1655 key->vs_prolog.num_merged_next_stage_vgprs = 5;
1656 } else if (shader_out->key.as_ngg) {
1657 key->vs_prolog.num_merged_next_stage_vgprs = 5;
1658 }
1659
1660 /* Only one of these combinations can be set. as_ngg can be set with as_es. */
1661 assert(key->vs_prolog.as_ls +
1662 key->vs_prolog.as_ngg +
1663 (key->vs_prolog.as_es && !key->vs_prolog.as_ngg) +
1664 key->vs_prolog.as_prim_discard_cs <= 1);
1665
1666 /* Enable loading the InstanceID VGPR. */
1667 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
1668
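/* instance_divisor_is_one/_is_fetched are per-attribute bitmasks, so e.g.
 * with 3 inputs input_mask is 0x7 and InstanceID is needed if any of those
 * attributes uses an instance divisor.
 */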
1669 if ((key->vs_prolog.states.instance_divisor_is_one |
1670 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
1671 shader_out->info.uses_instanceid = true;
1672 }
1673
1674 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
1675 struct si_shader_selector *sel)
1676 {
1677 if (!compiler->low_opt_passes)
1678 return false;
1679
1680 /* Assume a slow CPU. */
1681 assert(!sel->screen->info.has_dedicated_vram &&
1682 sel->screen->info.chip_class <= GFX8);
1683
1684 /* For a crazy dEQP test containing 2597 memory opcodes, mostly
1685 * buffer stores. */
1686 return sel->type == PIPE_SHADER_COMPUTE &&
1687 sel->info.num_memory_instructions > 1000;
1688 }
1689
1690 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel,
1691 bool *free_nir)
1692 {
1693 *free_nir = false;
1694
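/* Prefer the live NIR attached to the selector; otherwise deserialize the
 * stored blob into a fresh copy that the caller must free (hence *free_nir).
 */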
1695 if (sel->nir) {
1696 return sel->nir;
1697 } else if (sel->nir_binary) {
1698 struct pipe_screen *screen = &sel->screen->b;
1699 const void *options =
1700 screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
1701 sel->type);
1702
1703 struct blob_reader blob_reader;
1704 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
1705 *free_nir = true;
1706 return nir_deserialize(NULL, options, &blob_reader);
1707 }
1708 return NULL;
1709 }
1710
1711 static bool si_llvm_compile_shader(struct si_screen *sscreen,
1712 struct ac_llvm_compiler *compiler,
1713 struct si_shader *shader,
1714 struct pipe_debug_callback *debug,
1715 struct nir_shader *nir,
1716 bool free_nir)
1717 {
1718 struct si_shader_selector *sel = shader->selector;
1719 struct si_shader_context ctx;
1720
1721 si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));
1722
1723 LLVMValueRef ngg_cull_main_fn = NULL;
1724 if (shader->key.opt.ngg_culling) {
1725 if (!si_build_main_function(&ctx, shader, nir, false, true)) {
1726 si_llvm_dispose(&ctx);
1727 return false;
1728 }
1729 ngg_cull_main_fn = ctx.main_fn;
1730 ctx.main_fn = NULL;
1731 }
1732
1733 if (!si_build_main_function(&ctx, shader, nir, free_nir, false)) {
1734 si_llvm_dispose(&ctx);
1735 return false;
1736 }
1737
1738 if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
1739 LLVMValueRef parts[4];
1740 unsigned num_parts = 0;
1741 bool has_prolog = false;
1742 LLVMValueRef main_fn = ctx.main_fn;
1743
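/* The wrapper is assembled in this order:
 * [VS prolog for the cull shader]?, NGG cull main, [VS prolog]?, VS main.
 */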
1744 if (ngg_cull_main_fn) {
1745 if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog,
1746 &shader->key, true)) {
1747 union si_shader_part_key prolog_key;
1748 si_get_vs_prolog_key(&sel->info,
1749 shader->info.num_input_sgprs,
1750 true,
1751 &shader->key.part.vs.prolog,
1752 shader, &prolog_key);
1753 prolog_key.vs_prolog.is_monolithic = true;
1754 si_llvm_build_vs_prolog(&ctx, &prolog_key);
1755 parts[num_parts++] = ctx.main_fn;
1756 has_prolog = true;
1757 }
1758 parts[num_parts++] = ngg_cull_main_fn;
1759 }
1760
1761 if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog,
1762 &shader->key, false)) {
1763 union si_shader_part_key prolog_key;
1764 si_get_vs_prolog_key(&sel->info,
1765 shader->info.num_input_sgprs,
1766 false,
1767 &shader->key.part.vs.prolog,
1768 shader, &prolog_key);
1769 prolog_key.vs_prolog.is_monolithic = true;
1770 si_llvm_build_vs_prolog(&ctx, &prolog_key);
1771 parts[num_parts++] = ctx.main_fn;
1772 has_prolog = true;
1773 }
1774 parts[num_parts++] = main_fn;
1775
1776 si_build_wrapper_function(&ctx, parts, num_parts,
1777 has_prolog ? 1 : 0, 0);
1778
1779 if (ctx.shader->key.opt.vs_as_prim_discard_cs)
1780 si_build_prim_discard_compute_shader(&ctx);
1781 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_EVAL &&
1782 ngg_cull_main_fn) {
1783 LLVMValueRef parts[2];
1784
1785 parts[0] = ngg_cull_main_fn;
1786 parts[1] = ctx.main_fn;
1787
1788 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
1789 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
1790 if (sscreen->info.chip_class >= GFX9) {
1791 struct si_shader_selector *ls = shader->key.part.tcs.ls;
1792 LLVMValueRef parts[4];
1793 bool vs_needs_prolog =
1794 si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog,
1795 &shader->key, false);
1796
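/* Final part order expected by the wrapper:
 * [LS prolog]?, LS main, TCS main, TCS epilog.
 * The TCS parts are built first; the LS parts are filled in below.
 */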
1797 /* TCS main part */
1798 parts[2] = ctx.main_fn;
1799
1800 /* TCS epilog */
1801 union si_shader_part_key tcs_epilog_key;
1802 memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
1803 tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
1804 si_llvm_build_tcs_epilog(&ctx, &tcs_epilog_key);
1805 parts[3] = ctx.main_fn;
1806
1807 /* VS as LS main part */
1808 nir = get_nir_shader(ls, &free_nir);
1809 struct si_shader shader_ls = {};
1810 shader_ls.selector = ls;
1811 shader_ls.key.as_ls = 1;
1812 shader_ls.key.mono = shader->key.mono;
1813 shader_ls.key.opt = shader->key.opt;
1814 shader_ls.is_monolithic = true;
1815
1816 if (!si_build_main_function(&ctx, &shader_ls, nir, free_nir, false)) {
1817 si_llvm_dispose(&ctx);
1818 return false;
1819 }
1820 shader->info.uses_instanceid |= ls->info.uses_instanceid;
1821 parts[1] = ctx.main_fn;
1822
1823 /* LS prolog */
1824 if (vs_needs_prolog) {
1825 union si_shader_part_key vs_prolog_key;
1826 si_get_vs_prolog_key(&ls->info,
1827 shader_ls.info.num_input_sgprs,
1828 false,
1829 &shader->key.part.tcs.ls_prolog,
1830 shader, &vs_prolog_key);
1831 vs_prolog_key.vs_prolog.is_monolithic = true;
1832 si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
1833 parts[0] = ctx.main_fn;
1834 }
1835
1836 /* Reset the shader context. */
1837 ctx.shader = shader;
1838 ctx.type = PIPE_SHADER_TESS_CTRL;
1839
1840 si_build_wrapper_function(&ctx,
1841 parts + !vs_needs_prolog,
1842 4 - !vs_needs_prolog, vs_needs_prolog,
1843 vs_needs_prolog ? 2 : 1);
1844 } else {
1845 LLVMValueRef parts[2];
1846 union si_shader_part_key epilog_key;
1847
1848 parts[0] = ctx.main_fn;
1849
1850 memset(&epilog_key, 0, sizeof(epilog_key));
1851 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
1852 si_llvm_build_tcs_epilog(&ctx, &epilog_key);
1853 parts[1] = ctx.main_fn;
1854
1855 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
1856 }
1857 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
1858 if (ctx.screen->info.chip_class >= GFX9) {
1859 struct si_shader_selector *es = shader->key.part.gs.es;
1860 LLVMValueRef es_prolog = NULL;
1861 LLVMValueRef es_main = NULL;
1862 LLVMValueRef gs_prolog = NULL;
1863 LLVMValueRef gs_main = ctx.main_fn;
1864
1865 /* GS prolog */
1866 union si_shader_part_key gs_prolog_key;
1867 memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
1868 gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
1869 gs_prolog_key.gs_prolog.is_monolithic = true;
1870 gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
1871 si_llvm_build_gs_prolog(&ctx, &gs_prolog_key);
1872 gs_prolog = ctx.main_fn;
1873
1874 /* ES main part */
1875 nir = get_nir_shader(es, &free_nir);
1876 struct si_shader shader_es = {};
1877 shader_es.selector = es;
1878 shader_es.key.as_es = 1;
1879 shader_es.key.as_ngg = shader->key.as_ngg;
1880 shader_es.key.mono = shader->key.mono;
1881 shader_es.key.opt = shader->key.opt;
1882 shader_es.is_monolithic = true;
1883
1884 if (!si_build_main_function(&ctx, &shader_es, nir, free_nir, false)) {
1885 si_llvm_dispose(&ctx);
1886 return false;
1887 }
1888 shader->info.uses_instanceid |= es->info.uses_instanceid;
1889 es_main = ctx.main_fn;
1890
1891 /* ES prolog */
1892 if (es->type == PIPE_SHADER_VERTEX &&
1893 si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog,
1894 &shader->key, false)) {
1895 union si_shader_part_key vs_prolog_key;
1896 si_get_vs_prolog_key(&es->info,
1897 shader_es.info.num_input_sgprs,
1898 false,
1899 &shader->key.part.gs.vs_prolog,
1900 shader, &vs_prolog_key);
1901 vs_prolog_key.vs_prolog.is_monolithic = true;
1902 si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
1903 es_prolog = ctx.main_fn;
1904 }
1905
1906 /* Reset the shader context. */
1907 ctx.shader = shader;
1908 ctx.type = PIPE_SHADER_GEOMETRY;
1909
1910 /* Prepare the array of shader parts. */
1911 LLVMValueRef parts[4];
1912 unsigned num_parts = 0, main_part, next_first_part;
1913
1914 if (es_prolog)
1915 parts[num_parts++] = es_prolog;
1916
1917 parts[main_part = num_parts++] = es_main;
1918 parts[next_first_part = num_parts++] = gs_prolog;
1919 parts[num_parts++] = gs_main;
1920
1921 si_build_wrapper_function(&ctx, parts, num_parts,
1922 main_part, next_first_part);
1923 } else {
1924 LLVMValueRef parts[2];
1925 union si_shader_part_key prolog_key;
1926
1927 parts[1] = ctx.main_fn;
1928
1929 memset(&prolog_key, 0, sizeof(prolog_key));
1930 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
1931 si_llvm_build_gs_prolog(&ctx, &prolog_key);
1932 parts[0] = ctx.main_fn;
1933
1934 si_build_wrapper_function(&ctx, parts, 2, 1, 0);
1935 }
1936 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
1937 si_llvm_build_monolithic_ps(&ctx, shader);
1938 }
1939
1940 si_llvm_optimize_module(&ctx);
1941
1942 /* Post-optimization transformations and analysis. */
1943 si_optimize_vs_outputs(&ctx);
1944
1945 if ((debug && debug->debug_message) ||
1946 si_can_dump_shader(sscreen, ctx.type)) {
1947 ctx.shader->info.private_mem_vgprs =
1948 ac_count_scratch_private_memory(ctx.main_fn);
1949 }
1950
1951 /* Make sure the input is a pointer and not an integer followed by inttoptr. */
1952 assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) ==
1953 LLVMPointerTypeKind);
1954
1955 /* Compile to bytecode. */
1956 if (!si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
1957 &ctx.ac, debug, ctx.type, si_get_shader_name(shader),
1958 si_should_optimize_less(compiler, shader->selector))) {
1959 si_llvm_dispose(&ctx);
1960 fprintf(stderr, "LLVM failed to compile shader\n");
1961 return false;
1962 }
1963
1964 si_llvm_dispose(&ctx);
1965 return true;
1966 }
1967
1968 bool si_compile_shader(struct si_screen *sscreen,
1969 struct ac_llvm_compiler *compiler,
1970 struct si_shader *shader,
1971 struct pipe_debug_callback *debug)
1972 {
1973 struct si_shader_selector *sel = shader->selector;
1974 bool free_nir;
1975 struct nir_shader *nir = get_nir_shader(sel, &free_nir);
1976
1977 /* Dump NIR before doing NIR->LLVM conversion in case the
1978 * conversion fails. */
1979 if (si_can_dump_shader(sscreen, sel->type) &&
1980 !(sscreen->debug_flags & DBG(NO_NIR))) {
1981 nir_print_shader(nir, stderr);
1982 si_dump_streamout(&sel->so);
1983 }
1984
1985 memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
1986 sizeof(shader->info.vs_output_param_offset));
1987
1988 shader->info.uses_instanceid = sel->info.uses_instanceid;
1989
1990 /* TODO: ACO could compile non-monolithic shaders here (starting
1991 * with PS and NGG VS), but monolithic shaders should be compiled
1992 * by LLVM due to more complicated compilation.
1993 */
1994 if (!si_llvm_compile_shader(sscreen, compiler, shader, debug, nir, free_nir))
1995 return false;
1996
1997 /* Validate SGPR and VGPR usage for compute to detect compiler bugs
1998 * (LLVM 3.9svn, for example, produced shaders that exceeded these limits).
1999 */
2000 if (sel->type == PIPE_SHADER_COMPUTE) {
2001 unsigned wave_size = sscreen->compute_wave_size;
2002 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd *
2003 (wave_size == 32 ? 2 : 1);
2004 unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
2005 unsigned max_sgprs_per_wave = 128;
2006 unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
2007 unsigned threads_per_tg = si_get_max_workgroup_size(shader);
2008 unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
2009 unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);
2010
2011 max_vgprs = max_vgprs / waves_per_simd;
2012 max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);
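/* Example: a 1024-thread workgroup at wave32 is 32 waves, i.e. 8 waves per
 * SIMD under the 4-SIMD assumption above, so each wave may use at most 1/8
 * of the per-SIMD registers (SGPRs additionally capped at 128 per wave).
 */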
2013
2014 if (shader->config.num_sgprs > max_sgprs ||
2015 shader->config.num_vgprs > max_vgprs) {
2016 fprintf(stderr, "LLVM failed to compile a shader correctly: "
2017 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
2018 shader->config.num_sgprs, shader->config.num_vgprs,
2019 max_sgprs, max_vgprs);
2020
2021 /* Just terminate the process, because dependent
2022 * shaders can hang due to bad input data, but use
2023 * the env var to allow shader-db to work.
2024 */
2025 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
2026 abort();
2027 }
2028 }
2029
2030 /* Add the scratch offset to input SGPRs. */
2031 if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(shader))
2032 shader->info.num_input_sgprs += 1; /* scratch byte offset */
2033
2034 /* Calculate the number of fragment input VGPRs. */
2035 if (sel->type == PIPE_SHADER_FRAGMENT) {
2036 shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(&shader->config,
2037 &shader->info.face_vgpr_index,
2038 &shader->info.ancillary_vgpr_index);
2039 }
2040
2041 si_calculate_max_simd_waves(shader);
2042 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
2043 return true;
2044 }
2045
2046 /**
2047 * Create, compile and return a shader part (prolog or epilog).
2048 *
2049 * \param sscreen screen
2050 * \param list list of shader parts of the same category
2051 * \param type shader type
2052 * \param key shader part key
2053 * \param prolog whether the part being requested is a prolog
2054 * \param compiler LLVM compiler
2055 * \param debug debug callback
2056 * \param build the callback responsible for building the main function
2057 * \return non-NULL on success
2058 */
2059 static struct si_shader_part *
2060 si_get_shader_part(struct si_screen *sscreen,
2061 struct si_shader_part **list,
2062 enum pipe_shader_type type,
2063 bool prolog,
2064 union si_shader_part_key *key,
2065 struct ac_llvm_compiler *compiler,
2066 struct pipe_debug_callback *debug,
2067 void (*build)(struct si_shader_context *,
2068 union si_shader_part_key *),
2069 const char *name)
2070 {
2071 struct si_shader_part *result;
2072
2073 simple_mtx_lock(&sscreen->shader_parts_mutex);
2074
2075 /* Find existing. */
2076 for (result = *list; result; result = result->next) {
2077 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
2078 simple_mtx_unlock(&sscreen->shader_parts_mutex);
2079 return result;
2080 }
2081 }
2082
2083 /* Compile a new one. */
2084 result = CALLOC_STRUCT(si_shader_part);
2085 result->key = *key;
2086
2087 struct si_shader_selector sel = {};
2088 sel.screen = sscreen;
2089
2090 struct si_shader shader = {};
2091 shader.selector = &sel;
2092
2093 switch (type) {
2094 case PIPE_SHADER_VERTEX:
2095 shader.key.as_ls = key->vs_prolog.as_ls;
2096 shader.key.as_es = key->vs_prolog.as_es;
2097 shader.key.as_ngg = key->vs_prolog.as_ngg;
2098 shader.key.opt.vs_as_prim_discard_cs = key->vs_prolog.as_prim_discard_cs;
2099 break;
2100 case PIPE_SHADER_TESS_CTRL:
2101 assert(!prolog);
2102 shader.key.part.tcs.epilog = key->tcs_epilog.states;
2103 break;
2104 case PIPE_SHADER_GEOMETRY:
2105 assert(prolog);
2106 shader.key.as_ngg = key->gs_prolog.as_ngg;
2107 break;
2108 case PIPE_SHADER_FRAGMENT:
2109 if (prolog)
2110 shader.key.part.ps.prolog = key->ps_prolog.states;
2111 else
2112 shader.key.part.ps.epilog = key->ps_epilog.states;
2113 break;
2114 default:
2115 unreachable("bad shader part");
2116 }
2117
2118 struct si_shader_context ctx;
2119 si_llvm_context_init(&ctx, sscreen, compiler,
2120 si_get_wave_size(sscreen, type, shader.key.as_ngg,
2121 shader.key.as_es,
2122 shader.key.opt.vs_as_prim_discard_cs));
2123 ctx.shader = &shader;
2124 ctx.type = type;
2125
2126 build(&ctx, key);
2127
2128 /* Compile. */
2129 si_llvm_optimize_module(&ctx);
2130
2131 if (!si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
2132 &ctx.ac, debug, ctx.type, name, false)) {
2133 FREE(result);
2134 result = NULL;
2135 goto out;
2136 }
2137
2138 result->next = *list;
2139 *list = result;
2140
2141 out:
2142 si_llvm_dispose(&ctx);
2143 simple_mtx_unlock(&sscreen->shader_parts_mutex);
2144 return result;
2145 }
2146
2147 static bool si_get_vs_prolog(struct si_screen *sscreen,
2148 struct ac_llvm_compiler *compiler,
2149 struct si_shader *shader,
2150 struct pipe_debug_callback *debug,
2151 struct si_shader *main_part,
2152 const struct si_vs_prolog_bits *key)
2153 {
2154 struct si_shader_selector *vs = main_part->selector;
2155
2156 if (!si_vs_needs_prolog(vs, key, &shader->key, false))
2157 return true;
2158
2159 /* Get the prolog. */
2160 union si_shader_part_key prolog_key;
2161 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs, false,
2162 key, shader, &prolog_key);
2163
2164 shader->prolog =
2165 si_get_shader_part(sscreen, &sscreen->vs_prologs,
2166 PIPE_SHADER_VERTEX, true, &prolog_key, compiler,
2167 debug, si_llvm_build_vs_prolog,
2168 "Vertex Shader Prolog");
2169 return shader->prolog != NULL;
2170 }
2171
2172 /**
2173 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
2174 */
2175 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
2176 struct ac_llvm_compiler *compiler,
2177 struct si_shader *shader,
2178 struct pipe_debug_callback *debug)
2179 {
2180 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader,
2181 &shader->key.part.vs.prolog);
2182 }
2183
2184 /**
2185 * Select and compile (or reuse) TCS parts (epilog).
2186 */
2187 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
2188 struct ac_llvm_compiler *compiler,
2189 struct si_shader *shader,
2190 struct pipe_debug_callback *debug)
2191 {
2192 if (sscreen->info.chip_class >= GFX9) {
2193 struct si_shader *ls_main_part =
2194 shader->key.part.tcs.ls->main_shader_part_ls;
2195
2196 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
2197 &shader->key.part.tcs.ls_prolog))
2198 return false;
2199
2200 shader->previous_stage = ls_main_part;
2201 }
2202
2203 /* Get the epilog. */
2204 union si_shader_part_key epilog_key;
2205 memset(&epilog_key, 0, sizeof(epilog_key));
2206 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
2207
2208 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
2209 PIPE_SHADER_TESS_CTRL, false,
2210 &epilog_key, compiler, debug,
2211 si_llvm_build_tcs_epilog,
2212 "Tessellation Control Shader Epilog");
2213 return shader->epilog != NULL;
2214 }
2215
2216 /**
2217 * Select and compile (or reuse) GS parts (prolog).
2218 */
2219 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
2220 struct ac_llvm_compiler *compiler,
2221 struct si_shader *shader,
2222 struct pipe_debug_callback *debug)
2223 {
2224 if (sscreen->info.chip_class >= GFX9) {
2225 struct si_shader *es_main_part;
2226 enum pipe_shader_type es_type = shader->key.part.gs.es->type;
2227
2228 if (shader->key.as_ngg)
2229 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
2230 else
2231 es_main_part = shader->key.part.gs.es->main_shader_part_es;
2232
2233 if (es_type == PIPE_SHADER_VERTEX &&
2234 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
2235 &shader->key.part.gs.vs_prolog))
2236 return false;
2237
2238 shader->previous_stage = es_main_part;
2239 }
2240
2241 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
2242 return true;
2243
2244 union si_shader_part_key prolog_key;
2245 memset(&prolog_key, 0, sizeof(prolog_key));
2246 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
2247 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
2248
2249 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
2250 PIPE_SHADER_GEOMETRY, true,
2251 &prolog_key, compiler, debug,
2252 si_llvm_build_gs_prolog,
2253 "Geometry Shader Prolog");
2254 return shader->prolog2 != NULL;
2255 }
2256
2257 /**
2258 * Compute the PS prolog key, which contains all the information needed to
2259 * build the PS prolog function, and set related bits in shader->config.
2260 */
2261 void si_get_ps_prolog_key(struct si_shader *shader,
2262 union si_shader_part_key *key,
2263 bool separate_prolog)
2264 {
2265 struct si_shader_info *info = &shader->selector->info;
2266
2267 memset(key, 0, sizeof(*key));
2268 key->ps_prolog.states = shader->key.part.ps.prolog;
2269 key->ps_prolog.colors_read = info->colors_read;
2270 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
2271 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
2272 key->ps_prolog.wqm = info->uses_derivatives &&
2273 (key->ps_prolog.colors_read ||
2274 key->ps_prolog.states.force_persp_sample_interp ||
2275 key->ps_prolog.states.force_linear_sample_interp ||
2276 key->ps_prolog.states.force_persp_center_interp ||
2277 key->ps_prolog.states.force_linear_center_interp ||
2278 key->ps_prolog.states.bc_optimize_for_persp ||
2279 key->ps_prolog.states.bc_optimize_for_linear);
2280 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
2281
2282 if (info->colors_read) {
2283 unsigned *color = shader->selector->color_attr_index;
2284
2285 if (shader->key.part.ps.prolog.color_two_side) {
2286 /* BCOLORs are stored after the last input. */
2287 key->ps_prolog.num_interp_inputs = info->num_inputs;
2288 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
2289 if (separate_prolog)
2290 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
2291 }
2292
2293 for (unsigned i = 0; i < 2; i++) {
2294 unsigned interp = info->input_interpolate[color[i]];
2295 unsigned location = info->input_interpolate_loc[color[i]];
2296
2297 if (!(info->colors_read & (0xf << i*4)))
2298 continue;
2299
2300 key->ps_prolog.color_attr_index[i] = color[i];
2301
2302 if (shader->key.part.ps.prolog.flatshade_colors &&
2303 interp == TGSI_INTERPOLATE_COLOR)
2304 interp = TGSI_INTERPOLATE_CONSTANT;
2305
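/* color_interp_vgpr_index selects the first VGPR of the chosen barycentric
 * pair in the PS input VGPR layout (persp sample/center/centroid, then the
 * linear set). The larger monolithic values in the LINEAR case assume that
 * PERSP_PULL_MODEL occupies three VGPRs in between.
 */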
2306 switch (interp) {
2307 case TGSI_INTERPOLATE_CONSTANT:
2308 key->ps_prolog.color_interp_vgpr_index[i] = -1;
2309 break;
2310 case TGSI_INTERPOLATE_PERSPECTIVE:
2311 case TGSI_INTERPOLATE_COLOR:
2312 /* Force the interpolation location for colors here. */
2313 if (shader->key.part.ps.prolog.force_persp_sample_interp)
2314 location = TGSI_INTERPOLATE_LOC_SAMPLE;
2315 if (shader->key.part.ps.prolog.force_persp_center_interp)
2316 location = TGSI_INTERPOLATE_LOC_CENTER;
2317
2318 switch (location) {
2319 case TGSI_INTERPOLATE_LOC_SAMPLE:
2320 key->ps_prolog.color_interp_vgpr_index[i] = 0;
2321 if (separate_prolog) {
2322 shader->config.spi_ps_input_ena |=
2323 S_0286CC_PERSP_SAMPLE_ENA(1);
2324 }
2325 break;
2326 case TGSI_INTERPOLATE_LOC_CENTER:
2327 key->ps_prolog.color_interp_vgpr_index[i] = 2;
2328 if (separate_prolog) {
2329 shader->config.spi_ps_input_ena |=
2330 S_0286CC_PERSP_CENTER_ENA(1);
2331 }
2332 break;
2333 case TGSI_INTERPOLATE_LOC_CENTROID:
2334 key->ps_prolog.color_interp_vgpr_index[i] = 4;
2335 if (separate_prolog) {
2336 shader->config.spi_ps_input_ena |=
2337 S_0286CC_PERSP_CENTROID_ENA(1);
2338 }
2339 break;
2340 default:
2341 assert(0);
2342 }
2343 break;
2344 case TGSI_INTERPOLATE_LINEAR:
2345 /* Force the interpolation location for colors here. */
2346 if (shader->key.part.ps.prolog.force_linear_sample_interp)
2347 location = TGSI_INTERPOLATE_LOC_SAMPLE;
2348 if (shader->key.part.ps.prolog.force_linear_center_interp)
2349 location = TGSI_INTERPOLATE_LOC_CENTER;
2350
2351 /* The VGPR assignment for non-monolithic shaders
2352 * works because InitialPSInputAddr is set on the
2353 * main shader and PERSP_PULL_MODEL is never used.
2354 */
2355 switch (location) {
2356 case TGSI_INTERPOLATE_LOC_SAMPLE:
2357 key->ps_prolog.color_interp_vgpr_index[i] =
2358 separate_prolog ? 6 : 9;
2359 if (separate_prolog) {
2360 shader->config.spi_ps_input_ena |=
2361 S_0286CC_LINEAR_SAMPLE_ENA(1);
2362 }
2363 break;
2364 case TGSI_INTERPOLATE_LOC_CENTER:
2365 key->ps_prolog.color_interp_vgpr_index[i] =
2366 separate_prolog ? 8 : 11;
2367 if (separate_prolog) {
2368 shader->config.spi_ps_input_ena |=
2369 S_0286CC_LINEAR_CENTER_ENA(1);
2370 }
2371 break;
2372 case TGSI_INTERPOLATE_LOC_CENTROID:
2373 key->ps_prolog.color_interp_vgpr_index[i] =
2374 separate_prolog ? 10 : 13;
2375 if (separate_prolog) {
2376 shader->config.spi_ps_input_ena |=
2377 S_0286CC_LINEAR_CENTROID_ENA(1);
2378 }
2379 break;
2380 default:
2381 assert(0);
2382 }
2383 break;
2384 default:
2385 assert(0);
2386 }
2387 }
2388 }
2389 }
2390
2391 /**
2392 * Check whether a PS prolog is required based on the key.
2393 */
2394 bool si_need_ps_prolog(const union si_shader_part_key *key)
2395 {
2396 return key->ps_prolog.colors_read ||
2397 key->ps_prolog.states.force_persp_sample_interp ||
2398 key->ps_prolog.states.force_linear_sample_interp ||
2399 key->ps_prolog.states.force_persp_center_interp ||
2400 key->ps_prolog.states.force_linear_center_interp ||
2401 key->ps_prolog.states.bc_optimize_for_persp ||
2402 key->ps_prolog.states.bc_optimize_for_linear ||
2403 key->ps_prolog.states.poly_stipple ||
2404 key->ps_prolog.states.samplemask_log_ps_iter;
2405 }
2406
2407 /**
2408 * Compute the PS epilog key, which contains all the information needed to
2409 * build the PS epilog function.
2410 */
2411 void si_get_ps_epilog_key(struct si_shader *shader,
2412 union si_shader_part_key *key)
2413 {
2414 struct si_shader_info *info = &shader->selector->info;
2415 memset(key, 0, sizeof(*key));
2416 key->ps_epilog.colors_written = info->colors_written;
2417 key->ps_epilog.writes_z = info->writes_z;
2418 key->ps_epilog.writes_stencil = info->writes_stencil;
2419 key->ps_epilog.writes_samplemask = info->writes_samplemask;
2420 key->ps_epilog.states = shader->key.part.ps.epilog;
2421 }
2422
2423 /**
2424 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
2425 */
2426 static bool si_shader_select_ps_parts(struct si_screen *sscreen,
2427 struct ac_llvm_compiler *compiler,
2428 struct si_shader *shader,
2429 struct pipe_debug_callback *debug)
2430 {
2431 union si_shader_part_key prolog_key;
2432 union si_shader_part_key epilog_key;
2433
2434 /* Get the prolog. */
2435 si_get_ps_prolog_key(shader, &prolog_key, true);
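/* separate_prolog=true: the prolog will be a separate shader part, so
 * si_get_ps_prolog_key also sets the needed spi_ps_input_ena bits on this
 * shader (see the separate_prolog checks above).
 */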
2436
2437 /* The prolog is a no-op if these aren't set. */
2438 if (si_need_ps_prolog(&prolog_key)) {
2439 shader->prolog =
2440 si_get_shader_part(sscreen, &sscreen->ps_prologs,
2441 PIPE_SHADER_FRAGMENT, true,
2442 &prolog_key, compiler, debug,
2443 si_llvm_build_ps_prolog,
2444 "Fragment Shader Prolog");
2445 if (!shader->prolog)
2446 return false;
2447 }
2448
2449 /* Get the epilog. */
2450 si_get_ps_epilog_key(shader, &epilog_key);
2451
2452 shader->epilog =
2453 si_get_shader_part(sscreen, &sscreen->ps_epilogs,
2454 PIPE_SHADER_FRAGMENT, false,
2455 &epilog_key, compiler, debug,
2456 si_llvm_build_ps_epilog,
2457 "Fragment Shader Epilog");
2458 if (!shader->epilog)
2459 return false;
2460
2461 /* Enable POS_FIXED_PT if polygon stippling is enabled. */
2462 if (shader->key.part.ps.prolog.poly_stipple) {
2463 shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
2464 assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
2465 }
2466
2467 /* Set up the enable bits for per-sample shading if needed. */
2468 if (shader->key.part.ps.prolog.force_persp_sample_interp &&
2469 (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
2470 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2471 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
2472 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
2473 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
2474 }
2475 if (shader->key.part.ps.prolog.force_linear_sample_interp &&
2476 (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
2477 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2478 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
2479 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
2480 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
2481 }
2482 if (shader->key.part.ps.prolog.force_persp_center_interp &&
2483 (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
2484 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2485 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
2486 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
2487 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2488 }
2489 if (shader->key.part.ps.prolog.force_linear_center_interp &&
2490 (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
2491 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2492 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
2493 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
2494 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2495 }
2496
2497 /* POS_W_FLOAT requires that one of the perspective weights is enabled. */
2498 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
2499 !(shader->config.spi_ps_input_ena & 0xf)) {
2500 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2501 assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
2502 }
2503
2504 /* At least one pair of interpolation weights must be enabled. */
2505 if (!(shader->config.spi_ps_input_ena & 0x7f)) {
2506 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2507 assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
2508 }
2509
2510 /* Samplemask fixup requires the sample ID. */
2511 if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
2512 shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
2513 assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
2514 }
2515
2516 /* The sample mask input is always enabled, because the API shader always
2517 * passes it through to the epilog. Disable it here if it's unused.
2518 */
2519 if (!shader->key.part.ps.epilog.poly_line_smoothing &&
2520 !shader->selector->info.reads_samplemask)
2521 shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;
2522
2523 return true;
2524 }
2525
2526 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
2527 unsigned *lds_size)
2528 {
2529 /* If tessellation is all offchip and on-chip GS isn't used, this
2530 * workaround is not needed.
2531 */
2532 return;
2533
2534 /* SPI barrier management bug:
2535 * Make sure we have at least 4k of LDS in use to avoid the bug.
2536 * It applies to workgroup sizes of more than one wavefront.
2537 */
2538 if (sscreen->info.family == CHIP_BONAIRE ||
2539 sscreen->info.family == CHIP_KABINI)
2540 *lds_size = MAX2(*lds_size, 8);
2541 }
2542
2543 void si_fix_resource_usage(struct si_screen *sscreen, struct si_shader *shader)
2544 {
2545 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
2546
2547 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
2548
2549 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
2550 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
2551 si_multiwave_lds_size_workaround(sscreen,
2552 &shader->config.lds_size);
2553 }
2554 }
2555
2556 bool si_create_shader_variant(struct si_screen *sscreen,
2557 struct ac_llvm_compiler *compiler,
2558 struct si_shader *shader,
2559 struct pipe_debug_callback *debug)
2560 {
2561 struct si_shader_selector *sel = shader->selector;
2562 struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
2563
2564 /* LS, ES, VS are compiled on demand if the main part hasn't been
2565 * compiled for that stage.
2566 *
2567 * A GS is compiled on demand if the main part hasn't been compiled
2568 * for the chosen NGG-ness.
2569 *
2570 * Vertex shaders are compiled on demand when a vertex fetch
2571 * workaround must be applied.
2572 */
2573 if (shader->is_monolithic) {
2574 /* Monolithic shader (compiled as a whole, has many variants,
2575 * may take a long time to compile).
2576 */
2577 if (!si_compile_shader(sscreen, compiler, shader, debug))
2578 return false;
2579 } else {
2580 /* The shader consists of several parts:
2581 *
2582 * - the middle part is the user shader, it has 1 variant only
2583 * and it was compiled during the creation of the shader
2584 * selector
2585 * - the prolog part is inserted at the beginning
2586 * - the epilog part is inserted at the end
2587 *
2588 * The prolog and epilog have many (but simple) variants.
2589 *
2590 * Starting with gfx9, geometry and tessellation control
2591 * shaders also contain the prolog and user shader parts of
2592 * the previous shader stage.
2593 */
2594
2595 if (!mainp)
2596 return false;
2597
2598 /* Copy the compiled shader data over. */
2599 shader->is_binary_shared = true;
2600 shader->binary = mainp->binary;
2601 shader->config = mainp->config;
2602 shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
2603 shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
2604 shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
2605 shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
2606 memcpy(shader->info.vs_output_param_offset,
2607 mainp->info.vs_output_param_offset,
2608 sizeof(mainp->info.vs_output_param_offset));
2609 shader->info.uses_instanceid = mainp->info.uses_instanceid;
2610 shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
2611 shader->info.nr_param_exports = mainp->info.nr_param_exports;
2612
2613 /* Select prologs and/or epilogs. */
2614 switch (sel->type) {
2615 case PIPE_SHADER_VERTEX:
2616 if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
2617 return false;
2618 break;
2619 case PIPE_SHADER_TESS_CTRL:
2620 if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
2621 return false;
2622 break;
2623 case PIPE_SHADER_TESS_EVAL:
2624 break;
2625 case PIPE_SHADER_GEOMETRY:
2626 if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
2627 return false;
2628 break;
2629 case PIPE_SHADER_FRAGMENT:
2630 if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
2631 return false;
2632
2633 /* Make sure we have at least as many VGPRs as there
2634 * are allocated inputs.
2635 */
2636 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2637 shader->info.num_input_vgprs);
2638 break;
2639 default:;
2640 }
2641
2642 /* Update SGPR and VGPR counts. */
2643 if (shader->prolog) {
2644 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2645 shader->prolog->config.num_sgprs);
2646 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2647 shader->prolog->config.num_vgprs);
2648 }
2649 if (shader->previous_stage) {
2650 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2651 shader->previous_stage->config.num_sgprs);
2652 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2653 shader->previous_stage->config.num_vgprs);
2654 shader->config.spilled_sgprs =
2655 MAX2(shader->config.spilled_sgprs,
2656 shader->previous_stage->config.spilled_sgprs);
2657 shader->config.spilled_vgprs =
2658 MAX2(shader->config.spilled_vgprs,
2659 shader->previous_stage->config.spilled_vgprs);
2660 shader->info.private_mem_vgprs =
2661 MAX2(shader->info.private_mem_vgprs,
2662 shader->previous_stage->info.private_mem_vgprs);
2663 shader->config.scratch_bytes_per_wave =
2664 MAX2(shader->config.scratch_bytes_per_wave,
2665 shader->previous_stage->config.scratch_bytes_per_wave);
2666 shader->info.uses_instanceid |=
2667 shader->previous_stage->info.uses_instanceid;
2668 }
2669 if (shader->prolog2) {
2670 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2671 shader->prolog2->config.num_sgprs);
2672 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2673 shader->prolog2->config.num_vgprs);
2674 }
2675 if (shader->epilog) {
2676 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
2677 shader->epilog->config.num_sgprs);
2678 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
2679 shader->epilog->config.num_vgprs);
2680 }
2681 si_calculate_max_simd_waves(shader);
2682 }
2683
2684 if (shader->key.as_ngg) {
2685 assert(!shader->key.as_es && !shader->key.as_ls);
2686 gfx10_ngg_calculate_subgroup_info(shader);
2687 } else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
2688 gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
2689 }
2690
2691 si_fix_resource_usage(sscreen, shader);
2692 si_shader_dump(sscreen, shader, debug, stderr, true);
2693
2694 /* Upload. */
2695 if (!si_shader_binary_upload(sscreen, shader, 0)) {
2696 fprintf(stderr, "LLVM failed to upload shader\n");
2697 return false;
2698 }
2699
2700 return true;
2701 }
2702
2703 void si_shader_binary_clean(struct si_shader_binary *binary)
2704 {
2705 free((void *)binary->elf_buffer);
2706 binary->elf_buffer = NULL;
2707
2708 free(binary->llvm_ir_string);
2709 binary->llvm_ir_string = NULL;
2710 }
2711
2712 void si_shader_destroy(struct si_shader *shader)
2713 {
2714 if (shader->scratch_bo)
2715 si_resource_reference(&shader->scratch_bo, NULL);
2716
2717 si_resource_reference(&shader->bo, NULL);
2718
2719 if (!shader->is_binary_shared)
2720 si_shader_binary_clean(&shader->binary);
2721
2722 free(shader->shader_log);
2723 }