radeonsi/nir: fix scanning of bindless images
src/gallium/drivers/radeonsi/si_shader_nir.c
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader_internal.h"
#include "si_pipe.h"

#include "ac_nir_to_llvm.h"

#include "tgsi/tgsi_from_mesa.h"

#include "compiler/nir/nir.h"
#include "compiler/nir_types.h"

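/* Return the nir_variable behind the texture deref source of a tex
 * instruction, or NULL if the instruction has no texture deref (i.e. it
 * addresses the texture by index only).
 */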
static nir_variable* tex_get_texture_var(nir_tex_instr *instr)
{
	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_texture_deref:
			return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src));
		default:
			break;
		}
	}

	return NULL;
}

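/* Return the variable referenced by the first (deref) source of an
 * intrinsic, e.g. the variable behind a load_deref.
 */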
static nir_variable* intrinsic_get_var(nir_intrinsic_instr *instr)
{
	return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));
}

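/* Record which components of a shader input are actually read: per-slot
 * usage masks for vertex attributes, and colors_read for color varyings
 * in the other stages.
 */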
static void gather_intrinsic_load_deref_info(const nir_shader *nir,
					     const nir_intrinsic_instr *instr,
					     nir_variable *var,
					     struct tgsi_shader_info *info)
{
	assert(var && var->data.mode == nir_var_shader_in);

	switch (nir->info.stage) {
	case MESA_SHADER_VERTEX: {
		unsigned i = var->data.driver_location;
		unsigned attrib_count = glsl_count_attribute_slots(var->type, false);

		for (unsigned j = 0; j < attrib_count; j++, i++) {
			if (glsl_type_is_64bit(glsl_without_array(var->type))) {
				/* TODO: set usage mask more accurately for doubles */
				info->input_usage_mask[i] = TGSI_WRITEMASK_XYZW;
			} else {
				uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
				info->input_usage_mask[i] |= mask << var->data.location_frac;
			}
		}
		break;
	}
	default: {
		unsigned semantic_name, semantic_index;
		tgsi_get_gl_varying_semantic(var->data.location, true,
					     &semantic_name, &semantic_index);

		if (semantic_name == TGSI_SEMANTIC_COLOR) {
			uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
			info->colors_read |= mask << (semantic_index * 4);
		}
		break;
	}
	}
}

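/* Scan a single NIR instruction and set the tgsi_shader_info flags it
 * implies: derivative usage, system values read, bindless resource use,
 * memory writes, interpolation modes, etc.
 */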
static void scan_instruction(const struct nir_shader *nir,
			     struct tgsi_shader_info *info,
			     nir_instr *instr)
{
	if (instr->type == nir_instr_type_alu) {
		nir_alu_instr *alu = nir_instr_as_alu(instr);

		switch (alu->op) {
		case nir_op_fddx:
		case nir_op_fddy:
		case nir_op_fddx_fine:
		case nir_op_fddy_fine:
		case nir_op_fddx_coarse:
		case nir_op_fddy_coarse:
			info->uses_derivatives = true;
			break;
		default:
			break;
		}
	} else if (instr->type == nir_instr_type_tex) {
		nir_tex_instr *tex = nir_instr_as_tex(instr);
		nir_variable *texture = tex_get_texture_var(tex);

		if (!texture) {
			info->samplers_declared |=
				u_bit_consecutive(tex->sampler_index, 1);
		} else {
			if (texture->data.bindless)
				info->uses_bindless_samplers = true;
		}

		switch (tex->op) {
		case nir_texop_tex:
		case nir_texop_txb:
		case nir_texop_lod:
			info->uses_derivatives = true;
			break;
		default:
			break;
		}
	} else if (instr->type == nir_instr_type_intrinsic) {
		nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

		switch (intr->intrinsic) {
		case nir_intrinsic_load_front_face:
			info->uses_frontface = 1;
			break;
		case nir_intrinsic_load_instance_id:
			info->uses_instanceid = 1;
			break;
		case nir_intrinsic_load_invocation_id:
			info->uses_invocationid = true;
			break;
		case nir_intrinsic_load_num_work_groups:
			info->uses_grid_size = true;
			break;
		case nir_intrinsic_load_local_group_size:
			/* The block size is translated to an IMM with a fixed block size. */
			if (info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
				info->uses_block_size = true;
			break;
		case nir_intrinsic_load_local_invocation_id:
		case nir_intrinsic_load_work_group_id: {
			unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
			while (mask) {
				unsigned i = u_bit_scan(&mask);

				if (intr->intrinsic == nir_intrinsic_load_work_group_id)
					info->uses_block_id[i] = true;
				else
					info->uses_thread_id[i] = true;
			}
			break;
		}
		case nir_intrinsic_load_vertex_id:
			info->uses_vertexid = 1;
			break;
		case nir_intrinsic_load_vertex_id_zero_base:
			info->uses_vertexid_nobase = 1;
			break;
		case nir_intrinsic_load_base_vertex:
			info->uses_basevertex = 1;
			break;
		case nir_intrinsic_load_primitive_id:
			info->uses_primid = 1;
			break;
		case nir_intrinsic_load_sample_mask_in:
			info->reads_samplemask = true;
			break;
		case nir_intrinsic_load_tess_level_inner:
		case nir_intrinsic_load_tess_level_outer:
			info->reads_tess_factors = true;
			break;
		case nir_intrinsic_bindless_image_load:
			info->uses_bindless_images = true;

			if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
				info->uses_bindless_buffer_load = true;
			else
				info->uses_bindless_image_load = true;
			break;
		case nir_intrinsic_bindless_image_size:
		case nir_intrinsic_bindless_image_samples:
			info->uses_bindless_images = true;
			break;
		case nir_intrinsic_bindless_image_store:
			info->uses_bindless_images = true;

			if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
				info->uses_bindless_buffer_store = true;
			else
				info->uses_bindless_image_store = true;

			info->writes_memory = true;
			break;
		case nir_intrinsic_image_deref_store:
			info->writes_memory = true;
			break;
		case nir_intrinsic_bindless_image_atomic_add:
		case nir_intrinsic_bindless_image_atomic_min:
		case nir_intrinsic_bindless_image_atomic_max:
		case nir_intrinsic_bindless_image_atomic_and:
		case nir_intrinsic_bindless_image_atomic_or:
		case nir_intrinsic_bindless_image_atomic_xor:
		case nir_intrinsic_bindless_image_atomic_exchange:
		case nir_intrinsic_bindless_image_atomic_comp_swap:
			info->uses_bindless_images = true;

			if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
				info->uses_bindless_buffer_atomic = true;
			else
				info->uses_bindless_image_atomic = true;

			info->writes_memory = true;
			break;
		case nir_intrinsic_image_deref_atomic_add:
		case nir_intrinsic_image_deref_atomic_min:
		case nir_intrinsic_image_deref_atomic_max:
		case nir_intrinsic_image_deref_atomic_and:
		case nir_intrinsic_image_deref_atomic_or:
		case nir_intrinsic_image_deref_atomic_xor:
		case nir_intrinsic_image_deref_atomic_exchange:
		case nir_intrinsic_image_deref_atomic_comp_swap:
			info->writes_memory = true;
			break;
		case nir_intrinsic_store_ssbo:
		case nir_intrinsic_ssbo_atomic_add:
		case nir_intrinsic_ssbo_atomic_imin:
		case nir_intrinsic_ssbo_atomic_umin:
		case nir_intrinsic_ssbo_atomic_imax:
		case nir_intrinsic_ssbo_atomic_umax:
		case nir_intrinsic_ssbo_atomic_and:
		case nir_intrinsic_ssbo_atomic_or:
		case nir_intrinsic_ssbo_atomic_xor:
		case nir_intrinsic_ssbo_atomic_exchange:
		case nir_intrinsic_ssbo_atomic_comp_swap:
			info->writes_memory = true;
			break;
		case nir_intrinsic_load_deref: {
			nir_variable *var = intrinsic_get_var(intr);
			nir_variable_mode mode = var->data.mode;
			enum glsl_base_type base_type =
				glsl_get_base_type(glsl_without_array(var->type));

			if (mode == nir_var_shader_in) {
				gather_intrinsic_load_deref_info(nir, intr, var, info);

				switch (var->data.interpolation) {
				case INTERP_MODE_NONE:
					if (glsl_base_type_is_integer(base_type))
						break;

					/* fall-through */
				case INTERP_MODE_SMOOTH:
					if (var->data.sample)
						info->uses_persp_sample = true;
					else if (var->data.centroid)
						info->uses_persp_centroid = true;
					else
						info->uses_persp_center = true;
					break;

				case INTERP_MODE_NOPERSPECTIVE:
					if (var->data.sample)
						info->uses_linear_sample = true;
					else if (var->data.centroid)
						info->uses_linear_centroid = true;
					else
						info->uses_linear_center = true;
					break;
				}
			}
			break;
		}
		case nir_intrinsic_interp_deref_at_centroid:
		case nir_intrinsic_interp_deref_at_sample:
		case nir_intrinsic_interp_deref_at_offset: {
			enum glsl_interp_mode interp = intrinsic_get_var(intr)->data.interpolation;
			switch (interp) {
			case INTERP_MODE_SMOOTH:
			case INTERP_MODE_NONE:
				if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
					info->uses_persp_opcode_interp_centroid = true;
				else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
					info->uses_persp_opcode_interp_sample = true;
				else
					info->uses_persp_opcode_interp_offset = true;
				break;
			case INTERP_MODE_NOPERSPECTIVE:
				if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
					info->uses_linear_opcode_interp_centroid = true;
				else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
					info->uses_linear_opcode_interp_sample = true;
				else
					info->uses_linear_opcode_interp_offset = true;
				break;
			case INTERP_MODE_FLAT:
				break;
			default:
				unreachable("Unsupported interpolation type");
			}
			break;
		}
		default:
			break;
		}
	}
}

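/* Fill the tgsi_tessctrl_info for a tessellation control shader. */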
void si_nir_scan_tess_ctrl(const struct nir_shader *nir,
			   struct tgsi_tessctrl_info *out)
{
	memset(out, 0, sizeof(*out));

	if (nir->info.stage != MESA_SHADER_TESS_CTRL)
		return;

	out->tessfactors_are_def_in_all_invocs =
		ac_are_tessfactors_def_in_all_invocs(nir);
}

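/* Scan a whole NIR shader and populate tgsi_shader_info, mirroring what
 * tgsi_scan_shader() produces for TGSI shaders, so that the rest of
 * radeonsi can consume either representation.
 */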
void si_nir_scan_shader(const struct nir_shader *nir,
			struct tgsi_shader_info *info)
{
	nir_function *func;
	unsigned i;

	info->processor = pipe_shader_type_from_mesa(nir->info.stage);
	info->num_tokens = 2; /* indicate that the shader is non-empty */
	info->num_instructions = 2;

	info->properties[TGSI_PROPERTY_NEXT_SHADER] =
		pipe_shader_type_from_mesa(nir->info.next_stage);

	if (nir->info.stage == MESA_SHADER_VERTEX) {
		info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] =
			nir->info.vs.window_space_position;
	}

	if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
		info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
			nir->info.tess.tcs_vertices_out;
	}

	if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
		if (nir->info.tess.primitive_mode == GL_ISOLINES)
			info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
		else
			info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;

		STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
		STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
			      PIPE_TESS_SPACING_FRACTIONAL_ODD);
		STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
			      PIPE_TESS_SPACING_FRACTIONAL_EVEN);

		info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
		info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
		info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
	}

	if (nir->info.stage == MESA_SHADER_GEOMETRY) {
		info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
		info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
		info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
		info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
	}

	if (nir->info.stage == MESA_SHADER_FRAGMENT) {
		info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
			nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
		info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;

		if (nir->info.fs.pixel_center_integer) {
			info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
				TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
		}

		if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
			switch (nir->info.fs.depth_layout) {
			case FRAG_DEPTH_LAYOUT_ANY:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
				break;
			case FRAG_DEPTH_LAYOUT_GREATER:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
				break;
			case FRAG_DEPTH_LAYOUT_LESS:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
				break;
			case FRAG_DEPTH_LAYOUT_UNCHANGED:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
				break;
			default:
				unreachable("Unknown depth layout");
			}
		}
	}

	if (gl_shader_stage_is_compute(nir->info.stage)) {
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.cs.local_size[0];
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.cs.local_size[1];
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.cs.local_size[2];
	}

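	/* Scan input declarations and translate each slot to a TGSI
	 * semantic with interpolation information.
	 */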
	i = 0;
	uint64_t processed_inputs = 0;
	unsigned num_inputs = 0;
	nir_foreach_variable(variable, &nir->inputs) {
		unsigned semantic_name, semantic_index;

		const struct glsl_type *type = variable->type;
		if (nir_is_per_vertex_io(variable, nir->info.stage)) {
			assert(glsl_type_is_array(type));
			type = glsl_get_array_element(type);
		}

		unsigned attrib_count = glsl_count_attribute_slots(type,
								   nir->info.stage == MESA_SHADER_VERTEX);

		i = variable->data.driver_location;

		/* Vertex shader inputs don't have semantics. The state
		 * tracker has already mapped them to attributes via
		 * variable->data.driver_location.
		 */
		if (nir->info.stage == MESA_SHADER_VERTEX) {
			if (glsl_type_is_dual_slot(glsl_without_array(variable->type)))
				num_inputs++;

			num_inputs++;
			continue;
		}

		/* Fragment shader position is a system value. */
		if (nir->info.stage == MESA_SHADER_FRAGMENT &&
		    variable->data.location == VARYING_SLOT_POS) {
			if (nir->info.fs.pixel_center_integer)
				info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
					TGSI_FS_COORD_PIXEL_CENTER_INTEGER;

			num_inputs++;
			continue;
		}

		for (unsigned j = 0; j < attrib_count; j++, i++) {

			if (processed_inputs & ((uint64_t)1 << i))
				continue;

			processed_inputs |= ((uint64_t)1 << i);
			num_inputs++;

			tgsi_get_gl_varying_semantic(variable->data.location + j, true,
						     &semantic_name, &semantic_index);

			info->input_semantic_name[i] = semantic_name;
			info->input_semantic_index[i] = semantic_index;

			if (semantic_name == TGSI_SEMANTIC_PRIMID)
				info->uses_primid = true;

			if (variable->data.sample)
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;
			else if (variable->data.centroid)
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
			else
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTER;

			enum glsl_base_type base_type =
				glsl_get_base_type(glsl_without_array(variable->type));

			switch (variable->data.interpolation) {
			case INTERP_MODE_NONE:
				if (glsl_base_type_is_integer(base_type)) {
					info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
					break;
				}

				if (semantic_name == TGSI_SEMANTIC_COLOR) {
					info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
					break;
				}
				/* fall-through */

			case INTERP_MODE_SMOOTH:
				assert(!glsl_base_type_is_integer(base_type));

				info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
				break;

			case INTERP_MODE_NOPERSPECTIVE:
				assert(!glsl_base_type_is_integer(base_type));

				info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
				break;

			case INTERP_MODE_FLAT:
				info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
				break;
			}
		}
	}

	info->num_inputs = num_inputs;

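	/* Scan output declarations: build per-slot usage masks, GS stream
	 * assignments and TGSI semantics.
	 */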
	i = 0;
	uint64_t processed_outputs = 0;
	unsigned num_outputs = 0;
	nir_foreach_variable(variable, &nir->outputs) {
		unsigned semantic_name, semantic_index;

		i = variable->data.driver_location;

		const struct glsl_type *type = variable->type;
		if (nir_is_per_vertex_io(variable, nir->info.stage)) {
			assert(glsl_type_is_array(type));
			type = glsl_get_array_element(type);
		}

		unsigned attrib_count = glsl_count_attribute_slots(type, false);
		for (unsigned k = 0; k < attrib_count; k++, i++) {

			if (nir->info.stage == MESA_SHADER_FRAGMENT) {
				tgsi_get_gl_frag_result_semantic(variable->data.location + k,
								 &semantic_name, &semantic_index);

				/* Adjust for dual source blending */
				if (variable->data.index > 0) {
					semantic_index++;
				}
			} else {
				tgsi_get_gl_varying_semantic(variable->data.location + k, true,
							     &semantic_name, &semantic_index);
			}

			unsigned num_components = 4;
			unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
			if (vector_elements)
				num_components = vector_elements;

			unsigned component = variable->data.location_frac;
			if (glsl_type_is_64bit(glsl_without_array(variable->type))) {
				if (glsl_type_is_dual_slot(glsl_without_array(variable->type)) && k % 2) {
					num_components = (num_components * 2) - 4;
					component = 0;
				} else {
					num_components = MIN2(num_components * 2, 4);
				}
			}

			ubyte usagemask = 0;
			for (unsigned j = component; j < num_components + component; j++) {
				switch (j) {
				case 0:
					usagemask |= TGSI_WRITEMASK_X;
					break;
				case 1:
					usagemask |= TGSI_WRITEMASK_Y;
					break;
				case 2:
					usagemask |= TGSI_WRITEMASK_Z;
					break;
				case 3:
					usagemask |= TGSI_WRITEMASK_W;
					break;
				default:
					unreachable("error calculating component index");
				}
			}

			unsigned gs_out_streams;
			if (variable->data.stream & (1u << 31)) {
				gs_out_streams = variable->data.stream & ~(1u << 31);
			} else {
				assert(variable->data.stream < 4);
				gs_out_streams = 0;
				for (unsigned j = 0; j < num_components; ++j)
					gs_out_streams |= variable->data.stream << (2 * (component + j));
			}

			unsigned streamx = gs_out_streams & 3;
			unsigned streamy = (gs_out_streams >> 2) & 3;
			unsigned streamz = (gs_out_streams >> 4) & 3;
			unsigned streamw = (gs_out_streams >> 6) & 3;

			if (usagemask & TGSI_WRITEMASK_X) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_X;
				info->output_streams[i] |= streamx;
				info->num_stream_output_components[streamx]++;
			}
			if (usagemask & TGSI_WRITEMASK_Y) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_Y;
				info->output_streams[i] |= streamy << 2;
				info->num_stream_output_components[streamy]++;
			}
			if (usagemask & TGSI_WRITEMASK_Z) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_Z;
				info->output_streams[i] |= streamz << 4;
				info->num_stream_output_components[streamz]++;
			}
			if (usagemask & TGSI_WRITEMASK_W) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_W;
				info->output_streams[i] |= streamw << 6;
				info->num_stream_output_components[streamw]++;
			}

			/* make sure we only count this location once against
			 * the num_outputs counter.
			 */
			if (processed_outputs & ((uint64_t)1 << i))
				continue;

			processed_outputs |= ((uint64_t)1 << i);
			num_outputs++;

			info->output_semantic_name[i] = semantic_name;
			info->output_semantic_index[i] = semantic_index;

			switch (semantic_name) {
			case TGSI_SEMANTIC_PRIMID:
				info->writes_primid = true;
				break;
			case TGSI_SEMANTIC_VIEWPORT_INDEX:
				info->writes_viewport_index = true;
				break;
			case TGSI_SEMANTIC_LAYER:
				info->writes_layer = true;
				break;
			case TGSI_SEMANTIC_PSIZE:
				info->writes_psize = true;
				break;
			case TGSI_SEMANTIC_CLIPVERTEX:
				info->writes_clipvertex = true;
				break;
			case TGSI_SEMANTIC_COLOR:
				info->colors_written |= 1 << semantic_index;
				break;
			case TGSI_SEMANTIC_STENCIL:
				info->writes_stencil = true;
				break;
			case TGSI_SEMANTIC_SAMPLEMASK:
				info->writes_samplemask = true;
				break;
			case TGSI_SEMANTIC_EDGEFLAG:
				info->writes_edgeflag = true;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (info->processor == PIPE_SHADER_FRAGMENT)
					info->writes_z = true;
				else
					info->writes_position = true;
				break;
			}

			if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
				switch (semantic_name) {
				case TGSI_SEMANTIC_PATCH:
					info->reads_perpatch_outputs = true;
					break;
				case TGSI_SEMANTIC_TESSINNER:
				case TGSI_SEMANTIC_TESSOUTER:
					info->reads_tessfactor_outputs = true;
					break;
				default:
					info->reads_pervertex_outputs = true;
				}
			}
		}

		unsigned loc = variable->data.location;
		if (nir->info.stage == MESA_SHADER_FRAGMENT &&
		    loc == FRAG_RESULT_COLOR &&
		    nir->info.outputs_written & (1ull << loc)) {
			assert(attrib_count == 1);
			info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
		}
	}

	info->num_outputs = num_outputs;

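	/* Scan uniform variables: record declared constant buffers, shader
	 * buffers, samplers and images. Bindless handles are plain uniforms,
	 * so they count against constant buffer 0.
	 */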
	struct set *ubo_set = _mesa_set_create(NULL, _mesa_hash_pointer,
					       _mesa_key_pointer_equal);
	struct set *ssbo_set = _mesa_set_create(NULL, _mesa_hash_pointer,
						_mesa_key_pointer_equal);

	/* Initialise const_file_max[0] */
	info->const_file_max[0] = -1;

	/* The first 8 are reserved for atomic counters using SSBOs */
	unsigned ssbo_idx = 8;

	unsigned ubo_idx = 1;
	nir_foreach_variable(variable, &nir->uniforms) {
		const struct glsl_type *type = variable->type;
		enum glsl_base_type base_type =
			glsl_get_base_type(glsl_without_array(type));
		unsigned aoa_size = MAX2(1, glsl_get_aoa_size(type));
		unsigned loc = variable->data.location;
		int slot_count = glsl_count_attribute_slots(type, false);
		int max_slot = MAX2(info->const_file_max[0], (int) loc) + slot_count;

		/* Gather the declared-buffer bitmasks. Note: radeonsi doesn't
		 * really use the masks (other than ubo_idx == 1 for regular
		 * uniforms); they're really only used for getting the buffer
		 * count, so we don't need to worry about the ordering.
		 */
		if (variable->interface_type != NULL) {
			if (variable->data.mode == nir_var_uniform ||
			    variable->data.mode == nir_var_mem_ubo ||
			    variable->data.mode == nir_var_mem_ssbo) {

				struct set *buf_set = variable->data.mode == nir_var_mem_ssbo ?
					ssbo_set : ubo_set;

				unsigned block_count;
				if (base_type != GLSL_TYPE_INTERFACE) {
					struct set_entry *entry =
						_mesa_set_search(buf_set, variable->interface_type);

					/* Check if we have already processed
					 * a member from this UBO.
					 */
					if (entry)
						continue;

					block_count = 1;
				} else {
					block_count = aoa_size;
				}

				if (variable->data.mode == nir_var_uniform ||
				    variable->data.mode == nir_var_mem_ubo) {
					info->const_buffers_declared |= u_bit_consecutive(ubo_idx, block_count);
					ubo_idx += block_count;
				} else {
					assert(variable->data.mode == nir_var_mem_ssbo);

					info->shader_buffers_declared |= u_bit_consecutive(ssbo_idx, block_count);
					ssbo_idx += block_count;
				}

				_mesa_set_add(buf_set, variable->interface_type);
			}

			continue;
		}

		/* We rely on the fact that nir_lower_samplers_as_deref has
		 * eliminated struct dereferences.
		 */
		if (base_type == GLSL_TYPE_SAMPLER) {
			if (variable->data.bindless) {
				info->const_buffers_declared |= 1;
				info->const_file_max[0] = max_slot;
			} else {
				info->samplers_declared |=
					u_bit_consecutive(variable->data.binding, aoa_size);
			}
		} else if (base_type == GLSL_TYPE_IMAGE) {
			if (variable->data.bindless) {
				info->const_buffers_declared |= 1;
				info->const_file_max[0] = max_slot;
			} else {
				info->images_declared |=
					u_bit_consecutive(variable->data.binding, aoa_size);
			}
		} else if (base_type != GLSL_TYPE_ATOMIC_UINT) {
			if (strncmp(variable->name, "state.", 6) == 0 ||
			    strncmp(variable->name, "gl_", 3) == 0) {
				/* FIXME: figure out why piglit tests with builtin
				 * uniforms are failing without this.
				 */
				info->const_buffers_declared =
					u_bit_consecutive(0, SI_NUM_CONST_BUFFERS);
			} else {
				info->const_buffers_declared |= 1;
				info->const_file_max[0] = max_slot;
			}
		}
	}

	_mesa_set_destroy(ubo_set, NULL);
	_mesa_set_destroy(ssbo_set, NULL);

	info->num_written_clipdistance = nir->info.clip_distance_array_size;
	info->num_written_culldistance = nir->info.cull_distance_array_size;
	info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
	info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);

	if (info->processor == PIPE_SHADER_FRAGMENT)
		info->uses_kill = nir->info.fs.uses_discard;

	func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
	nir_foreach_block(block, func->impl) {
		nir_foreach_instr(instr, block)
			scan_instruction(nir, info, instr);
	}
}

/**
 * Perform "lowering" operations on the NIR that are run once when the shader
 * selector is created.
 */
void
si_lower_nir(struct si_shader_selector* sel)
{
	/* Adjust the driver location of inputs and outputs. The state tracker
	 * interprets them as slots, while the ac/nir backend interprets them
	 * as individual components.
	 */
	nir_foreach_variable(variable, &sel->nir->inputs)
		variable->data.driver_location *= 4;

	nir_foreach_variable(variable, &sel->nir->outputs) {
		variable->data.driver_location *= 4;

		if (sel->nir->info.stage == MESA_SHADER_FRAGMENT) {
			if (variable->data.location == FRAG_RESULT_DEPTH)
				variable->data.driver_location += 2;
			else if (variable->data.location == FRAG_RESULT_STENCIL)
				variable->data.driver_location += 1;
		}
	}

	/* Perform lowerings (and optimizations) of code.
	 *
	 * Performance considerations aside, we must:
	 * - lower certain ALU operations
	 * - ensure constant offsets for texture instructions are folded
	 *   and copy-propagated
	 */

	static const struct nir_lower_tex_options lower_tex_options = {
		.lower_txp = ~0u,
	};
	NIR_PASS_V(sel->nir, nir_lower_tex, &lower_tex_options);

	const nir_lower_subgroups_options subgroups_options = {
		.subgroup_size = 64,
		.ballot_bit_size = 64,
		.lower_to_scalar = true,
		.lower_subgroup_masks = true,
		.lower_vote_trivial = false,
		.lower_vote_eq_to_ballot = true,
	};
	NIR_PASS_V(sel->nir, nir_lower_subgroups, &subgroups_options);

	ac_lower_indirect_derefs(sel->nir, sel->screen->info.chip_class);

	bool progress;
	do {
		progress = false;

		NIR_PASS_V(sel->nir, nir_lower_vars_to_ssa);

		NIR_PASS(progress, sel->nir, nir_opt_copy_prop_vars);
		NIR_PASS(progress, sel->nir, nir_opt_dead_write_vars);

		NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar);
		NIR_PASS_V(sel->nir, nir_lower_phis_to_scalar);

		/* (Constant) copy propagation is needed for txf with offsets. */
		NIR_PASS(progress, sel->nir, nir_copy_prop);
		NIR_PASS(progress, sel->nir, nir_opt_remove_phis);
		NIR_PASS(progress, sel->nir, nir_opt_dce);
		if (nir_opt_trivial_continues(sel->nir)) {
			progress = true;
			NIR_PASS(progress, sel->nir, nir_copy_prop);
			NIR_PASS(progress, sel->nir, nir_opt_dce);
		}
		NIR_PASS(progress, sel->nir, nir_opt_if, true);
		NIR_PASS(progress, sel->nir, nir_opt_dead_cf);
		NIR_PASS(progress, sel->nir, nir_opt_cse);
		NIR_PASS(progress, sel->nir, nir_opt_peephole_select, 8, true, true);

		/* Needed for algebraic lowering */
		NIR_PASS(progress, sel->nir, nir_opt_algebraic);
		NIR_PASS(progress, sel->nir, nir_opt_constant_folding);

		NIR_PASS(progress, sel->nir, nir_opt_undef);
		NIR_PASS(progress, sel->nir, nir_opt_conditional_discard);
		if (sel->nir->options->max_unroll_iterations) {
			NIR_PASS(progress, sel->nir, nir_opt_loop_unroll, 0);
		}
	} while (progress);

	NIR_PASS_V(sel->nir, nir_lower_bool_to_int32);

	/* Strip the resulting shader so that the shader cache is more likely
	 * to hit from other similar shaders.
	 */
	nir_strip(sel->nir);
}

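/* Fetch the four channels of a vertex shader input. VS inputs have no
 * semantics; they are addressed purely by input_index, so the variable
 * parameter is unused here.
 */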
static void declare_nir_input_vs(struct si_shader_context *ctx,
				 struct nir_variable *variable,
				 unsigned input_index,
				 LLVMValueRef out[4])
{
	si_llvm_load_input_vs(ctx, input_index, out);
}

static void declare_nir_input_fs(struct si_shader_context *ctx,
				 struct nir_variable *variable,
				 unsigned input_index,
				 LLVMValueRef out[4])
{
	unsigned slot = variable->data.location;
	if (slot == VARYING_SLOT_POS) {
		out[0] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT);
		out[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT);
		out[2] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT);
		out[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
				       LLVMGetParam(ctx->main_fn, SI_PARAM_POS_W_FLOAT));
		return;
	}

	si_llvm_load_input_fs(ctx, input_index, out);
}

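/* Return the main function parameter holding the interpolation coefficients
 * for the given interpolation mode and location, or NULL for flat inputs.
 */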
LLVMValueRef
si_nir_lookup_interp_param(struct ac_shader_abi *abi,
			   enum glsl_interp_mode interp, unsigned location)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	int interp_param_idx = -1;

	switch (interp) {
	case INTERP_MODE_FLAT:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			interp_param_idx = SI_PARAM_PERSP_CENTER;
		else if (location == INTERP_CENTROID)
			interp_param_idx = SI_PARAM_PERSP_CENTROID;
		else if (location == INTERP_SAMPLE)
			interp_param_idx = SI_PARAM_PERSP_SAMPLE;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			interp_param_idx = SI_PARAM_LINEAR_CENTER;
		else if (location == INTERP_CENTROID)
			interp_param_idx = SI_PARAM_LINEAR_CENTROID;
		else if (location == INTERP_SAMPLE)
			interp_param_idx = SI_PARAM_LINEAR_SAMPLE;
		break;
	default:
		assert(!"Unhandled interpolation mode.");
		return NULL;
	}

	return interp_param_idx != -1 ?
		LLVMGetParam(ctx->main_fn, interp_param_idx) : NULL;
}

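/* Load a sampler or image descriptor for the AC/NIR backend. For bindless
 * access, dynamic_index holds the bindless handle; otherwise the descriptor
 * comes from the per-shader samplers-and-images array, with dynamic array
 * indices clamped to the declared range.
 */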
static LLVMValueRef
si_nir_load_sampler_desc(struct ac_shader_abi *abi,
			 unsigned descriptor_set, unsigned base_index,
			 unsigned constant_index, LLVMValueRef dynamic_index,
			 enum ac_descriptor_type desc_type, bool image,
			 bool write, bool bindless)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned const_index = base_index + constant_index;
	bool dcc_off = write;

	/* TODO: images_store and images_atomic are not set */
	if (!dynamic_index && image &&
	    (info->images_store | info->images_atomic) & (1 << const_index))
		dcc_off = true;

	assert(!descriptor_set);
	assert(!image || desc_type == AC_DESC_IMAGE || desc_type == AC_DESC_BUFFER);

	if (bindless) {
		LLVMValueRef list =
			LLVMGetParam(ctx->main_fn, ctx->param_bindless_samplers_and_images);

		/* dynamic_index is the bindless handle */
		if (image) {
			/* For simplicity, bindless image descriptors use fixed
			 * 16-dword slots for now.
			 */
			dynamic_index = LLVMBuildMul(ctx->ac.builder, dynamic_index,
						     LLVMConstInt(ctx->i32, 2, 0), "");

			return si_load_image_desc(ctx, list, dynamic_index, desc_type,
						  dcc_off, true);
		}

		/* Since bindless handle arithmetic can contain an unsigned integer
		 * wraparound and si_load_sampler_desc assumes there isn't any,
		 * use GEP without "inbounds" (inside ac_build_pointer_add)
		 * to prevent incorrect code generation and hangs.
		 */
		dynamic_index = LLVMBuildMul(ctx->ac.builder, dynamic_index,
					     LLVMConstInt(ctx->i32, 2, 0), "");
		list = ac_build_pointer_add(&ctx->ac, list, dynamic_index);
		return si_load_sampler_desc(ctx, list, ctx->i32_0, desc_type);
	}

	unsigned num_slots = image ? ctx->num_images : ctx->num_samplers;
	assert(const_index < num_slots);

	LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
	LLVMValueRef index = LLVMConstInt(ctx->ac.i32, const_index, false);

	if (dynamic_index) {
		index = LLVMBuildAdd(builder, index, dynamic_index, "");

		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 *    If a shader performs an image load, store, or atomic
		 *    operation using an image variable declared as an array,
		 *    and if the index used to select an individual element is
		 *    negative or greater than or equal to the size of the
		 *    array, the results of the operation are undefined but may
		 *    not lead to termination.
		 */
		index = si_llvm_bound_index(ctx, index, num_slots);
	}

	if (image) {
		index = LLVMBuildSub(ctx->ac.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
				     index, "");
		return si_load_image_desc(ctx, list, index, desc_type, dcc_off, false);
	}

	index = LLVMBuildAdd(ctx->ac.builder, index,
			     LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");
	return si_load_sampler_desc(ctx, list, index, desc_type);
}

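/* Bitcast the four channels of an input to i32 and store them in
 * ctx->inputs, where the common AC/NIR code reads them.
 */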
static void bitcast_inputs(struct si_shader_context *ctx,
			   LLVMValueRef data[4],
			   unsigned input_idx)
{
	for (unsigned chan = 0; chan < 4; chan++) {
		ctx->inputs[input_idx + chan] =
			LLVMBuildBitCast(ctx->ac.builder, data[chan], ctx->ac.i32, "");
	}
}

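/* Entry point for NIR -> LLVM translation: preload VS/FS inputs, hook up
 * the ABI callbacks, and hand the shader to the common ac_nir backend.
 */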
bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_FRAGMENT) {
		uint64_t processed_inputs = 0;
		nir_foreach_variable(variable, &nir->inputs) {
			unsigned attrib_count = glsl_count_attribute_slots(variable->type,
									   nir->info.stage == MESA_SHADER_VERTEX);
			unsigned input_idx = variable->data.driver_location;

			LLVMValueRef data[4];
			unsigned loc = variable->data.location;

			if (loc >= VARYING_SLOT_VAR0 && nir->info.stage == MESA_SHADER_FRAGMENT)
				ctx->abi.fs_input_attr_indices[loc - VARYING_SLOT_VAR0] = input_idx / 4;

			for (unsigned i = 0; i < attrib_count; i++) {
				/* Packed components share the same location so skip
				 * them if we have already processed the location.
				 */
				if (processed_inputs & ((uint64_t)1 << (loc + i))) {
					input_idx += 4;
					continue;
				}

				if (nir->info.stage == MESA_SHADER_VERTEX) {
					declare_nir_input_vs(ctx, variable, input_idx / 4, data);
					bitcast_inputs(ctx, data, input_idx);
					if (glsl_type_is_dual_slot(variable->type)) {
						input_idx += 4;
						declare_nir_input_vs(ctx, variable, input_idx / 4, data);
						bitcast_inputs(ctx, data, input_idx);
					}
				} else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
					declare_nir_input_fs(ctx, variable, input_idx / 4, data);
					bitcast_inputs(ctx, data, input_idx);
				}

				processed_inputs |= ((uint64_t)1 << (loc + i));
				input_idx += 4;
			}
		}
	}

	ctx->abi.inputs = &ctx->inputs[0];
	ctx->abi.load_sampler_desc = si_nir_load_sampler_desc;
	ctx->abi.clamp_shadow_reference = true;

	ctx->num_samplers = util_last_bit(info->samplers_declared);
	ctx->num_images = util_last_bit(info->images_declared);

	if (ctx->shader->selector->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE]) {
		assert(gl_shader_stage_is_compute(nir->info.stage));
		si_declare_compute_memory(ctx);
	}
	ac_nir_translate(&ctx->ac, &ctx->abi, nir);

	return true;
}