nir: Add nir_foreach_shader_in/out_variable helpers
[mesa.git] / src / gallium / auxiliary / nir / nir_to_tgsi_info.c
1 /*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 /*
26 * This is ported mostly out of radeonsi, if we can drop TGSI, we can likely
27 * make a lot this go away.
28 */
29
30 #include "nir_to_tgsi_info.h"
31 #include "util/u_math.h"
32 #include "nir.h"
33 #include "nir_deref.h"
34 #include "tgsi/tgsi_scan.h"
35 #include "tgsi/tgsi_from_mesa.h"
36
37 static nir_variable* tex_get_texture_var(nir_tex_instr *instr)
38 {
39 for (unsigned i = 0; i < instr->num_srcs; i++) {
40 switch (instr->src[i].src_type) {
41 case nir_tex_src_texture_deref:
42 return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src));
43 default:
44 break;
45 }
46 }
47
48 return NULL;
49 }
50
51 static nir_variable* intrinsic_get_var(nir_intrinsic_instr *instr)
52 {
53 return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));
54 }
55
56
/* Walk the tail of a deref path and OR `mask` into the per-slot usage
 * masks at the final location(s).
 *
 * deref_ptr points into a NULL-terminated array of deref instructions
 * (a nir_deref_path starting just after the base variable deref).
 * location is the driver_location accumulated so far.  mask can cover up
 * to two consecutive slots: low nibble = slot `location`, high nibble =
 * slot `location + 1` (as produced for 64-bit types in gather_usage()).
 */
static void gather_usage_helper(const nir_deref_instr **deref_ptr,
                                unsigned location,
                                uint8_t mask,
                                uint8_t *usage_mask)
{
   for (; *deref_ptr; deref_ptr++) {
      const nir_deref_instr *deref = *deref_ptr;
      switch (deref->deref_type) {
      case nir_deref_type_array: {
         unsigned elem_size =
            glsl_count_attribute_slots(deref->type, false);
         if (nir_src_is_const(deref->arr.index)) {
            location += elem_size * nir_src_as_uint(deref->arr.index);
         } else {
            /* Indirect index: conservatively mark every element of the
             * array used, recursing for the remainder of the chain. */
            unsigned array_elems =
               glsl_get_length(deref_ptr[-1]->type);
            for (unsigned i = 0; i < array_elems; i++) {
               gather_usage_helper(deref_ptr + 1,
                                   location + elem_size * i,
                                   mask, usage_mask);
            }
            return;
         }
         break;
      }
      case nir_deref_type_struct: {
         /* Advance past the slots occupied by the fields that precede
          * the accessed member. */
         const struct glsl_type *parent_type =
            deref_ptr[-1]->type;
         unsigned index = deref->strct.index;
         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            location += glsl_count_attribute_slots(ft, false);
         }
         break;
      }
      default:
         unreachable("Unhandled deref type in gather_components_used_helper");
      }
   }

   usage_mask[location] |= mask & 0xf;
   if (mask & 0xf0)
      usage_mask[location + 1] |= (mask >> 4) & 0xf;
}
101
102 static void gather_usage(const nir_deref_instr *deref,
103 uint8_t mask,
104 uint8_t *usage_mask)
105 {
106 nir_deref_path path;
107 nir_deref_path_init(&path, (nir_deref_instr *)deref, NULL);
108
109 unsigned location_frac = path.path[0]->var->data.location_frac;
110 if (glsl_type_is_64bit(deref->type)) {
111 uint8_t new_mask = 0;
112 for (unsigned i = 0; i < 4; i++) {
113 if (mask & (1 << i))
114 new_mask |= 0x3 << (2 * i);
115 }
116 mask = new_mask << location_frac;
117 } else {
118 mask <<= location_frac;
119 mask &= 0xf;
120 }
121
122 gather_usage_helper((const nir_deref_instr **)&path.path[1],
123 path.path[0]->var->data.driver_location,
124 mask, usage_mask);
125
126 nir_deref_path_finish(&path);
127 }
128
129 static void gather_intrinsic_load_deref_info(const nir_shader *nir,
130 const nir_intrinsic_instr *instr,
131 const nir_deref_instr *deref,
132 bool need_texcoord,
133 nir_variable *var,
134 struct tgsi_shader_info *info)
135 {
136 assert(var && var->data.mode == nir_var_shader_in);
137
138 if (nir->info.stage == MESA_SHADER_FRAGMENT)
139 gather_usage(deref, nir_ssa_def_components_read(&instr->dest.ssa),
140 info->input_usage_mask);
141
142 switch (nir->info.stage) {
143 case MESA_SHADER_VERTEX: {
144
145 break;
146 }
147 default: {
148 unsigned semantic_name, semantic_index;
149 tgsi_get_gl_varying_semantic(var->data.location, need_texcoord,
150 &semantic_name, &semantic_index);
151
152 if (semantic_name == TGSI_SEMANTIC_COLOR) {
153 uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
154 info->colors_read |= mask << (semantic_index * 4);
155 }
156 if (semantic_name == TGSI_SEMANTIC_FACE) {
157 info->uses_frontface = true;
158 }
159 break;
160 }
161 }
162 }
163
164 static void scan_instruction(const struct nir_shader *nir,
165 bool need_texcoord,
166 struct tgsi_shader_info *info,
167 nir_instr *instr)
168 {
169 if (instr->type == nir_instr_type_alu) {
170 nir_alu_instr *alu = nir_instr_as_alu(instr);
171
172 switch (alu->op) {
173 case nir_op_fddx:
174 case nir_op_fddy:
175 case nir_op_fddx_fine:
176 case nir_op_fddy_fine:
177 case nir_op_fddx_coarse:
178 case nir_op_fddy_coarse:
179 info->uses_derivatives = true;
180 break;
181 default:
182 break;
183 }
184 } else if (instr->type == nir_instr_type_tex) {
185 nir_tex_instr *tex = nir_instr_as_tex(instr);
186 nir_variable *texture = tex_get_texture_var(tex);
187
188 if (!texture) {
189 info->samplers_declared |=
190 u_bit_consecutive(tex->sampler_index, 1);
191 } else {
192 if (texture->data.bindless)
193 info->uses_bindless_samplers = true;
194 }
195
196 switch (tex->op) {
197 case nir_texop_tex:
198 case nir_texop_txb:
199 case nir_texop_lod:
200 info->uses_derivatives = true;
201 break;
202 default:
203 break;
204 }
205 } else if (instr->type == nir_instr_type_intrinsic) {
206 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
207
208 switch (intr->intrinsic) {
209 case nir_intrinsic_load_front_face:
210 info->uses_frontface = 1;
211 break;
212 case nir_intrinsic_load_instance_id:
213 info->uses_instanceid = 1;
214 break;
215 case nir_intrinsic_load_invocation_id:
216 info->uses_invocationid = true;
217 break;
218 case nir_intrinsic_load_num_work_groups:
219 info->uses_grid_size = true;
220 break;
221 case nir_intrinsic_load_local_group_size:
222 /* The block size is translated to IMM with a fixed block size. */
223 if (info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
224 info->uses_block_size = true;
225 break;
226 case nir_intrinsic_load_local_invocation_id:
227 case nir_intrinsic_load_work_group_id: {
228 unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
229 while (mask) {
230 unsigned i = u_bit_scan(&mask);
231
232 if (intr->intrinsic == nir_intrinsic_load_work_group_id)
233 info->uses_block_id[i] = true;
234 else
235 info->uses_thread_id[i] = true;
236 }
237 break;
238 }
239 case nir_intrinsic_load_vertex_id:
240 info->uses_vertexid = 1;
241 break;
242 case nir_intrinsic_load_vertex_id_zero_base:
243 info->uses_vertexid_nobase = 1;
244 break;
245 case nir_intrinsic_load_base_vertex:
246 info->uses_basevertex = 1;
247 break;
248 case nir_intrinsic_load_draw_id:
249 info->uses_drawid = 1;
250 break;
251 case nir_intrinsic_load_primitive_id:
252 info->uses_primid = 1;
253 break;
254 case nir_intrinsic_load_sample_mask_in:
255 info->reads_samplemask = true;
256 break;
257 case nir_intrinsic_load_tess_level_inner:
258 case nir_intrinsic_load_tess_level_outer:
259 info->reads_tess_factors = true;
260 break;
261 case nir_intrinsic_bindless_image_load:
262 info->uses_bindless_images = true;
263
264 if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
265 info->uses_bindless_buffer_load = true;
266 else
267 info->uses_bindless_image_load = true;
268 break;
269 case nir_intrinsic_bindless_image_size:
270 case nir_intrinsic_bindless_image_samples:
271 info->uses_bindless_images = true;
272 break;
273 case nir_intrinsic_bindless_image_store:
274 info->uses_bindless_images = true;
275
276 if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
277 info->uses_bindless_buffer_store = true;
278 else
279 info->uses_bindless_image_store = true;
280
281 info->writes_memory = true;
282 break;
283 case nir_intrinsic_image_deref_store:
284 info->writes_memory = true;
285 break;
286 case nir_intrinsic_bindless_image_atomic_add:
287 case nir_intrinsic_bindless_image_atomic_imin:
288 case nir_intrinsic_bindless_image_atomic_imax:
289 case nir_intrinsic_bindless_image_atomic_umin:
290 case nir_intrinsic_bindless_image_atomic_umax:
291 case nir_intrinsic_bindless_image_atomic_and:
292 case nir_intrinsic_bindless_image_atomic_or:
293 case nir_intrinsic_bindless_image_atomic_xor:
294 case nir_intrinsic_bindless_image_atomic_exchange:
295 case nir_intrinsic_bindless_image_atomic_comp_swap:
296 info->uses_bindless_images = true;
297
298 if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
299 info->uses_bindless_buffer_atomic = true;
300 else
301 info->uses_bindless_image_atomic = true;
302
303 info->writes_memory = true;
304 break;
305 case nir_intrinsic_image_deref_atomic_add:
306 case nir_intrinsic_image_deref_atomic_imin:
307 case nir_intrinsic_image_deref_atomic_imax:
308 case nir_intrinsic_image_deref_atomic_umin:
309 case nir_intrinsic_image_deref_atomic_umax:
310 case nir_intrinsic_image_deref_atomic_and:
311 case nir_intrinsic_image_deref_atomic_or:
312 case nir_intrinsic_image_deref_atomic_xor:
313 case nir_intrinsic_image_deref_atomic_exchange:
314 case nir_intrinsic_image_deref_atomic_comp_swap:
315 info->writes_memory = true;
316 break;
317 case nir_intrinsic_store_ssbo:
318 case nir_intrinsic_ssbo_atomic_add:
319 case nir_intrinsic_ssbo_atomic_imin:
320 case nir_intrinsic_ssbo_atomic_umin:
321 case nir_intrinsic_ssbo_atomic_imax:
322 case nir_intrinsic_ssbo_atomic_umax:
323 case nir_intrinsic_ssbo_atomic_and:
324 case nir_intrinsic_ssbo_atomic_or:
325 case nir_intrinsic_ssbo_atomic_xor:
326 case nir_intrinsic_ssbo_atomic_exchange:
327 case nir_intrinsic_ssbo_atomic_comp_swap:
328 info->writes_memory = true;
329 break;
330 case nir_intrinsic_load_deref: {
331 nir_variable *var = intrinsic_get_var(intr);
332 nir_variable_mode mode = var->data.mode;
333 nir_deref_instr *const deref = nir_src_as_deref(intr->src[0]);
334 enum glsl_base_type base_type =
335 glsl_get_base_type(glsl_without_array(var->type));
336
337 if (nir_deref_instr_has_indirect(deref)) {
338 if (mode == nir_var_shader_in)
339 info->indirect_files |= (1 << TGSI_FILE_INPUT);
340 }
341 if (mode == nir_var_shader_in) {
342 gather_intrinsic_load_deref_info(nir, intr, deref, need_texcoord, var, info);
343
344 switch (var->data.interpolation) {
345 case INTERP_MODE_NONE:
346 if (glsl_base_type_is_integer(base_type))
347 break;
348
349 /* fall-through */
350 case INTERP_MODE_SMOOTH:
351 if (var->data.sample)
352 info->uses_persp_sample = true;
353 else if (var->data.centroid)
354 info->uses_persp_centroid = true;
355 else
356 info->uses_persp_center = true;
357 break;
358
359 case INTERP_MODE_NOPERSPECTIVE:
360 if (var->data.sample)
361 info->uses_linear_sample = true;
362 else if (var->data.centroid)
363 info->uses_linear_centroid = true;
364 else
365 info->uses_linear_center = true;
366 break;
367 }
368 }
369 break;
370 }
371 case nir_intrinsic_interp_deref_at_centroid:
372 case nir_intrinsic_interp_deref_at_sample:
373 case nir_intrinsic_interp_deref_at_offset: {
374 enum glsl_interp_mode interp = intrinsic_get_var(intr)->data.interpolation;
375 switch (interp) {
376 case INTERP_MODE_SMOOTH:
377 case INTERP_MODE_NONE:
378 if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
379 info->uses_persp_opcode_interp_centroid = true;
380 else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
381 info->uses_persp_opcode_interp_sample = true;
382 else
383 info->uses_persp_opcode_interp_offset = true;
384 break;
385 case INTERP_MODE_NOPERSPECTIVE:
386 if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
387 info->uses_linear_opcode_interp_centroid = true;
388 else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
389 info->uses_linear_opcode_interp_sample = true;
390 else
391 info->uses_linear_opcode_interp_offset = true;
392 break;
393 case INTERP_MODE_FLAT:
394 break;
395 default:
396 unreachable("Unsupported interpoation type");
397 }
398 break;
399 }
400 default:
401 break;
402 }
403 }
404 }
405
406 void nir_tgsi_scan_shader(const struct nir_shader *nir,
407 struct tgsi_shader_info *info,
408 bool need_texcoord)
409 {
410 nir_function *func;
411 unsigned i;
412
413 info->processor = pipe_shader_type_from_mesa(nir->info.stage);
414 info->num_tokens = 2; /* indicate that the shader is non-empty */
415 info->num_instructions = 2;
416
417 info->properties[TGSI_PROPERTY_NEXT_SHADER] =
418 pipe_shader_type_from_mesa(nir->info.next_stage);
419
420 if (nir->info.stage == MESA_SHADER_VERTEX) {
421 info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] =
422 nir->info.vs.window_space_position;
423 }
424
425 if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
426 info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
427 nir->info.tess.tcs_vertices_out;
428 }
429
430 if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
431 if (nir->info.tess.primitive_mode == GL_ISOLINES)
432 info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
433 else
434 info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;
435
436 STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
437 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
438 PIPE_TESS_SPACING_FRACTIONAL_ODD);
439 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
440 PIPE_TESS_SPACING_FRACTIONAL_EVEN);
441
442 info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
443 info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
444 info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
445 }
446
447 if (nir->info.stage == MESA_SHADER_GEOMETRY) {
448 info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
449 info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
450 info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
451 info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
452 }
453
454 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
455 info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
456 nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
457 info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;
458
459 if (nir->info.fs.pixel_center_integer) {
460 info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
461 TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
462 }
463
464 if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
465 switch (nir->info.fs.depth_layout) {
466 case FRAG_DEPTH_LAYOUT_ANY:
467 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
468 break;
469 case FRAG_DEPTH_LAYOUT_GREATER:
470 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
471 break;
472 case FRAG_DEPTH_LAYOUT_LESS:
473 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
474 break;
475 case FRAG_DEPTH_LAYOUT_UNCHANGED:
476 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
477 break;
478 default:
479 unreachable("Unknow depth layout");
480 }
481 }
482 }
483
484 if (gl_shader_stage_is_compute(nir->info.stage)) {
485 info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.cs.local_size[0];
486 info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.cs.local_size[1];
487 info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.cs.local_size[2];
488 }
489
490 i = 0;
491 uint64_t processed_inputs = 0;
492 nir_foreach_shader_in_variable(variable, nir) {
493 unsigned semantic_name, semantic_index;
494
495 const struct glsl_type *type = variable->type;
496 if (nir_is_per_vertex_io(variable, nir->info.stage)) {
497 assert(glsl_type_is_array(type));
498 type = glsl_get_array_element(type);
499 }
500
501 unsigned attrib_count = glsl_count_attribute_slots(type,
502 nir->info.stage == MESA_SHADER_VERTEX);
503
504 i = variable->data.driver_location;
505
506 /* Vertex shader inputs don't have semantics. The state
507 * tracker has already mapped them to attributes via
508 * variable->data.driver_location.
509 */
510 if (nir->info.stage == MESA_SHADER_VERTEX) {
511 continue;
512 }
513
514 for (unsigned j = 0; j < attrib_count; j++, i++) {
515
516 if (processed_inputs & ((uint64_t)1 << i))
517 continue;
518
519 processed_inputs |= ((uint64_t)1 << i);
520
521 tgsi_get_gl_varying_semantic(variable->data.location + j, need_texcoord,
522 &semantic_name, &semantic_index);
523
524 info->input_semantic_name[i] = semantic_name;
525 info->input_semantic_index[i] = semantic_index;
526
527 if (semantic_name == TGSI_SEMANTIC_PRIMID)
528 info->uses_primid = true;
529
530 enum glsl_base_type base_type =
531 glsl_get_base_type(glsl_without_array(variable->type));
532
533 if (variable->data.centroid)
534 info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
535 if (variable->data.sample)
536 info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;
537
538 switch (variable->data.interpolation) {
539 case INTERP_MODE_NONE:
540 if (glsl_base_type_is_integer(base_type)) {
541 info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
542 break;
543 }
544
545 if (semantic_name == TGSI_SEMANTIC_COLOR) {
546 info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
547 break;
548 }
549 /* fall-through */
550
551 case INTERP_MODE_SMOOTH:
552 assert(!glsl_base_type_is_integer(base_type));
553
554 info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
555 break;
556
557 case INTERP_MODE_NOPERSPECTIVE:
558 assert(!glsl_base_type_is_integer(base_type));
559
560 info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
561 break;
562
563 case INTERP_MODE_FLAT:
564 info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
565 break;
566 }
567 }
568 }
569
570 info->num_inputs = nir->num_inputs;
571 info->file_max[TGSI_FILE_INPUT] = nir->num_inputs - 1;
572
573 i = 0;
574 uint64_t processed_outputs = 0;
575 unsigned num_outputs = 0;
576 nir_foreach_shader_out_variable(variable, nir) {
577 unsigned semantic_name, semantic_index;
578
579 i = variable->data.driver_location;
580
581 const struct glsl_type *type = variable->type;
582 if (nir_is_per_vertex_io(variable, nir->info.stage)) {
583 assert(glsl_type_is_array(type));
584 type = glsl_get_array_element(type);
585 }
586
587 unsigned attrib_count = glsl_count_attribute_slots(type, false);
588 for (unsigned k = 0; k < attrib_count; k++, i++) {
589
590 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
591 tgsi_get_gl_frag_result_semantic(variable->data.location + k,
592 &semantic_name, &semantic_index);
593
594 /* Adjust for dual source blending */
595 if (variable->data.index > 0) {
596 semantic_index++;
597 }
598 } else {
599 tgsi_get_gl_varying_semantic(variable->data.location + k, need_texcoord,
600 &semantic_name, &semantic_index);
601 }
602
603 unsigned num_components = 4;
604 unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
605 if (vector_elements)
606 num_components = vector_elements;
607
608 unsigned component = variable->data.location_frac;
609 if (glsl_type_is_64bit(glsl_without_array(variable->type))) {
610 if (glsl_type_is_dual_slot(glsl_without_array(variable->type)) && k % 2) {
611 num_components = (num_components * 2) - 4;
612 component = 0;
613 } else {
614 num_components = MIN2(num_components * 2, 4);
615 }
616 }
617
618 ubyte usagemask = 0;
619 for (unsigned j = component; j < num_components + component; j++) {
620 switch (j) {
621 case 0:
622 usagemask |= TGSI_WRITEMASK_X;
623 break;
624 case 1:
625 usagemask |= TGSI_WRITEMASK_Y;
626 break;
627 case 2:
628 usagemask |= TGSI_WRITEMASK_Z;
629 break;
630 case 3:
631 usagemask |= TGSI_WRITEMASK_W;
632 break;
633 default:
634 unreachable("error calculating component index");
635 }
636 }
637
638 unsigned gs_out_streams;
639 if (variable->data.stream & NIR_STREAM_PACKED) {
640 gs_out_streams = variable->data.stream & ~NIR_STREAM_PACKED;
641 } else {
642 assert(variable->data.stream < 4);
643 gs_out_streams = 0;
644 for (unsigned j = 0; j < num_components; ++j)
645 gs_out_streams |= variable->data.stream << (2 * (component + j));
646 }
647
648 unsigned streamx = gs_out_streams & 3;
649 unsigned streamy = (gs_out_streams >> 2) & 3;
650 unsigned streamz = (gs_out_streams >> 4) & 3;
651 unsigned streamw = (gs_out_streams >> 6) & 3;
652
653 if (usagemask & TGSI_WRITEMASK_X) {
654 info->output_usagemask[i] |= TGSI_WRITEMASK_X;
655 info->output_streams[i] |= streamx;
656 info->num_stream_output_components[streamx]++;
657 }
658 if (usagemask & TGSI_WRITEMASK_Y) {
659 info->output_usagemask[i] |= TGSI_WRITEMASK_Y;
660 info->output_streams[i] |= streamy << 2;
661 info->num_stream_output_components[streamy]++;
662 }
663 if (usagemask & TGSI_WRITEMASK_Z) {
664 info->output_usagemask[i] |= TGSI_WRITEMASK_Z;
665 info->output_streams[i] |= streamz << 4;
666 info->num_stream_output_components[streamz]++;
667 }
668 if (usagemask & TGSI_WRITEMASK_W) {
669 info->output_usagemask[i] |= TGSI_WRITEMASK_W;
670 info->output_streams[i] |= streamw << 6;
671 info->num_stream_output_components[streamw]++;
672 }
673
674 /* make sure we only count this location once against
675 * the num_outputs counter.
676 */
677 if (processed_outputs & ((uint64_t)1 << i))
678 continue;
679
680 processed_outputs |= ((uint64_t)1 << i);
681 num_outputs++;
682
683 info->output_semantic_name[i] = semantic_name;
684 info->output_semantic_index[i] = semantic_index;
685
686 switch (semantic_name) {
687 case TGSI_SEMANTIC_PRIMID:
688 info->writes_primid = true;
689 break;
690 case TGSI_SEMANTIC_VIEWPORT_INDEX:
691 info->writes_viewport_index = true;
692 break;
693 case TGSI_SEMANTIC_LAYER:
694 info->writes_layer = true;
695 break;
696 case TGSI_SEMANTIC_PSIZE:
697 info->writes_psize = true;
698 break;
699 case TGSI_SEMANTIC_CLIPVERTEX:
700 info->writes_clipvertex = true;
701 break;
702 case TGSI_SEMANTIC_COLOR:
703 info->colors_written |= 1 << semantic_index;
704 break;
705 case TGSI_SEMANTIC_STENCIL:
706 info->writes_stencil = true;
707 break;
708 case TGSI_SEMANTIC_SAMPLEMASK:
709 info->writes_samplemask = true;
710 break;
711 case TGSI_SEMANTIC_EDGEFLAG:
712 info->writes_edgeflag = true;
713 break;
714 case TGSI_SEMANTIC_POSITION:
715 if (info->processor == PIPE_SHADER_FRAGMENT)
716 info->writes_z = true;
717 else
718 info->writes_position = true;
719 break;
720 }
721
722 if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
723 switch (semantic_name) {
724 case TGSI_SEMANTIC_PATCH:
725 info->reads_perpatch_outputs = true;
726 break;
727 case TGSI_SEMANTIC_TESSINNER:
728 case TGSI_SEMANTIC_TESSOUTER:
729 info->reads_tessfactor_outputs = true;
730 break;
731 default:
732 info->reads_pervertex_outputs = true;
733 }
734 }
735 }
736
737 unsigned loc = variable->data.location;
738 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
739 loc == FRAG_RESULT_COLOR &&
740 nir->info.outputs_written & (1ull << loc)) {
741 assert(attrib_count == 1);
742 info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
743 }
744 }
745
746 info->num_outputs = num_outputs;
747
748 info->const_file_max[0] = nir->num_uniforms - 1;
749 info->const_buffers_declared = u_bit_consecutive(1, nir->info.num_ubos);
750 if (nir->num_uniforms > 0)
751 info->const_buffers_declared |= 1;
752 info->images_declared = u_bit_consecutive(0, nir->info.num_images);
753 info->samplers_declared = nir->info.textures_used;
754
755 info->file_max[TGSI_FILE_SAMPLER] = util_last_bit(info->samplers_declared) - 1;
756 info->file_max[TGSI_FILE_SAMPLER_VIEW] = info->file_max[TGSI_FILE_SAMPLER];
757 info->file_mask[TGSI_FILE_SAMPLER] = info->file_mask[TGSI_FILE_SAMPLER_VIEW] = info->samplers_declared;
758 info->file_max[TGSI_FILE_IMAGE] = util_last_bit(info->images_declared) - 1;
759 info->file_mask[TGSI_FILE_IMAGE] = info->images_declared;
760
761 info->num_written_clipdistance = nir->info.clip_distance_array_size;
762 info->num_written_culldistance = nir->info.cull_distance_array_size;
763 info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
764 info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);
765
766 if (info->processor == PIPE_SHADER_FRAGMENT)
767 info->uses_kill = nir->info.fs.uses_discard;
768
769 func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
770 nir_foreach_block(block, func->impl) {
771 nir_foreach_instr(instr, block)
772 scan_instruction(nir, need_texcoord, info, instr);
773 }
774 }