8d15a426230018998127bbef3ad311cbfcddc368
[mesa.git] / src / gallium / auxiliary / nir / nir_to_tgsi_info.c
1 /*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
/*
 * This is ported mostly out of radeonsi; if we can drop TGSI, we can likely
 * make a lot of this go away.
 */
29
30 #include "nir_to_tgsi_info.h"
31 #include "util/u_math.h"
32 #include "nir.h"
33 #include "nir_deref.h"
34 #include "tgsi/tgsi_scan.h"
35 #include "tgsi/tgsi_from_mesa.h"
36
37 static nir_variable* tex_get_texture_var(nir_tex_instr *instr)
38 {
39 for (unsigned i = 0; i < instr->num_srcs; i++) {
40 switch (instr->src[i].src_type) {
41 case nir_tex_src_texture_deref:
42 return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src));
43 default:
44 break;
45 }
46 }
47
48 return NULL;
49 }
50
51 static nir_variable* intrinsic_get_var(nir_intrinsic_instr *instr)
52 {
53 return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));
54 }
55
56
57 static void gather_usage_helper(const nir_deref_instr **deref_ptr,
58 unsigned location,
59 uint8_t mask,
60 uint8_t *usage_mask)
61 {
62 for (; *deref_ptr; deref_ptr++) {
63 const nir_deref_instr *deref = *deref_ptr;
64 switch (deref->deref_type) {
65 case nir_deref_type_array: {
66 bool is_compact = nir_deref_instr_get_variable(deref)->data.compact;
67 unsigned elem_size = is_compact ? DIV_ROUND_UP(glsl_get_length(deref->type), 4) :
68 glsl_count_attribute_slots(deref->type, false);
69 if (nir_src_is_const(deref->arr.index)) {
70 if (is_compact) {
71 location += nir_src_as_uint(deref->arr.index) / 4;
72 mask <<= nir_src_as_uint(deref->arr.index) % 4;
73 } else
74 location += elem_size * nir_src_as_uint(deref->arr.index);
75 } else {
76 unsigned array_elems =
77 glsl_get_length(deref_ptr[-1]->type);
78 for (unsigned i = 0; i < array_elems; i++) {
79 gather_usage_helper(deref_ptr + 1,
80 location + elem_size * i,
81 mask, usage_mask);
82 }
83 return;
84 }
85 break;
86 }
87 case nir_deref_type_struct: {
88 const struct glsl_type *parent_type =
89 deref_ptr[-1]->type;
90 unsigned index = deref->strct.index;
91 for (unsigned i = 0; i < index; i++) {
92 const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
93 location += glsl_count_attribute_slots(ft, false);
94 }
95 break;
96 }
97 default:
98 unreachable("Unhandled deref type in gather_components_used_helper");
99 }
100 }
101
102 usage_mask[location] |= mask & 0xf;
103 if (mask & 0xf0)
104 usage_mask[location + 1] |= (mask >> 4) & 0xf;
105 }
106
107 static void gather_usage(const nir_deref_instr *deref,
108 uint8_t mask,
109 uint8_t *usage_mask)
110 {
111 nir_deref_path path;
112 nir_deref_path_init(&path, (nir_deref_instr *)deref, NULL);
113
114 unsigned location_frac = path.path[0]->var->data.location_frac;
115 if (glsl_type_is_64bit(deref->type)) {
116 uint8_t new_mask = 0;
117 for (unsigned i = 0; i < 4; i++) {
118 if (mask & (1 << i))
119 new_mask |= 0x3 << (2 * i);
120 }
121 mask = new_mask << location_frac;
122 } else {
123 mask <<= location_frac;
124 mask &= 0xf;
125 }
126
127 gather_usage_helper((const nir_deref_instr **)&path.path[1],
128 path.path[0]->var->data.driver_location,
129 mask, usage_mask);
130
131 nir_deref_path_finish(&path);
132 }
133
134 static void gather_intrinsic_load_deref_info(const nir_shader *nir,
135 const nir_intrinsic_instr *instr,
136 const nir_deref_instr *deref,
137 bool need_texcoord,
138 nir_variable *var,
139 struct tgsi_shader_info *info)
140 {
141 assert(var && var->data.mode == nir_var_shader_in);
142
143 if (nir->info.stage == MESA_SHADER_FRAGMENT)
144 gather_usage(deref, nir_ssa_def_components_read(&instr->dest.ssa),
145 info->input_usage_mask);
146
147 switch (nir->info.stage) {
148 case MESA_SHADER_VERTEX: {
149
150 break;
151 }
152 default: {
153 unsigned semantic_name, semantic_index;
154 tgsi_get_gl_varying_semantic(var->data.location, need_texcoord,
155 &semantic_name, &semantic_index);
156
157 if (semantic_name == TGSI_SEMANTIC_COLOR) {
158 uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
159 info->colors_read |= mask << (semantic_index * 4);
160 }
161 if (semantic_name == TGSI_SEMANTIC_FACE) {
162 info->uses_frontface = true;
163 }
164 break;
165 }
166 }
167 }
168
169 static void scan_instruction(const struct nir_shader *nir,
170 bool need_texcoord,
171 struct tgsi_shader_info *info,
172 nir_instr *instr)
173 {
174 if (instr->type == nir_instr_type_alu) {
175 nir_alu_instr *alu = nir_instr_as_alu(instr);
176
177 switch (alu->op) {
178 case nir_op_fddx:
179 case nir_op_fddy:
180 case nir_op_fddx_fine:
181 case nir_op_fddy_fine:
182 case nir_op_fddx_coarse:
183 case nir_op_fddy_coarse:
184 info->uses_derivatives = true;
185 break;
186 default:
187 break;
188 }
189 } else if (instr->type == nir_instr_type_tex) {
190 nir_tex_instr *tex = nir_instr_as_tex(instr);
191 nir_variable *texture = tex_get_texture_var(tex);
192
193 if (!texture) {
194 info->samplers_declared |=
195 u_bit_consecutive(tex->sampler_index, 1);
196 } else {
197 if (texture->data.bindless)
198 info->uses_bindless_samplers = true;
199 }
200
201 switch (tex->op) {
202 case nir_texop_tex:
203 case nir_texop_txb:
204 case nir_texop_lod:
205 info->uses_derivatives = true;
206 break;
207 default:
208 break;
209 }
210 } else if (instr->type == nir_instr_type_intrinsic) {
211 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
212
213 switch (intr->intrinsic) {
214 case nir_intrinsic_load_front_face:
215 info->uses_frontface = 1;
216 break;
217 case nir_intrinsic_load_instance_id:
218 info->uses_instanceid = 1;
219 break;
220 case nir_intrinsic_load_invocation_id:
221 info->uses_invocationid = true;
222 break;
223 case nir_intrinsic_load_num_work_groups:
224 info->uses_grid_size = true;
225 break;
226 case nir_intrinsic_load_local_group_size:
227 /* The block size is translated to IMM with a fixed block size. */
228 if (info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
229 info->uses_block_size = true;
230 break;
231 case nir_intrinsic_load_local_invocation_id:
232 case nir_intrinsic_load_work_group_id: {
233 unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
234 while (mask) {
235 unsigned i = u_bit_scan(&mask);
236
237 if (intr->intrinsic == nir_intrinsic_load_work_group_id)
238 info->uses_block_id[i] = true;
239 else
240 info->uses_thread_id[i] = true;
241 }
242 break;
243 }
244 case nir_intrinsic_load_vertex_id:
245 info->uses_vertexid = 1;
246 break;
247 case nir_intrinsic_load_vertex_id_zero_base:
248 info->uses_vertexid_nobase = 1;
249 break;
250 case nir_intrinsic_load_base_vertex:
251 info->uses_basevertex = 1;
252 break;
253 case nir_intrinsic_load_draw_id:
254 info->uses_drawid = 1;
255 break;
256 case nir_intrinsic_load_primitive_id:
257 info->uses_primid = 1;
258 break;
259 case nir_intrinsic_load_sample_mask_in:
260 info->reads_samplemask = true;
261 break;
262 case nir_intrinsic_load_tess_level_inner:
263 case nir_intrinsic_load_tess_level_outer:
264 info->reads_tess_factors = true;
265 break;
266 case nir_intrinsic_bindless_image_load:
267 info->uses_bindless_images = true;
268
269 if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
270 info->uses_bindless_buffer_load = true;
271 else
272 info->uses_bindless_image_load = true;
273 break;
274 case nir_intrinsic_bindless_image_size:
275 case nir_intrinsic_bindless_image_samples:
276 info->uses_bindless_images = true;
277 break;
278 case nir_intrinsic_bindless_image_store:
279 info->uses_bindless_images = true;
280
281 if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
282 info->uses_bindless_buffer_store = true;
283 else
284 info->uses_bindless_image_store = true;
285
286 info->writes_memory = true;
287 break;
288 case nir_intrinsic_image_deref_store:
289 info->writes_memory = true;
290 break;
291 case nir_intrinsic_bindless_image_atomic_add:
292 case nir_intrinsic_bindless_image_atomic_imin:
293 case nir_intrinsic_bindless_image_atomic_imax:
294 case nir_intrinsic_bindless_image_atomic_umin:
295 case nir_intrinsic_bindless_image_atomic_umax:
296 case nir_intrinsic_bindless_image_atomic_and:
297 case nir_intrinsic_bindless_image_atomic_or:
298 case nir_intrinsic_bindless_image_atomic_xor:
299 case nir_intrinsic_bindless_image_atomic_exchange:
300 case nir_intrinsic_bindless_image_atomic_comp_swap:
301 info->uses_bindless_images = true;
302
303 if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
304 info->uses_bindless_buffer_atomic = true;
305 else
306 info->uses_bindless_image_atomic = true;
307
308 info->writes_memory = true;
309 break;
310 case nir_intrinsic_image_deref_atomic_add:
311 case nir_intrinsic_image_deref_atomic_imin:
312 case nir_intrinsic_image_deref_atomic_imax:
313 case nir_intrinsic_image_deref_atomic_umin:
314 case nir_intrinsic_image_deref_atomic_umax:
315 case nir_intrinsic_image_deref_atomic_and:
316 case nir_intrinsic_image_deref_atomic_or:
317 case nir_intrinsic_image_deref_atomic_xor:
318 case nir_intrinsic_image_deref_atomic_exchange:
319 case nir_intrinsic_image_deref_atomic_comp_swap:
320 info->writes_memory = true;
321 break;
322 case nir_intrinsic_store_ssbo:
323 case nir_intrinsic_ssbo_atomic_add:
324 case nir_intrinsic_ssbo_atomic_imin:
325 case nir_intrinsic_ssbo_atomic_umin:
326 case nir_intrinsic_ssbo_atomic_imax:
327 case nir_intrinsic_ssbo_atomic_umax:
328 case nir_intrinsic_ssbo_atomic_and:
329 case nir_intrinsic_ssbo_atomic_or:
330 case nir_intrinsic_ssbo_atomic_xor:
331 case nir_intrinsic_ssbo_atomic_exchange:
332 case nir_intrinsic_ssbo_atomic_comp_swap:
333 info->writes_memory = true;
334 break;
335 case nir_intrinsic_load_deref: {
336 nir_variable *var = intrinsic_get_var(intr);
337 nir_variable_mode mode = var->data.mode;
338 nir_deref_instr *const deref = nir_src_as_deref(intr->src[0]);
339 enum glsl_base_type base_type =
340 glsl_get_base_type(glsl_without_array(var->type));
341
342 if (nir_deref_instr_has_indirect(deref)) {
343 if (mode == nir_var_shader_in)
344 info->indirect_files |= (1 << TGSI_FILE_INPUT);
345 }
346 if (mode == nir_var_shader_in) {
347 gather_intrinsic_load_deref_info(nir, intr, deref, need_texcoord, var, info);
348
349 switch (var->data.interpolation) {
350 case INTERP_MODE_NONE:
351 if (glsl_base_type_is_integer(base_type))
352 break;
353
354 /* fall-through */
355 case INTERP_MODE_SMOOTH:
356 if (var->data.sample)
357 info->uses_persp_sample = true;
358 else if (var->data.centroid)
359 info->uses_persp_centroid = true;
360 else
361 info->uses_persp_center = true;
362 break;
363
364 case INTERP_MODE_NOPERSPECTIVE:
365 if (var->data.sample)
366 info->uses_linear_sample = true;
367 else if (var->data.centroid)
368 info->uses_linear_centroid = true;
369 else
370 info->uses_linear_center = true;
371 break;
372 }
373 }
374 break;
375 }
376 case nir_intrinsic_interp_deref_at_centroid:
377 case nir_intrinsic_interp_deref_at_sample:
378 case nir_intrinsic_interp_deref_at_offset: {
379 enum glsl_interp_mode interp = intrinsic_get_var(intr)->data.interpolation;
380 switch (interp) {
381 case INTERP_MODE_SMOOTH:
382 case INTERP_MODE_NONE:
383 if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
384 info->uses_persp_opcode_interp_centroid = true;
385 else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
386 info->uses_persp_opcode_interp_sample = true;
387 else
388 info->uses_persp_opcode_interp_offset = true;
389 break;
390 case INTERP_MODE_NOPERSPECTIVE:
391 if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
392 info->uses_linear_opcode_interp_centroid = true;
393 else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
394 info->uses_linear_opcode_interp_sample = true;
395 else
396 info->uses_linear_opcode_interp_offset = true;
397 break;
398 case INTERP_MODE_FLAT:
399 break;
400 default:
401 unreachable("Unsupported interpoation type");
402 }
403 break;
404 }
405 default:
406 break;
407 }
408 }
409 }
410
411 void nir_tgsi_scan_shader(const struct nir_shader *nir,
412 struct tgsi_shader_info *info,
413 bool need_texcoord)
414 {
415 nir_function *func;
416 unsigned i;
417
418 info->processor = pipe_shader_type_from_mesa(nir->info.stage);
419 info->num_tokens = 2; /* indicate that the shader is non-empty */
420 info->num_instructions = 2;
421
422 info->properties[TGSI_PROPERTY_NEXT_SHADER] =
423 pipe_shader_type_from_mesa(nir->info.next_stage);
424
425 if (nir->info.stage == MESA_SHADER_VERTEX) {
426 info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] =
427 nir->info.vs.window_space_position;
428 }
429
430 if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
431 info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
432 nir->info.tess.tcs_vertices_out;
433 }
434
435 if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
436 if (nir->info.tess.primitive_mode == GL_ISOLINES)
437 info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
438 else
439 info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;
440
441 STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
442 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
443 PIPE_TESS_SPACING_FRACTIONAL_ODD);
444 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
445 PIPE_TESS_SPACING_FRACTIONAL_EVEN);
446
447 info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
448 info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
449 info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
450 }
451
452 if (nir->info.stage == MESA_SHADER_GEOMETRY) {
453 info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
454 info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
455 info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
456 info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
457 }
458
459 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
460 info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
461 nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
462 info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;
463
464 if (nir->info.fs.pixel_center_integer) {
465 info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
466 TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
467 }
468
469 if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
470 switch (nir->info.fs.depth_layout) {
471 case FRAG_DEPTH_LAYOUT_ANY:
472 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
473 break;
474 case FRAG_DEPTH_LAYOUT_GREATER:
475 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
476 break;
477 case FRAG_DEPTH_LAYOUT_LESS:
478 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
479 break;
480 case FRAG_DEPTH_LAYOUT_UNCHANGED:
481 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
482 break;
483 default:
484 unreachable("Unknow depth layout");
485 }
486 }
487 }
488
489 if (gl_shader_stage_is_compute(nir->info.stage)) {
490 info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.cs.local_size[0];
491 info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.cs.local_size[1];
492 info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.cs.local_size[2];
493 }
494
495 i = 0;
496 uint64_t processed_inputs = 0;
497 nir_foreach_shader_in_variable(variable, nir) {
498 unsigned semantic_name, semantic_index;
499
500 const struct glsl_type *type = variable->type;
501 if (nir_is_per_vertex_io(variable, nir->info.stage)) {
502 assert(glsl_type_is_array(type));
503 type = glsl_get_array_element(type);
504 }
505
506 unsigned attrib_count = variable->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4) :
507 glsl_count_attribute_slots(type, nir->info.stage == MESA_SHADER_VERTEX);
508
509 i = variable->data.driver_location;
510
511 /* Vertex shader inputs don't have semantics. The state
512 * tracker has already mapped them to attributes via
513 * variable->data.driver_location.
514 */
515 if (nir->info.stage == MESA_SHADER_VERTEX) {
516 continue;
517 }
518
519 for (unsigned j = 0; j < attrib_count; j++, i++) {
520
521 if (processed_inputs & ((uint64_t)1 << i))
522 continue;
523
524 processed_inputs |= ((uint64_t)1 << i);
525
526 tgsi_get_gl_varying_semantic(variable->data.location + j, need_texcoord,
527 &semantic_name, &semantic_index);
528
529 info->input_semantic_name[i] = semantic_name;
530 info->input_semantic_index[i] = semantic_index;
531
532 if (semantic_name == TGSI_SEMANTIC_PRIMID)
533 info->uses_primid = true;
534
535 enum glsl_base_type base_type =
536 glsl_get_base_type(glsl_without_array(variable->type));
537
538 if (variable->data.centroid)
539 info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
540 if (variable->data.sample)
541 info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;
542
543 switch (variable->data.interpolation) {
544 case INTERP_MODE_NONE:
545 if (glsl_base_type_is_integer(base_type)) {
546 info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
547 break;
548 }
549
550 if (semantic_name == TGSI_SEMANTIC_COLOR) {
551 info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
552 break;
553 }
554 /* fall-through */
555
556 case INTERP_MODE_SMOOTH:
557 assert(!glsl_base_type_is_integer(base_type));
558
559 info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
560 break;
561
562 case INTERP_MODE_NOPERSPECTIVE:
563 assert(!glsl_base_type_is_integer(base_type));
564
565 info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
566 break;
567
568 case INTERP_MODE_FLAT:
569 info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
570 break;
571 }
572 }
573 }
574
575 info->num_inputs = nir->num_inputs;
576 info->file_max[TGSI_FILE_INPUT] = nir->num_inputs - 1;
577
578 i = 0;
579 uint64_t processed_outputs = 0;
580 unsigned num_outputs = 0;
581 nir_foreach_shader_out_variable(variable, nir) {
582 unsigned semantic_name, semantic_index;
583
584 i = variable->data.driver_location;
585
586 const struct glsl_type *type = variable->type;
587 if (nir_is_per_vertex_io(variable, nir->info.stage)) {
588 assert(glsl_type_is_array(type));
589 type = glsl_get_array_element(type);
590 }
591
592 unsigned attrib_count = variable->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4) :
593 glsl_count_attribute_slots(type, false);
594 for (unsigned k = 0; k < attrib_count; k++, i++) {
595
596 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
597 tgsi_get_gl_frag_result_semantic(variable->data.location + k,
598 &semantic_name, &semantic_index);
599
600 /* Adjust for dual source blending */
601 if (variable->data.index > 0) {
602 semantic_index++;
603 }
604 } else {
605 tgsi_get_gl_varying_semantic(variable->data.location + k, need_texcoord,
606 &semantic_name, &semantic_index);
607 }
608
609 unsigned num_components = 4;
610 unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
611 if (vector_elements)
612 num_components = vector_elements;
613
614 unsigned component = variable->data.location_frac;
615 if (glsl_type_is_64bit(glsl_without_array(variable->type))) {
616 if (glsl_type_is_dual_slot(glsl_without_array(variable->type)) && k % 2) {
617 num_components = (num_components * 2) - 4;
618 component = 0;
619 } else {
620 num_components = MIN2(num_components * 2, 4);
621 }
622 }
623
624 ubyte usagemask = 0;
625 for (unsigned j = component; j < num_components + component; j++) {
626 switch (j) {
627 case 0:
628 usagemask |= TGSI_WRITEMASK_X;
629 break;
630 case 1:
631 usagemask |= TGSI_WRITEMASK_Y;
632 break;
633 case 2:
634 usagemask |= TGSI_WRITEMASK_Z;
635 break;
636 case 3:
637 usagemask |= TGSI_WRITEMASK_W;
638 break;
639 default:
640 unreachable("error calculating component index");
641 }
642 }
643
644 unsigned gs_out_streams;
645 if (variable->data.stream & NIR_STREAM_PACKED) {
646 gs_out_streams = variable->data.stream & ~NIR_STREAM_PACKED;
647 } else {
648 assert(variable->data.stream < 4);
649 gs_out_streams = 0;
650 for (unsigned j = 0; j < num_components; ++j)
651 gs_out_streams |= variable->data.stream << (2 * (component + j));
652 }
653
654 unsigned streamx = gs_out_streams & 3;
655 unsigned streamy = (gs_out_streams >> 2) & 3;
656 unsigned streamz = (gs_out_streams >> 4) & 3;
657 unsigned streamw = (gs_out_streams >> 6) & 3;
658
659 if (usagemask & TGSI_WRITEMASK_X) {
660 info->output_usagemask[i] |= TGSI_WRITEMASK_X;
661 info->output_streams[i] |= streamx;
662 info->num_stream_output_components[streamx]++;
663 }
664 if (usagemask & TGSI_WRITEMASK_Y) {
665 info->output_usagemask[i] |= TGSI_WRITEMASK_Y;
666 info->output_streams[i] |= streamy << 2;
667 info->num_stream_output_components[streamy]++;
668 }
669 if (usagemask & TGSI_WRITEMASK_Z) {
670 info->output_usagemask[i] |= TGSI_WRITEMASK_Z;
671 info->output_streams[i] |= streamz << 4;
672 info->num_stream_output_components[streamz]++;
673 }
674 if (usagemask & TGSI_WRITEMASK_W) {
675 info->output_usagemask[i] |= TGSI_WRITEMASK_W;
676 info->output_streams[i] |= streamw << 6;
677 info->num_stream_output_components[streamw]++;
678 }
679
680 /* make sure we only count this location once against
681 * the num_outputs counter.
682 */
683 if (processed_outputs & ((uint64_t)1 << i))
684 continue;
685
686 processed_outputs |= ((uint64_t)1 << i);
687 num_outputs++;
688
689 info->output_semantic_name[i] = semantic_name;
690 info->output_semantic_index[i] = semantic_index;
691
692 switch (semantic_name) {
693 case TGSI_SEMANTIC_PRIMID:
694 info->writes_primid = true;
695 break;
696 case TGSI_SEMANTIC_VIEWPORT_INDEX:
697 info->writes_viewport_index = true;
698 break;
699 case TGSI_SEMANTIC_LAYER:
700 info->writes_layer = true;
701 break;
702 case TGSI_SEMANTIC_PSIZE:
703 info->writes_psize = true;
704 break;
705 case TGSI_SEMANTIC_CLIPVERTEX:
706 info->writes_clipvertex = true;
707 break;
708 case TGSI_SEMANTIC_COLOR:
709 info->colors_written |= 1 << semantic_index;
710 break;
711 case TGSI_SEMANTIC_STENCIL:
712 info->writes_stencil = true;
713 break;
714 case TGSI_SEMANTIC_SAMPLEMASK:
715 info->writes_samplemask = true;
716 break;
717 case TGSI_SEMANTIC_EDGEFLAG:
718 info->writes_edgeflag = true;
719 break;
720 case TGSI_SEMANTIC_POSITION:
721 if (info->processor == PIPE_SHADER_FRAGMENT)
722 info->writes_z = true;
723 else
724 info->writes_position = true;
725 break;
726 }
727
728 if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
729 switch (semantic_name) {
730 case TGSI_SEMANTIC_PATCH:
731 info->reads_perpatch_outputs = true;
732 break;
733 case TGSI_SEMANTIC_TESSINNER:
734 case TGSI_SEMANTIC_TESSOUTER:
735 info->reads_tessfactor_outputs = true;
736 break;
737 default:
738 info->reads_pervertex_outputs = true;
739 }
740 }
741 }
742
743 unsigned loc = variable->data.location;
744 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
745 loc == FRAG_RESULT_COLOR &&
746 nir->info.outputs_written & (1ull << loc)) {
747 assert(attrib_count == 1);
748 info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
749 }
750 }
751
752 info->num_outputs = num_outputs;
753
754 info->const_file_max[0] = nir->num_uniforms - 1;
755 info->const_buffers_declared = u_bit_consecutive(1, nir->info.num_ubos);
756 if (nir->num_uniforms > 0)
757 info->const_buffers_declared |= 1;
758 info->images_declared = u_bit_consecutive(0, nir->info.num_images);
759 info->samplers_declared = nir->info.textures_used;
760
761 info->file_max[TGSI_FILE_SAMPLER] = util_last_bit(info->samplers_declared) - 1;
762 info->file_max[TGSI_FILE_SAMPLER_VIEW] = info->file_max[TGSI_FILE_SAMPLER];
763 info->file_mask[TGSI_FILE_SAMPLER] = info->file_mask[TGSI_FILE_SAMPLER_VIEW] = info->samplers_declared;
764 info->file_max[TGSI_FILE_IMAGE] = util_last_bit(info->images_declared) - 1;
765 info->file_mask[TGSI_FILE_IMAGE] = info->images_declared;
766
767 info->num_written_clipdistance = nir->info.clip_distance_array_size;
768 info->num_written_culldistance = nir->info.cull_distance_array_size;
769 info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
770 info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);
771
772 if (info->processor == PIPE_SHADER_FRAGMENT)
773 info->uses_kill = nir->info.fs.uses_discard;
774
775 func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
776 nir_foreach_block(block, func->impl) {
777 nir_foreach_instr(instr, block)
778 scan_instruction(nir, need_texcoord, info, instr);
779 }
780 }