tgsi/mesa: handle KERNEL case
[mesa.git] / src / gallium / auxiliary / nir / nir_to_tgsi_info.c
1 /*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
/*
 * This is ported mostly out of radeonsi; if we can drop TGSI, we can likely
 * make a lot of this go away.
 */
29
30 #include "nir_to_tgsi_info.h"
31 #include "util/u_math.h"
32 #include "nir.h"
33 #include "nir_deref.h"
34 #include "tgsi/tgsi_scan.h"
35 #include "tgsi/tgsi_from_mesa.h"
36
37 static nir_variable* tex_get_texture_var(nir_tex_instr *instr)
38 {
39 for (unsigned i = 0; i < instr->num_srcs; i++) {
40 switch (instr->src[i].src_type) {
41 case nir_tex_src_texture_deref:
42 return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src));
43 default:
44 break;
45 }
46 }
47
48 return NULL;
49 }
50
51 static nir_variable* intrinsic_get_var(nir_intrinsic_instr *instr)
52 {
53 return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));
54 }
55
56
/* Walk the remainder of a deref path (a NULL-terminated pointer array),
 * accumulating the slot offset into 'location', then OR 'mask' into
 * usage_mask[] at the final slot.  The low nibble of 'mask' covers the
 * slot itself; the high nibble (set for dual-slot 64-bit types) covers
 * the following slot.
 */
static void gather_usage_helper(const nir_deref_instr **deref_ptr,
                                unsigned location,
                                uint8_t mask,
                                uint8_t *usage_mask)
{
   for (; *deref_ptr; deref_ptr++) {
      const nir_deref_instr *deref = *deref_ptr;
      switch (deref->deref_type) {
      case nir_deref_type_array: {
         unsigned elem_size =
            glsl_count_attribute_slots(deref->type, false);
         if (nir_src_is_const(deref->arr.index)) {
            /* Direct index: advance by a fixed number of slots. */
            location += elem_size * nir_src_as_uint(deref->arr.index);
         } else {
            /* Indirect index: conservatively mark every element of the
             * array as used by recursing once per element, then stop --
             * the recursion has already consumed the rest of the path.
             */
            unsigned array_elems =
               glsl_get_length(deref_ptr[-1]->type);
            for (unsigned i = 0; i < array_elems; i++) {
               gather_usage_helper(deref_ptr + 1,
                                   location + elem_size * i,
                                   mask, usage_mask);
            }
            return;
         }
         break;
      }
      case nir_deref_type_struct: {
         /* Skip past the slots occupied by all fields preceding the
          * accessed one; deref_ptr[-1] is the deref of the struct itself.
          */
         const struct glsl_type *parent_type =
            deref_ptr[-1]->type;
         unsigned index = deref->strct.index;
         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            location += glsl_count_attribute_slots(ft, false);
         }
         break;
      }
      default:
         unreachable("Unhandled deref type in gather_components_used_helper");
      }
   }

   usage_mask[location] |= mask & 0xf;
   if (mask & 0xf0)
      usage_mask[location + 1] |= (mask >> 4) & 0xf;
}
101
102 static void gather_usage(const nir_deref_instr *deref,
103 uint8_t mask,
104 uint8_t *usage_mask)
105 {
106 nir_deref_path path;
107 nir_deref_path_init(&path, (nir_deref_instr *)deref, NULL);
108
109 unsigned location_frac = path.path[0]->var->data.location_frac;
110 if (glsl_type_is_64bit(deref->type)) {
111 uint8_t new_mask = 0;
112 for (unsigned i = 0; i < 4; i++) {
113 if (mask & (1 << i))
114 new_mask |= 0x3 << (2 * i);
115 }
116 mask = new_mask << location_frac;
117 } else {
118 mask <<= location_frac;
119 mask &= 0xf;
120 }
121
122 gather_usage_helper((const nir_deref_instr **)&path.path[1],
123 path.path[0]->var->data.driver_location,
124 mask, usage_mask);
125
126 nir_deref_path_finish(&path);
127 }
128
129 static void gather_intrinsic_load_deref_info(const nir_shader *nir,
130 const nir_intrinsic_instr *instr,
131 const nir_deref_instr *deref,
132 bool need_texcoord,
133 nir_variable *var,
134 struct tgsi_shader_info *info)
135 {
136 assert(var && var->data.mode == nir_var_shader_in);
137
138 gather_usage(deref, nir_ssa_def_components_read(&instr->dest.ssa),
139 info->input_usage_mask);
140 switch (nir->info.stage) {
141 case MESA_SHADER_VERTEX: {
142 break;
143 }
144 default: {
145 unsigned semantic_name, semantic_index;
146 tgsi_get_gl_varying_semantic(var->data.location, need_texcoord,
147 &semantic_name, &semantic_index);
148
149 if (semantic_name == TGSI_SEMANTIC_COLOR) {
150 uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
151 info->colors_read |= mask << (semantic_index * 4);
152 }
153 if (semantic_name == TGSI_SEMANTIC_FACE) {
154 info->uses_frontface = true;
155 }
156 break;
157 }
158 }
159 }
160
161 static void scan_instruction(const struct nir_shader *nir,
162 bool need_texcoord,
163 struct tgsi_shader_info *info,
164 nir_instr *instr)
165 {
166 if (instr->type == nir_instr_type_alu) {
167 nir_alu_instr *alu = nir_instr_as_alu(instr);
168
169 switch (alu->op) {
170 case nir_op_fddx:
171 case nir_op_fddy:
172 case nir_op_fddx_fine:
173 case nir_op_fddy_fine:
174 case nir_op_fddx_coarse:
175 case nir_op_fddy_coarse:
176 info->uses_derivatives = true;
177 break;
178 default:
179 break;
180 }
181 } else if (instr->type == nir_instr_type_tex) {
182 nir_tex_instr *tex = nir_instr_as_tex(instr);
183 nir_variable *texture = tex_get_texture_var(tex);
184
185 if (!texture) {
186 info->samplers_declared |=
187 u_bit_consecutive(tex->sampler_index, 1);
188 } else {
189 if (texture->data.bindless)
190 info->uses_bindless_samplers = true;
191 }
192
193 switch (tex->op) {
194 case nir_texop_tex:
195 case nir_texop_txb:
196 case nir_texop_lod:
197 info->uses_derivatives = true;
198 break;
199 default:
200 break;
201 }
202 } else if (instr->type == nir_instr_type_intrinsic) {
203 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
204
205 switch (intr->intrinsic) {
206 case nir_intrinsic_load_front_face:
207 info->uses_frontface = 1;
208 break;
209 case nir_intrinsic_load_instance_id:
210 info->uses_instanceid = 1;
211 break;
212 case nir_intrinsic_load_invocation_id:
213 info->uses_invocationid = true;
214 break;
215 case nir_intrinsic_load_num_work_groups:
216 info->uses_grid_size = true;
217 break;
218 case nir_intrinsic_load_local_group_size:
219 /* The block size is translated to IMM with a fixed block size. */
220 if (info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
221 info->uses_block_size = true;
222 break;
223 case nir_intrinsic_load_local_invocation_id:
224 case nir_intrinsic_load_work_group_id: {
225 unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
226 while (mask) {
227 unsigned i = u_bit_scan(&mask);
228
229 if (intr->intrinsic == nir_intrinsic_load_work_group_id)
230 info->uses_block_id[i] = true;
231 else
232 info->uses_thread_id[i] = true;
233 }
234 break;
235 }
236 case nir_intrinsic_load_vertex_id:
237 info->uses_vertexid = 1;
238 break;
239 case nir_intrinsic_load_vertex_id_zero_base:
240 info->uses_vertexid_nobase = 1;
241 break;
242 case nir_intrinsic_load_base_vertex:
243 info->uses_basevertex = 1;
244 break;
245 case nir_intrinsic_load_draw_id:
246 info->uses_drawid = 1;
247 break;
248 case nir_intrinsic_load_primitive_id:
249 info->uses_primid = 1;
250 break;
251 case nir_intrinsic_load_sample_mask_in:
252 info->reads_samplemask = true;
253 break;
254 case nir_intrinsic_load_tess_level_inner:
255 case nir_intrinsic_load_tess_level_outer:
256 info->reads_tess_factors = true;
257 break;
258 case nir_intrinsic_bindless_image_load:
259 info->uses_bindless_images = true;
260
261 if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
262 info->uses_bindless_buffer_load = true;
263 else
264 info->uses_bindless_image_load = true;
265 break;
266 case nir_intrinsic_bindless_image_size:
267 case nir_intrinsic_bindless_image_samples:
268 info->uses_bindless_images = true;
269 break;
270 case nir_intrinsic_bindless_image_store:
271 info->uses_bindless_images = true;
272
273 if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
274 info->uses_bindless_buffer_store = true;
275 else
276 info->uses_bindless_image_store = true;
277
278 info->writes_memory = true;
279 break;
280 case nir_intrinsic_image_deref_store:
281 info->writes_memory = true;
282 break;
283 case nir_intrinsic_bindless_image_atomic_add:
284 case nir_intrinsic_bindless_image_atomic_imin:
285 case nir_intrinsic_bindless_image_atomic_imax:
286 case nir_intrinsic_bindless_image_atomic_umin:
287 case nir_intrinsic_bindless_image_atomic_umax:
288 case nir_intrinsic_bindless_image_atomic_and:
289 case nir_intrinsic_bindless_image_atomic_or:
290 case nir_intrinsic_bindless_image_atomic_xor:
291 case nir_intrinsic_bindless_image_atomic_exchange:
292 case nir_intrinsic_bindless_image_atomic_comp_swap:
293 info->uses_bindless_images = true;
294
295 if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
296 info->uses_bindless_buffer_atomic = true;
297 else
298 info->uses_bindless_image_atomic = true;
299
300 info->writes_memory = true;
301 break;
302 case nir_intrinsic_image_deref_atomic_add:
303 case nir_intrinsic_image_deref_atomic_imin:
304 case nir_intrinsic_image_deref_atomic_imax:
305 case nir_intrinsic_image_deref_atomic_umin:
306 case nir_intrinsic_image_deref_atomic_umax:
307 case nir_intrinsic_image_deref_atomic_and:
308 case nir_intrinsic_image_deref_atomic_or:
309 case nir_intrinsic_image_deref_atomic_xor:
310 case nir_intrinsic_image_deref_atomic_exchange:
311 case nir_intrinsic_image_deref_atomic_comp_swap:
312 info->writes_memory = true;
313 break;
314 case nir_intrinsic_store_ssbo:
315 case nir_intrinsic_ssbo_atomic_add:
316 case nir_intrinsic_ssbo_atomic_imin:
317 case nir_intrinsic_ssbo_atomic_umin:
318 case nir_intrinsic_ssbo_atomic_imax:
319 case nir_intrinsic_ssbo_atomic_umax:
320 case nir_intrinsic_ssbo_atomic_and:
321 case nir_intrinsic_ssbo_atomic_or:
322 case nir_intrinsic_ssbo_atomic_xor:
323 case nir_intrinsic_ssbo_atomic_exchange:
324 case nir_intrinsic_ssbo_atomic_comp_swap:
325 info->writes_memory = true;
326 break;
327 case nir_intrinsic_load_deref: {
328 nir_variable *var = intrinsic_get_var(intr);
329 nir_variable_mode mode = var->data.mode;
330 nir_deref_instr *const deref = nir_src_as_deref(intr->src[0]);
331 enum glsl_base_type base_type =
332 glsl_get_base_type(glsl_without_array(var->type));
333
334 if (nir_deref_instr_has_indirect(deref)) {
335 if (mode == nir_var_shader_in)
336 info->indirect_files |= (1 << TGSI_FILE_INPUT);
337 }
338 if (mode == nir_var_shader_in) {
339 gather_intrinsic_load_deref_info(nir, intr, deref, need_texcoord, var, info);
340
341 switch (var->data.interpolation) {
342 case INTERP_MODE_NONE:
343 if (glsl_base_type_is_integer(base_type))
344 break;
345
346 /* fall-through */
347 case INTERP_MODE_SMOOTH:
348 if (var->data.sample)
349 info->uses_persp_sample = true;
350 else if (var->data.centroid)
351 info->uses_persp_centroid = true;
352 else
353 info->uses_persp_center = true;
354 break;
355
356 case INTERP_MODE_NOPERSPECTIVE:
357 if (var->data.sample)
358 info->uses_linear_sample = true;
359 else if (var->data.centroid)
360 info->uses_linear_centroid = true;
361 else
362 info->uses_linear_center = true;
363 break;
364 }
365 }
366 break;
367 }
368 case nir_intrinsic_interp_deref_at_centroid:
369 case nir_intrinsic_interp_deref_at_sample:
370 case nir_intrinsic_interp_deref_at_offset: {
371 enum glsl_interp_mode interp = intrinsic_get_var(intr)->data.interpolation;
372 switch (interp) {
373 case INTERP_MODE_SMOOTH:
374 case INTERP_MODE_NONE:
375 if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
376 info->uses_persp_opcode_interp_centroid = true;
377 else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
378 info->uses_persp_opcode_interp_sample = true;
379 else
380 info->uses_persp_opcode_interp_offset = true;
381 break;
382 case INTERP_MODE_NOPERSPECTIVE:
383 if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
384 info->uses_linear_opcode_interp_centroid = true;
385 else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
386 info->uses_linear_opcode_interp_sample = true;
387 else
388 info->uses_linear_opcode_interp_offset = true;
389 break;
390 case INTERP_MODE_FLAT:
391 break;
392 default:
393 unreachable("Unsupported interpoation type");
394 }
395 break;
396 }
397 default:
398 break;
399 }
400 }
401 }
402
403 void nir_tgsi_scan_shader(const struct nir_shader *nir,
404 struct tgsi_shader_info *info,
405 bool need_texcoord)
406 {
407 nir_function *func;
408 unsigned i;
409
410 info->processor = pipe_shader_type_from_mesa(nir->info.stage);
411 info->num_tokens = 2; /* indicate that the shader is non-empty */
412 info->num_instructions = 2;
413
414 info->properties[TGSI_PROPERTY_NEXT_SHADER] =
415 pipe_shader_type_from_mesa(nir->info.next_stage);
416
417 if (nir->info.stage == MESA_SHADER_VERTEX) {
418 info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] =
419 nir->info.vs.window_space_position;
420 }
421
422 if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
423 info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
424 nir->info.tess.tcs_vertices_out;
425 }
426
427 if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
428 if (nir->info.tess.primitive_mode == GL_ISOLINES)
429 info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
430 else
431 info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;
432
433 STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
434 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
435 PIPE_TESS_SPACING_FRACTIONAL_ODD);
436 STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
437 PIPE_TESS_SPACING_FRACTIONAL_EVEN);
438
439 info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
440 info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
441 info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
442 }
443
444 if (nir->info.stage == MESA_SHADER_GEOMETRY) {
445 info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
446 info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
447 info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
448 info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
449 }
450
451 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
452 info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
453 nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
454 info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;
455
456 if (nir->info.fs.pixel_center_integer) {
457 info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
458 TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
459 }
460
461 if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
462 switch (nir->info.fs.depth_layout) {
463 case FRAG_DEPTH_LAYOUT_ANY:
464 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
465 break;
466 case FRAG_DEPTH_LAYOUT_GREATER:
467 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
468 break;
469 case FRAG_DEPTH_LAYOUT_LESS:
470 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
471 break;
472 case FRAG_DEPTH_LAYOUT_UNCHANGED:
473 info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
474 break;
475 default:
476 unreachable("Unknow depth layout");
477 }
478 }
479 }
480
481 if (gl_shader_stage_is_compute(nir->info.stage)) {
482 info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.cs.local_size[0];
483 info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.cs.local_size[1];
484 info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.cs.local_size[2];
485 }
486
487 i = 0;
488 uint64_t processed_inputs = 0;
489 nir_foreach_variable(variable, &nir->inputs) {
490 unsigned semantic_name, semantic_index;
491
492 const struct glsl_type *type = variable->type;
493 if (nir_is_per_vertex_io(variable, nir->info.stage)) {
494 assert(glsl_type_is_array(type));
495 type = glsl_get_array_element(type);
496 }
497
498 unsigned attrib_count = glsl_count_attribute_slots(type,
499 nir->info.stage == MESA_SHADER_VERTEX);
500
501 i = variable->data.driver_location;
502
503 /* Vertex shader inputs don't have semantics. The state
504 * tracker has already mapped them to attributes via
505 * variable->data.driver_location.
506 */
507 if (nir->info.stage == MESA_SHADER_VERTEX) {
508 continue;
509 }
510
511 for (unsigned j = 0; j < attrib_count; j++, i++) {
512
513 if (processed_inputs & ((uint64_t)1 << i))
514 continue;
515
516 processed_inputs |= ((uint64_t)1 << i);
517
518 tgsi_get_gl_varying_semantic(variable->data.location + j, need_texcoord,
519 &semantic_name, &semantic_index);
520
521 info->input_semantic_name[i] = semantic_name;
522 info->input_semantic_index[i] = semantic_index;
523
524 if (semantic_name == TGSI_SEMANTIC_PRIMID)
525 info->uses_primid = true;
526
527 enum glsl_base_type base_type =
528 glsl_get_base_type(glsl_without_array(variable->type));
529
530 switch (variable->data.interpolation) {
531 case INTERP_MODE_NONE:
532 if (glsl_base_type_is_integer(base_type)) {
533 info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
534 break;
535 }
536
537 if (semantic_name == TGSI_SEMANTIC_COLOR) {
538 info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
539 break;
540 }
541 /* fall-through */
542
543 case INTERP_MODE_SMOOTH:
544 assert(!glsl_base_type_is_integer(base_type));
545
546 info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
547 break;
548
549 case INTERP_MODE_NOPERSPECTIVE:
550 assert(!glsl_base_type_is_integer(base_type));
551
552 info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
553 break;
554
555 case INTERP_MODE_FLAT:
556 info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
557 break;
558 }
559 }
560 }
561
562 info->num_inputs = nir->num_inputs;
563 info->file_max[TGSI_FILE_INPUT] = nir->num_inputs - 1;
564
565 i = 0;
566 uint64_t processed_outputs = 0;
567 unsigned num_outputs = 0;
568 nir_foreach_variable(variable, &nir->outputs) {
569 unsigned semantic_name, semantic_index;
570
571 i = variable->data.driver_location;
572
573 const struct glsl_type *type = variable->type;
574 if (nir_is_per_vertex_io(variable, nir->info.stage)) {
575 assert(glsl_type_is_array(type));
576 type = glsl_get_array_element(type);
577 }
578
579 unsigned attrib_count = glsl_count_attribute_slots(type, false);
580 for (unsigned k = 0; k < attrib_count; k++, i++) {
581
582 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
583 tgsi_get_gl_frag_result_semantic(variable->data.location + k,
584 &semantic_name, &semantic_index);
585
586 /* Adjust for dual source blending */
587 if (variable->data.index > 0) {
588 semantic_index++;
589 }
590 } else {
591 tgsi_get_gl_varying_semantic(variable->data.location + k, need_texcoord,
592 &semantic_name, &semantic_index);
593 }
594
595 unsigned num_components = 4;
596 unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
597 if (vector_elements)
598 num_components = vector_elements;
599
600 unsigned component = variable->data.location_frac;
601 if (glsl_type_is_64bit(glsl_without_array(variable->type))) {
602 if (glsl_type_is_dual_slot(glsl_without_array(variable->type)) && k % 2) {
603 num_components = (num_components * 2) - 4;
604 component = 0;
605 } else {
606 num_components = MIN2(num_components * 2, 4);
607 }
608 }
609
610 ubyte usagemask = 0;
611 for (unsigned j = component; j < num_components + component; j++) {
612 switch (j) {
613 case 0:
614 usagemask |= TGSI_WRITEMASK_X;
615 break;
616 case 1:
617 usagemask |= TGSI_WRITEMASK_Y;
618 break;
619 case 2:
620 usagemask |= TGSI_WRITEMASK_Z;
621 break;
622 case 3:
623 usagemask |= TGSI_WRITEMASK_W;
624 break;
625 default:
626 unreachable("error calculating component index");
627 }
628 }
629
630 unsigned gs_out_streams;
631 if (variable->data.stream & NIR_STREAM_PACKED) {
632 gs_out_streams = variable->data.stream & ~NIR_STREAM_PACKED;
633 } else {
634 assert(variable->data.stream < 4);
635 gs_out_streams = 0;
636 for (unsigned j = 0; j < num_components; ++j)
637 gs_out_streams |= variable->data.stream << (2 * (component + j));
638 }
639
640 unsigned streamx = gs_out_streams & 3;
641 unsigned streamy = (gs_out_streams >> 2) & 3;
642 unsigned streamz = (gs_out_streams >> 4) & 3;
643 unsigned streamw = (gs_out_streams >> 6) & 3;
644
645 if (usagemask & TGSI_WRITEMASK_X) {
646 info->output_usagemask[i] |= TGSI_WRITEMASK_X;
647 info->output_streams[i] |= streamx;
648 info->num_stream_output_components[streamx]++;
649 }
650 if (usagemask & TGSI_WRITEMASK_Y) {
651 info->output_usagemask[i] |= TGSI_WRITEMASK_Y;
652 info->output_streams[i] |= streamy << 2;
653 info->num_stream_output_components[streamy]++;
654 }
655 if (usagemask & TGSI_WRITEMASK_Z) {
656 info->output_usagemask[i] |= TGSI_WRITEMASK_Z;
657 info->output_streams[i] |= streamz << 4;
658 info->num_stream_output_components[streamz]++;
659 }
660 if (usagemask & TGSI_WRITEMASK_W) {
661 info->output_usagemask[i] |= TGSI_WRITEMASK_W;
662 info->output_streams[i] |= streamw << 6;
663 info->num_stream_output_components[streamw]++;
664 }
665
666 /* make sure we only count this location once against
667 * the num_outputs counter.
668 */
669 if (processed_outputs & ((uint64_t)1 << i))
670 continue;
671
672 processed_outputs |= ((uint64_t)1 << i);
673 num_outputs++;
674
675 info->output_semantic_name[i] = semantic_name;
676 info->output_semantic_index[i] = semantic_index;
677
678 switch (semantic_name) {
679 case TGSI_SEMANTIC_PRIMID:
680 info->writes_primid = true;
681 break;
682 case TGSI_SEMANTIC_VIEWPORT_INDEX:
683 info->writes_viewport_index = true;
684 break;
685 case TGSI_SEMANTIC_LAYER:
686 info->writes_layer = true;
687 break;
688 case TGSI_SEMANTIC_PSIZE:
689 info->writes_psize = true;
690 break;
691 case TGSI_SEMANTIC_CLIPVERTEX:
692 info->writes_clipvertex = true;
693 break;
694 case TGSI_SEMANTIC_COLOR:
695 info->colors_written |= 1 << semantic_index;
696 break;
697 case TGSI_SEMANTIC_STENCIL:
698 info->writes_stencil = true;
699 break;
700 case TGSI_SEMANTIC_SAMPLEMASK:
701 info->writes_samplemask = true;
702 break;
703 case TGSI_SEMANTIC_EDGEFLAG:
704 info->writes_edgeflag = true;
705 break;
706 case TGSI_SEMANTIC_POSITION:
707 if (info->processor == PIPE_SHADER_FRAGMENT)
708 info->writes_z = true;
709 else
710 info->writes_position = true;
711 break;
712 }
713
714 if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
715 switch (semantic_name) {
716 case TGSI_SEMANTIC_PATCH:
717 info->reads_perpatch_outputs = true;
718 break;
719 case TGSI_SEMANTIC_TESSINNER:
720 case TGSI_SEMANTIC_TESSOUTER:
721 info->reads_tessfactor_outputs = true;
722 break;
723 default:
724 info->reads_pervertex_outputs = true;
725 }
726 }
727 }
728
729 unsigned loc = variable->data.location;
730 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
731 loc == FRAG_RESULT_COLOR &&
732 nir->info.outputs_written & (1ull << loc)) {
733 assert(attrib_count == 1);
734 info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
735 }
736 }
737
738 info->num_outputs = num_outputs;
739
740 info->const_file_max[0] = nir->num_uniforms - 1;
741 info->const_buffers_declared = u_bit_consecutive(1, nir->info.num_ubos);
742 if (nir->num_uniforms > 0)
743 info->const_buffers_declared |= 1;
744 info->images_declared = u_bit_consecutive(0, nir->info.num_images);
745 info->samplers_declared = nir->info.textures_used;
746
747 info->file_max[TGSI_FILE_SAMPLER] = util_last_bit(info->samplers_declared) - 1;
748 info->file_max[TGSI_FILE_SAMPLER_VIEW] = info->file_max[TGSI_FILE_SAMPLER];
749 info->file_mask[TGSI_FILE_SAMPLER] = info->file_mask[TGSI_FILE_SAMPLER_VIEW] = info->samplers_declared;
750 info->file_max[TGSI_FILE_IMAGE] = util_last_bit(info->images_declared) - 1;
751 info->file_mask[TGSI_FILE_IMAGE] = info->images_declared;
752
753 info->num_written_clipdistance = nir->info.clip_distance_array_size;
754 info->num_written_culldistance = nir->info.cull_distance_array_size;
755 info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
756 info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);
757
758 if (info->processor == PIPE_SHADER_FRAGMENT)
759 info->uses_kill = nir->info.fs.uses_discard;
760
761 func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
762 nir_foreach_block(block, func->impl) {
763 nir_foreach_instr(instr, block)
764 scan_instruction(nir, need_texcoord, info, instr);
765 }
766 }