1 /*
2 * Copyright © 2014-2015 Broadcom
3 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "util/ralloc.h"
26 #include "compiler/nir/nir.h"
27 #include "compiler/nir/nir_control_flow.h"
28 #include "compiler/nir/nir_builder.h"
29 #include "compiler/glsl/list.h"
30 #include "compiler/shader_enums.h"
31
32 #include "tgsi_to_nir.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "tgsi/tgsi_dump.h"
35 #include "tgsi/tgsi_info.h"
36 #include "tgsi/tgsi_scan.h"
37
38 #define SWIZ(X, Y, Z, W) (unsigned[4]){ \
39 TGSI_SWIZZLE_##X, \
40 TGSI_SWIZZLE_##Y, \
41 TGSI_SWIZZLE_##Z, \
42 TGSI_SWIZZLE_##W, \
43 }
44
45 struct ttn_reg_info {
46 /** nir register containing this TGSI index. */
47 nir_register *reg;
48 nir_variable *var;
49 /** Offset (in vec4s) from the start of var for this TGSI index. */
50 int offset;
51 };
52
53 struct ttn_compile {
54 union tgsi_full_token *token;
55 nir_builder build;
56 struct tgsi_shader_info *scan;
57
58 struct ttn_reg_info *output_regs;
59 struct ttn_reg_info *temp_regs;
60 nir_ssa_def **imm_defs;
61
62 unsigned num_samp_types;
63 nir_alu_type *samp_types;
64
65 nir_register *addr_reg;
66
67 /**
68 * Stack of nir_cursors where instructions should be pushed as we pop
69 * back out of the control flow stack.
70 *
71 * For each IF/ELSE/ENDIF block, if_stack[if_stack_pos] has where the else
72 * instructions should be placed, and if_stack[if_stack_pos - 1] has where
73 * the next instructions outside of the if/then/else block go.
74 */
75 nir_cursor *if_stack;
76 unsigned if_stack_pos;
77
78 /**
79 * Stack of nir_cursors where instructions should be pushed as we pop
80 * back out of the control flow stack.
81 *
82 * loop_stack[loop_stack_pos - 1] contains the cf_node_list for the outside
83 * of the loop.
84 */
85 nir_cursor *loop_stack;
86 unsigned loop_stack_pos;
87
88 /* How many TGSI_FILE_IMMEDIATE vec4s have been parsed so far. */
89 unsigned next_imm;
90 };
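/*
 * Illustration of the cursor stacks above (see ttn_if()/ttn_else()/
 * ttn_endif() and ttn_bgnloop()/ttn_endloop() below): a TGSI IF pushes two
 * cursors -- first the point just after the new nir_if (resumed at ENDIF),
 * then the nir_if's else_list (jumped to at ELSE) -- and moves the builder
 * into the then_list.  ELSE re-points the builder at the top-of-stack
 * else_list cursor, and ENDIF pops both entries and resumes building after
 * the nir_if.  BGNLOOP/ENDLOOP work the same way with a single cursor per
 * loop.
 */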
91
92 #define ttn_swizzle(b, src, x, y, z, w) \
93 nir_swizzle(b, src, SWIZ(x, y, z, w), 4, false)
94 #define ttn_channel(b, src, swiz) \
95 nir_swizzle(b, src, SWIZ(swiz, swiz, swiz, swiz), 1, false)
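/* For example, ttn_channel(b, src, X) yields a single-component value
 * holding src.x, and ttn_swizzle(b, src, W, Z, Y, X) yields src.wzyx.
 */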
96
97 static gl_varying_slot
98 tgsi_varying_semantic_to_slot(unsigned semantic, unsigned index)
99 {
100 switch (semantic) {
101 case TGSI_SEMANTIC_POSITION:
102 return VARYING_SLOT_POS;
103 case TGSI_SEMANTIC_COLOR:
104 if (index == 0)
105 return VARYING_SLOT_COL0;
106 else
107 return VARYING_SLOT_COL1;
108 case TGSI_SEMANTIC_BCOLOR:
109 if (index == 0)
110 return VARYING_SLOT_BFC0;
111 else
112 return VARYING_SLOT_BFC1;
113 case TGSI_SEMANTIC_FOG:
114 return VARYING_SLOT_FOGC;
115 case TGSI_SEMANTIC_PSIZE:
116 return VARYING_SLOT_PSIZ;
117 case TGSI_SEMANTIC_GENERIC:
118 return VARYING_SLOT_VAR0 + index;
119 case TGSI_SEMANTIC_FACE:
120 return VARYING_SLOT_FACE;
121 case TGSI_SEMANTIC_EDGEFLAG:
122 return VARYING_SLOT_EDGE;
123 case TGSI_SEMANTIC_PRIMID:
124 return VARYING_SLOT_PRIMITIVE_ID;
125 case TGSI_SEMANTIC_CLIPDIST:
126 if (index == 0)
127 return VARYING_SLOT_CLIP_DIST0;
128 else
129 return VARYING_SLOT_CLIP_DIST1;
130 case TGSI_SEMANTIC_CLIPVERTEX:
131 return VARYING_SLOT_CLIP_VERTEX;
132 case TGSI_SEMANTIC_TEXCOORD:
133 return VARYING_SLOT_TEX0 + index;
134 case TGSI_SEMANTIC_PCOORD:
135 return VARYING_SLOT_PNTC;
136 case TGSI_SEMANTIC_VIEWPORT_INDEX:
137 return VARYING_SLOT_VIEWPORT;
138 case TGSI_SEMANTIC_LAYER:
139 return VARYING_SLOT_LAYER;
140 default:
141 fprintf(stderr, "Bad TGSI semantic: %d/%d\n", semantic, index);
142 abort();
143 }
144 }
145
146 /* Temporary helper to remap back to TGSI style semantic name/index
147 * values, for use in drivers that haven't been converted to using
148 * VARYING_SLOT_
149 */
150 void
151 varying_slot_to_tgsi_semantic(gl_varying_slot slot,
152 unsigned *semantic_name, unsigned *semantic_index)
153 {
154 static const unsigned map[][2] = {
155 [VARYING_SLOT_POS] = { TGSI_SEMANTIC_POSITION, 0 },
156 [VARYING_SLOT_COL0] = { TGSI_SEMANTIC_COLOR, 0 },
157 [VARYING_SLOT_COL1] = { TGSI_SEMANTIC_COLOR, 1 },
158 [VARYING_SLOT_BFC0] = { TGSI_SEMANTIC_BCOLOR, 0 },
159 [VARYING_SLOT_BFC1] = { TGSI_SEMANTIC_BCOLOR, 1 },
160 [VARYING_SLOT_FOGC] = { TGSI_SEMANTIC_FOG, 0 },
161 [VARYING_SLOT_PSIZ] = { TGSI_SEMANTIC_PSIZE, 0 },
162 [VARYING_SLOT_FACE] = { TGSI_SEMANTIC_FACE, 0 },
163 [VARYING_SLOT_EDGE] = { TGSI_SEMANTIC_EDGEFLAG, 0 },
164 [VARYING_SLOT_PRIMITIVE_ID] = { TGSI_SEMANTIC_PRIMID, 0 },
165 [VARYING_SLOT_CLIP_DIST0] = { TGSI_SEMANTIC_CLIPDIST, 0 },
166 [VARYING_SLOT_CLIP_DIST1] = { TGSI_SEMANTIC_CLIPDIST, 1 },
167 [VARYING_SLOT_CLIP_VERTEX] = { TGSI_SEMANTIC_CLIPVERTEX, 0 },
168 [VARYING_SLOT_PNTC] = { TGSI_SEMANTIC_PCOORD, 0 },
169 [VARYING_SLOT_VIEWPORT] = { TGSI_SEMANTIC_VIEWPORT_INDEX, 0 },
170 [VARYING_SLOT_LAYER] = { TGSI_SEMANTIC_LAYER, 0 },
171 };
172
173 if (slot >= VARYING_SLOT_VAR0) {
174 *semantic_name = TGSI_SEMANTIC_GENERIC;
175 *semantic_index = slot - VARYING_SLOT_VAR0;
176 return;
177 }
178
179 if (slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7) {
180 *semantic_name = TGSI_SEMANTIC_TEXCOORD;
181 *semantic_index = slot - VARYING_SLOT_TEX0;
182 return;
183 }
184
185 if (slot >= ARRAY_SIZE(map)) {
186 fprintf(stderr, "Unknown varying slot %d\n", slot);
187 abort();
188 }
189
190 *semantic_name = map[slot][0];
191 *semantic_index = map[slot][1];
192 }
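/* e.g. varying_slot_to_tgsi_semantic(VARYING_SLOT_BFC1, &name, &index)
 * yields name == TGSI_SEMANTIC_BCOLOR and index == 1, while any
 * VARYING_SLOT_VARn maps to TGSI_SEMANTIC_GENERIC with index n.
 */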
193
194 /* Temporary helper to remap back to TGSI style semantic name/index
195 * values, for use in drivers that haven't been converted to using
196 * FRAG_RESULT_
197 */
198 void
199 frag_result_to_tgsi_semantic(gl_frag_result slot,
200 unsigned *semantic_name, unsigned *semantic_index)
201 {
202 static const unsigned map[][2] = {
203 [FRAG_RESULT_DEPTH] = { TGSI_SEMANTIC_POSITION, 0 },
204 [FRAG_RESULT_COLOR] = { TGSI_SEMANTIC_COLOR, -1 },
205 [FRAG_RESULT_DATA0 + 0] = { TGSI_SEMANTIC_COLOR, 0 },
206 [FRAG_RESULT_DATA0 + 1] = { TGSI_SEMANTIC_COLOR, 1 },
207 [FRAG_RESULT_DATA0 + 2] = { TGSI_SEMANTIC_COLOR, 2 },
208 [FRAG_RESULT_DATA0 + 3] = { TGSI_SEMANTIC_COLOR, 3 },
209 [FRAG_RESULT_DATA0 + 4] = { TGSI_SEMANTIC_COLOR, 4 },
210 [FRAG_RESULT_DATA0 + 5] = { TGSI_SEMANTIC_COLOR, 5 },
211 [FRAG_RESULT_DATA0 + 6] = { TGSI_SEMANTIC_COLOR, 6 },
212 [FRAG_RESULT_DATA0 + 7] = { TGSI_SEMANTIC_COLOR, 7 },
213 };
214
215 *semantic_name = map[slot][0];
216 *semantic_index = map[slot][1];
217 }
218
219 static nir_ssa_def *
220 ttn_src_for_dest(nir_builder *b, nir_alu_dest *dest)
221 {
222 nir_alu_src src;
223 memset(&src, 0, sizeof(src));
224
225 if (dest->dest.is_ssa)
226 src.src = nir_src_for_ssa(&dest->dest.ssa);
227 else {
228 assert(!dest->dest.reg.indirect);
229 src.src = nir_src_for_reg(dest->dest.reg.reg);
230 src.src.reg.base_offset = dest->dest.reg.base_offset;
231 }
232
233 for (int i = 0; i < 4; i++)
234 src.swizzle[i] = i;
235
236 return nir_fmov_alu(b, src, 4);
237 }
238
239 static void
240 ttn_emit_declaration(struct ttn_compile *c)
241 {
242 nir_builder *b = &c->build;
243 struct tgsi_full_declaration *decl = &c->token->FullDeclaration;
244 unsigned array_size = decl->Range.Last - decl->Range.First + 1;
245 unsigned file = decl->Declaration.File;
246 unsigned i;
247
248 if (file == TGSI_FILE_TEMPORARY) {
249 if (decl->Declaration.Array) {
250 /* for arrays, we create variables instead of registers: */
251 nir_variable *var = rzalloc(b->shader, nir_variable);
252
253 var->type = glsl_array_type(glsl_vec4_type(), array_size);
254 var->data.mode = nir_var_global;
255 var->name = ralloc_asprintf(var, "arr_%d", decl->Array.ArrayID);
256
257 exec_list_push_tail(&b->shader->globals, &var->node);
258
259 for (i = 0; i < array_size; i++) {
260 /* point all the matching slots to the same var,
261 * with appropriate offset set, mostly just so
262 * we know what to do when tgsi does a non-indirect
263 * access
264 */
265 c->temp_regs[decl->Range.First + i].reg = NULL;
266 c->temp_regs[decl->Range.First + i].var = var;
267 c->temp_regs[decl->Range.First + i].offset = i;
268 }
269 } else {
270 for (i = 0; i < array_size; i++) {
271 nir_register *reg = nir_local_reg_create(b->impl);
272 reg->num_components = 4;
273 c->temp_regs[decl->Range.First + i].reg = reg;
274 c->temp_regs[decl->Range.First + i].var = NULL;
275 c->temp_regs[decl->Range.First + i].offset = 0;
276 }
277 }
278 } else if (file == TGSI_FILE_ADDRESS) {
279 c->addr_reg = nir_local_reg_create(b->impl);
280 c->addr_reg->num_components = 4;
281 } else if (file == TGSI_FILE_SYSTEM_VALUE) {
282 /* Nothing to record for system values. */
283 } else if (file == TGSI_FILE_SAMPLER) {
284 /* Nothing to record for samplers. */
285 } else if (file == TGSI_FILE_SAMPLER_VIEW) {
286 struct tgsi_declaration_sampler_view *sview = &decl->SamplerView;
287 nir_alu_type type;
288
289 assert((sview->ReturnTypeX == sview->ReturnTypeY) &&
290 (sview->ReturnTypeX == sview->ReturnTypeZ) &&
291 (sview->ReturnTypeX == sview->ReturnTypeW));
292
293 switch (sview->ReturnTypeX) {
294 case TGSI_RETURN_TYPE_SINT:
295 type = nir_type_int;
296 break;
297 case TGSI_RETURN_TYPE_UINT:
298 type = nir_type_uint;
299 break;
300 case TGSI_RETURN_TYPE_FLOAT:
301 default:
302 type = nir_type_float;
303 break;
304 }
305
306 for (i = 0; i < array_size; i++) {
307 c->samp_types[decl->Range.First + i] = type;
308 }
309 } else {
310 bool is_array = (array_size > 1);
311
312 assert(file == TGSI_FILE_INPUT ||
313 file == TGSI_FILE_OUTPUT ||
314 file == TGSI_FILE_CONSTANT);
315
316 /* nothing to do for UBOs: */
317 if ((file == TGSI_FILE_CONSTANT) && decl->Declaration.Dimension &&
318 decl->Dim.Index2D != 0) {
319 b->shader->info.num_ubos =
320 MAX2(b->shader->info.num_ubos, decl->Dim.Index2D);
321 return;
322 }
323
324 if ((file == TGSI_FILE_INPUT) || (file == TGSI_FILE_OUTPUT)) {
325 is_array = (is_array && decl->Declaration.Array &&
326 (decl->Array.ArrayID != 0));
327 }
328
329 for (i = 0; i < array_size; i++) {
330 unsigned idx = decl->Range.First + i;
331 nir_variable *var = rzalloc(b->shader, nir_variable);
332
333 var->data.driver_location = idx;
334
335 var->type = glsl_vec4_type();
336 if (is_array)
337 var->type = glsl_array_type(var->type, array_size);
338
339 switch (file) {
340 case TGSI_FILE_INPUT:
341 var->data.read_only = true;
342 var->data.mode = nir_var_shader_in;
343 var->name = ralloc_asprintf(var, "in_%d", idx);
344
345 if (c->scan->processor == PIPE_SHADER_FRAGMENT) {
346 if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
347 var->data.location = SYSTEM_VALUE_FRONT_FACE;
348 var->data.mode = nir_var_system_value;
349 } else {
350 var->data.location =
351 tgsi_varying_semantic_to_slot(decl->Semantic.Name,
352 decl->Semantic.Index);
353 }
354 } else {
355 assert(!decl->Declaration.Semantic);
356 var->data.location = VERT_ATTRIB_GENERIC0 + idx;
357 }
358 var->data.index = 0;
359
360 /* We definitely need to translate the interpolation field, because
361 * nir_print will decode it.
362 */
363 switch (decl->Interp.Interpolate) {
364 case TGSI_INTERPOLATE_CONSTANT:
365 var->data.interpolation = INTERP_MODE_FLAT;
366 break;
367 case TGSI_INTERPOLATE_LINEAR:
368 var->data.interpolation = INTERP_MODE_NOPERSPECTIVE;
369 break;
370 case TGSI_INTERPOLATE_PERSPECTIVE:
371 var->data.interpolation = INTERP_MODE_SMOOTH;
372 break;
373 }
374
375 exec_list_push_tail(&b->shader->inputs, &var->node);
376
377 for (int i = 0; i < array_size; i++)
378 b->shader->info.inputs_read |= 1 << (var->data.location + i);
379
380 break;
381 case TGSI_FILE_OUTPUT: {
382 int semantic_name = decl->Semantic.Name;
383 int semantic_index = decl->Semantic.Index;
384 /* Since we can't load from outputs in the IR, we make temporaries
385 * for the outputs and emit stores to the real outputs at the end of
386 * the shader.
387 */
388 nir_register *reg = nir_local_reg_create(b->impl);
389 reg->num_components = 4;
390 if (is_array)
391 reg->num_array_elems = array_size;
392
393 var->data.mode = nir_var_shader_out;
394 var->name = ralloc_asprintf(var, "out_%d", idx);
395 var->data.index = 0;
396
397 if (c->scan->processor == PIPE_SHADER_FRAGMENT) {
398 switch (semantic_name) {
399 case TGSI_SEMANTIC_COLOR: {
400 /* TODO tgsi loses some information, so we cannot
401 * actually differentiate here between DSB and MRT
402 * at this point. But so far no drivers using tgsi-
403 * to-nir support dual source blend:
404 */
405 bool dual_src_blend = false;
406 if (dual_src_blend && (semantic_index == 1)) {
407 var->data.location = FRAG_RESULT_DATA0;
408 var->data.index = 1;
409 } else {
410 if (c->scan->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
411 var->data.location = FRAG_RESULT_COLOR;
412 else
413 var->data.location = FRAG_RESULT_DATA0 + semantic_index;
414 }
415 break;
416 }
417 case TGSI_SEMANTIC_POSITION:
418 var->data.location = FRAG_RESULT_DEPTH;
419 break;
420 default:
421 fprintf(stderr, "Bad TGSI semantic: %d/%d\n",
422 decl->Semantic.Name, decl->Semantic.Index);
423 abort();
424 }
425 } else {
426 var->data.location =
427 tgsi_varying_semantic_to_slot(semantic_name, semantic_index);
428 }
429
430 if (is_array) {
431 unsigned j;
432 for (j = 0; j < array_size; j++) {
433 c->output_regs[idx + j].offset = i + j;
434 c->output_regs[idx + j].reg = reg;
435 }
436 } else {
437 c->output_regs[idx].offset = i;
438 c->output_regs[idx].reg = reg;
439 }
440
441 exec_list_push_tail(&b->shader->outputs, &var->node);
442
443 for (int i = 0; i < array_size; i++)
444 b->shader->info.outputs_written |= 1 << (var->data.location + i);
445 }
446 break;
447 case TGSI_FILE_CONSTANT:
448 var->data.mode = nir_var_uniform;
449 var->name = ralloc_asprintf(var, "uniform_%d", idx);
450
451 exec_list_push_tail(&b->shader->uniforms, &var->node);
452 break;
453 default:
454 unreachable("bad declaration file");
455 return;
456 }
457
458 if (is_array)
459 break;
460 }
461
462 }
463 }
464
465 static void
466 ttn_emit_immediate(struct ttn_compile *c)
467 {
468 nir_builder *b = &c->build;
469 struct tgsi_full_immediate *tgsi_imm = &c->token->FullImmediate;
470 nir_load_const_instr *load_const;
471 int i;
472
473 load_const = nir_load_const_instr_create(b->shader, 4, 32);
474 c->imm_defs[c->next_imm] = &load_const->def;
475 c->next_imm++;
476
477 for (i = 0; i < 4; i++)
478 load_const->value.u32[i] = tgsi_imm->u[i].Uint;
479
480 nir_builder_instr_insert(b, &load_const->instr);
481 }
482
483 static nir_ssa_def *
484 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect);
485
486 /* generate either a constant or indirect deref chain for accessing an
487 * array variable.
488 */
489 static nir_deref_var *
490 ttn_array_deref(struct ttn_compile *c, nir_intrinsic_instr *instr,
491 nir_variable *var, unsigned offset,
492 struct tgsi_ind_register *indirect)
493 {
494 nir_deref_var *deref = nir_deref_var_create(instr, var);
495 nir_deref_array *arr = nir_deref_array_create(deref);
496
497 arr->base_offset = offset;
498 arr->deref.type = glsl_get_array_element(var->type);
499
500 if (indirect) {
501 arr->deref_array_type = nir_deref_array_type_indirect;
502 arr->indirect = nir_src_for_ssa(ttn_src_for_indirect(c, indirect));
503 } else {
504 arr->deref_array_type = nir_deref_array_type_direct;
505 }
506
507 deref->deref.child = &arr->deref;
508
509 return deref;
510 }
511
512 static nir_src
513 ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
514 struct tgsi_ind_register *indirect,
515 struct tgsi_dimension *dim,
516 struct tgsi_ind_register *dimind)
517 {
518 nir_builder *b = &c->build;
519 nir_src src;
520
521 memset(&src, 0, sizeof(src));
522
523 switch (file) {
524 case TGSI_FILE_TEMPORARY:
525 if (c->temp_regs[index].var) {
526 unsigned offset = c->temp_regs[index].offset;
527 nir_variable *var = c->temp_regs[index].var;
528 nir_intrinsic_instr *load;
529
530 load = nir_intrinsic_instr_create(b->shader,
531 nir_intrinsic_load_var);
532 load->num_components = 4;
533 load->variables[0] = ttn_array_deref(c, load, var, offset, indirect);
534 nir_ssa_dest_init(&load->instr, &load->dest,
535 4, 32, NULL);
536 nir_builder_instr_insert(b, &load->instr);
537
538 src = nir_src_for_ssa(&load->dest.ssa);
539
540 } else {
541 assert(!indirect);
542 src.reg.reg = c->temp_regs[index].reg;
543 }
544 assert(!dim);
545 break;
546
547 case TGSI_FILE_ADDRESS:
548 src.reg.reg = c->addr_reg;
549 assert(!dim);
550 break;
551
552 case TGSI_FILE_IMMEDIATE:
553 src = nir_src_for_ssa(c->imm_defs[index]);
554 assert(!indirect);
555 assert(!dim);
556 break;
557
558 case TGSI_FILE_SYSTEM_VALUE: {
559 nir_intrinsic_instr *load;
560 nir_intrinsic_op op;
561 unsigned ncomp = 1;
562
563 assert(!indirect);
564 assert(!dim);
565
566 switch (c->scan->system_value_semantic_name[index]) {
567 case TGSI_SEMANTIC_VERTEXID_NOBASE:
568 op = nir_intrinsic_load_vertex_id_zero_base;
569 break;
570 case TGSI_SEMANTIC_VERTEXID:
571 op = nir_intrinsic_load_vertex_id;
572 break;
573 case TGSI_SEMANTIC_BASEVERTEX:
574 op = nir_intrinsic_load_base_vertex;
575 break;
576 case TGSI_SEMANTIC_INSTANCEID:
577 op = nir_intrinsic_load_instance_id;
578 break;
579 default:
580 unreachable("bad system value");
581 }
582
583 load = nir_intrinsic_instr_create(b->shader, op);
584 load->num_components = ncomp;
585
586 nir_ssa_dest_init(&load->instr, &load->dest, ncomp, 32, NULL);
587 nir_builder_instr_insert(b, &load->instr);
588
589 src = nir_src_for_ssa(&load->dest.ssa);
590
591 b->shader->info.system_values_read |=
592 (1 << nir_system_value_from_intrinsic(op));
593
594 break;
595 }
596
597 case TGSI_FILE_INPUT:
598 case TGSI_FILE_CONSTANT: {
599 nir_intrinsic_instr *load;
600 nir_intrinsic_op op;
601 unsigned srcn = 0;
602
603 switch (file) {
604 case TGSI_FILE_INPUT:
605 /* Special case: Turn the frontface varying into a load of the
606 * frontface intrinsic plus math, and append the extra constant floats.
607 */
608 if (c->scan->processor == PIPE_SHADER_FRAGMENT &&
609 c->scan->input_semantic_name[index] == TGSI_SEMANTIC_FACE) {
610 nir_ssa_def *tgsi_frontface[4] = {
611 nir_bcsel(&c->build,
612 nir_load_system_value(&c->build,
613 nir_intrinsic_load_front_face, 0),
614 nir_imm_float(&c->build, 1.0),
615 nir_imm_float(&c->build, -1.0)),
616 nir_imm_float(&c->build, 0.0),
617 nir_imm_float(&c->build, 0.0),
618 nir_imm_float(&c->build, 1.0),
619 };
620
621 return nir_src_for_ssa(nir_vec(&c->build, tgsi_frontface, 4));
622 }
623
624 op = nir_intrinsic_load_input;
625 assert(!dim);
626 break;
627 case TGSI_FILE_CONSTANT:
628 if (dim && (dim->Index > 0 || dim->Indirect)) {
629 op = nir_intrinsic_load_ubo;
630 } else {
631 op = nir_intrinsic_load_uniform;
632 }
633 break;
634 default:
635 unreachable("No other load files supported");
636 break;
637 }
638
639 load = nir_intrinsic_instr_create(b->shader, op);
640
641 load->num_components = 4;
642 if (dim && (dim->Index > 0 || dim->Indirect)) {
643 if (dimind) {
644 load->src[srcn] =
645 ttn_src_for_file_and_index(c, dimind->File, dimind->Index,
646 NULL, NULL, NULL);
647 } else {
648 /* UBOs start at index 1 in TGSI: */
649 load->src[srcn] =
650 nir_src_for_ssa(nir_imm_int(b, dim->Index - 1));
651 }
652 srcn++;
653 }
654
655 nir_ssa_def *offset;
656 if (op == nir_intrinsic_load_ubo) {
657 /* UBO loads don't have a base offset. */
658 offset = nir_imm_int(b, index);
659 if (indirect) {
660 offset = nir_iadd(b, offset, ttn_src_for_indirect(c, indirect));
661 }
662 /* UBO offsets are in bytes, but TGSI gives them to us in vec4's */
663 offset = nir_ishl(b, offset, nir_imm_int(b, 4));
664 } else {
665 nir_intrinsic_set_base(load, index);
666 if (indirect) {
667 offset = ttn_src_for_indirect(c, indirect);
668 } else {
669 offset = nir_imm_int(b, 0);
670 }
671 }
672 load->src[srcn++] = nir_src_for_ssa(offset);
673
674 nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
675 nir_builder_instr_insert(b, &load->instr);
676
677 src = nir_src_for_ssa(&load->dest.ssa);
678 break;
679 }
680
681 default:
682 unreachable("bad src file");
683 }
684
685
686 return src;
687 }
688
689 static nir_ssa_def *
690 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect)
691 {
692 nir_builder *b = &c->build;
693 nir_alu_src src;
694 memset(&src, 0, sizeof(src));
695 for (int i = 0; i < 4; i++)
696 src.swizzle[i] = indirect->Swizzle;
697 src.src = ttn_src_for_file_and_index(c,
698 indirect->File,
699 indirect->Index,
700 NULL, NULL, NULL);
701 return nir_imov_alu(b, src, 1);
702 }
703
704 static nir_alu_dest
705 ttn_get_dest(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
706 {
707 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
708 nir_alu_dest dest;
709 unsigned index = tgsi_dst->Index;
710
711 memset(&dest, 0, sizeof(dest));
712
713 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
714 if (c->temp_regs[index].var) {
715 nir_register *reg;
716
717 /* This works because TGSI will give us a base offset
718 * (in the case of an indirect index) that points back into
719 * the array. Access can be direct or indirect; we
720 * don't really care. Just create a one-shot dst reg
721 * that will get store_var'd back into the array var
722 * at the end of ttn_emit_instruction().
723 */
724 reg = nir_local_reg_create(c->build.impl);
725 reg->num_components = 4;
726 dest.dest.reg.reg = reg;
727 dest.dest.reg.base_offset = 0;
728 } else {
729 assert(!tgsi_dst->Indirect);
730 dest.dest.reg.reg = c->temp_regs[index].reg;
731 dest.dest.reg.base_offset = c->temp_regs[index].offset;
732 }
733 } else if (tgsi_dst->File == TGSI_FILE_OUTPUT) {
734 dest.dest.reg.reg = c->output_regs[index].reg;
735 dest.dest.reg.base_offset = c->output_regs[index].offset;
736 } else if (tgsi_dst->File == TGSI_FILE_ADDRESS) {
737 assert(index == 0);
738 dest.dest.reg.reg = c->addr_reg;
739 }
740
741 dest.write_mask = tgsi_dst->WriteMask;
742 dest.saturate = false;
743
744 if (tgsi_dst->Indirect && (tgsi_dst->File != TGSI_FILE_TEMPORARY)) {
745 nir_src *indirect = ralloc(c->build.shader, nir_src);
746 *indirect = nir_src_for_ssa(ttn_src_for_indirect(c, &tgsi_fdst->Indirect));
747 dest.dest.reg.indirect = indirect;
748 }
749
750 return dest;
751 }
752
753 static nir_variable *
754 ttn_get_var(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
755 {
756 struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
757 unsigned index = tgsi_dst->Index;
758
759 if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
760 /* we should not have an indirect when there is no var! */
761 if (!c->temp_regs[index].var)
762 assert(!tgsi_dst->Indirect);
763 return c->temp_regs[index].var;
764 }
765
766 return NULL;
767 }
768
769 static nir_ssa_def *
770 ttn_get_src(struct ttn_compile *c, struct tgsi_full_src_register *tgsi_fsrc,
771 int src_idx)
772 {
773 nir_builder *b = &c->build;
774 struct tgsi_src_register *tgsi_src = &tgsi_fsrc->Register;
775 unsigned tgsi_opcode = c->token->FullInstruction.Instruction.Opcode;
776 unsigned tgsi_src_type = tgsi_opcode_infer_src_type(tgsi_opcode, src_idx);
777 bool src_is_float = !(tgsi_src_type == TGSI_TYPE_SIGNED ||
778 tgsi_src_type == TGSI_TYPE_UNSIGNED);
779 nir_alu_src src;
780
781 memset(&src, 0, sizeof(src));
782
783 if (tgsi_src->File == TGSI_FILE_NULL) {
784 return nir_imm_float(b, 0.0);
785 } else if (tgsi_src->File == TGSI_FILE_SAMPLER) {
786 /* Only the index of the sampler gets used in texturing, and it will
787 * handle looking that up on its own instead of using the nir_alu_src.
788 */
789 assert(!tgsi_src->Indirect);
790 return NULL;
791 } else {
792 struct tgsi_ind_register *ind = NULL;
793 struct tgsi_dimension *dim = NULL;
794 struct tgsi_ind_register *dimind = NULL;
795 if (tgsi_src->Indirect)
796 ind = &tgsi_fsrc->Indirect;
797 if (tgsi_src->Dimension) {
798 dim = &tgsi_fsrc->Dimension;
799 if (dim->Indirect)
800 dimind = &tgsi_fsrc->DimIndirect;
801 }
802 src.src = ttn_src_for_file_and_index(c,
803 tgsi_src->File,
804 tgsi_src->Index,
805 ind, dim, dimind);
806 }
807
808 src.swizzle[0] = tgsi_src->SwizzleX;
809 src.swizzle[1] = tgsi_src->SwizzleY;
810 src.swizzle[2] = tgsi_src->SwizzleZ;
811 src.swizzle[3] = tgsi_src->SwizzleW;
812
813 nir_ssa_def *def = nir_fmov_alu(b, src, 4);
814
815 if (tgsi_src->Absolute) {
816 if (src_is_float)
817 def = nir_fabs(b, def);
818 else
819 def = nir_iabs(b, def);
820 }
821
822 if (tgsi_src->Negate) {
823 if (src_is_float)
824 def = nir_fneg(b, def);
825 else
826 def = nir_ineg(b, def);
827 }
828
829 return def;
830 }
831
832 static void
833 ttn_alu(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
834 {
835 unsigned num_srcs = nir_op_infos[op].num_inputs;
836 nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
837 unsigned i;
838
839 for (i = 0; i < num_srcs; i++)
840 instr->src[i].src = nir_src_for_ssa(src[i]);
841
842 instr->dest = dest;
843 nir_builder_instr_insert(b, &instr->instr);
844 }
845
846 static void
847 ttn_move_dest_masked(nir_builder *b, nir_alu_dest dest,
848 nir_ssa_def *def, unsigned write_mask)
849 {
850 if (!(dest.write_mask & write_mask))
851 return;
852
853 nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_imov);
854 mov->dest = dest;
855 mov->dest.write_mask &= write_mask;
856 mov->src[0].src = nir_src_for_ssa(def);
857 for (unsigned i = def->num_components; i < 4; i++)
858 mov->src[0].swizzle[i] = def->num_components - 1;
859 nir_builder_instr_insert(b, &mov->instr);
860 }
861
862 static void
863 ttn_move_dest(nir_builder *b, nir_alu_dest dest, nir_ssa_def *def)
864 {
865 ttn_move_dest_masked(b, dest, def, TGSI_WRITEMASK_XYZW);
866 }
867
868 static void
869 ttn_arl(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
870 {
871 ttn_move_dest(b, dest, nir_f2i32(b, nir_ffloor(b, src[0])));
872 }
873
874 /* EXP - Approximate Exponential Base 2
875 * dst.x = 2^{\lfloor src.x\rfloor}
876 * dst.y = src.x - \lfloor src.x\rfloor
877 * dst.z = 2^{src.x}
878 * dst.w = 1.0
879 */
880 static void
881 ttn_exp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
882 {
883 nir_ssa_def *srcx = ttn_channel(b, src[0], X);
884
885 ttn_move_dest_masked(b, dest, nir_fexp2(b, nir_ffloor(b, srcx)),
886 TGSI_WRITEMASK_X);
887 ttn_move_dest_masked(b, dest, nir_fsub(b, srcx, nir_ffloor(b, srcx)),
888 TGSI_WRITEMASK_Y);
889 ttn_move_dest_masked(b, dest, nir_fexp2(b, srcx), TGSI_WRITEMASK_Z);
890 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
891 }
892
893 /* LOG - Approximate Logarithm Base 2
894 * dst.x = \lfloor\log_2{|src.x|}\rfloor
895 * dst.y = \frac{|src.x|}{2^{\lfloor\log_2{|src.x|}\rfloor}}
896 * dst.z = \log_2{|src.x|}
897 * dst.w = 1.0
898 */
899 static void
900 ttn_log(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
901 {
902 nir_ssa_def *abs_srcx = nir_fabs(b, ttn_channel(b, src[0], X));
903 nir_ssa_def *log2 = nir_flog2(b, abs_srcx);
904
905 ttn_move_dest_masked(b, dest, nir_ffloor(b, log2), TGSI_WRITEMASK_X);
906 ttn_move_dest_masked(b, dest,
907 nir_fdiv(b, abs_srcx, nir_fexp2(b, nir_ffloor(b, log2))),
908 TGSI_WRITEMASK_Y);
909 ttn_move_dest_masked(b, dest, nir_flog2(b, abs_srcx), TGSI_WRITEMASK_Z);
910 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
911 }
912
913 /* DST - Distance Vector
914 * dst.x = 1.0
915 * dst.y = src0.y \times src1.y
916 * dst.z = src0.z
917 * dst.w = src1.w
918 */
919 static void
920 ttn_dst(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
921 {
922 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_X);
923 ttn_move_dest_masked(b, dest, nir_fmul(b, src[0], src[1]), TGSI_WRITEMASK_Y);
924 ttn_move_dest_masked(b, dest, nir_fmov(b, src[0]), TGSI_WRITEMASK_Z);
925 ttn_move_dest_masked(b, dest, nir_fmov(b, src[1]), TGSI_WRITEMASK_W);
926 }
927
928 /* LIT - Light Coefficients
929 * dst.x = 1.0
930 * dst.y = max(src.x, 0.0)
931 * dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0))} : 0
932 * dst.w = 1.0
933 */
934 static void
935 ttn_lit(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
936 {
937 ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_XW);
938
939 ttn_move_dest_masked(b, dest, nir_fmax(b, ttn_channel(b, src[0], X),
940 nir_imm_float(b, 0.0)), TGSI_WRITEMASK_Y);
941
942 if (dest.write_mask & TGSI_WRITEMASK_Z) {
943 nir_ssa_def *src0_y = ttn_channel(b, src[0], Y);
944 nir_ssa_def *wclamp = nir_fmax(b, nir_fmin(b, ttn_channel(b, src[0], W),
945 nir_imm_float(b, 128.0)),
946 nir_imm_float(b, -128.0));
947 nir_ssa_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
948 wclamp);
949
950 ttn_move_dest_masked(b, dest,
951 nir_bcsel(b,
952 nir_fge(b,
953 nir_imm_float(b, 0.0),
954 ttn_channel(b, src[0], X)),
955 nir_imm_float(b, 0.0),
956 pow),
957 TGSI_WRITEMASK_Z);
958 }
959 }
960
961 static void
962 ttn_sle(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
963 {
964 ttn_move_dest(b, dest, nir_sge(b, src[1], src[0]));
965 }
966
967 static void
968 ttn_sgt(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
969 {
970 ttn_move_dest(b, dest, nir_slt(b, src[1], src[0]));
971 }
972
973 static void
974 ttn_dp2(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
975 {
976 ttn_move_dest(b, dest, nir_fdot2(b, src[0], src[1]));
977 }
978
979 static void
980 ttn_dp3(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
981 {
982 ttn_move_dest(b, dest, nir_fdot3(b, src[0], src[1]));
983 }
984
985 static void
986 ttn_dp4(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
987 {
988 ttn_move_dest(b, dest, nir_fdot4(b, src[0], src[1]));
989 }
990
991 static void
992 ttn_umad(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
993 {
994 ttn_move_dest(b, dest, nir_iadd(b, nir_imul(b, src[0], src[1]), src[2]));
995 }
996
997 static void
998 ttn_arr(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
999 {
1000 ttn_move_dest(b, dest, nir_ffloor(b, nir_fadd(b, src[0], nir_imm_float(b, 0.5))));
1001 }
1002
1003 static void
1004 ttn_cmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1005 {
1006 ttn_move_dest(b, dest, nir_bcsel(b,
1007 nir_flt(b, src[0], nir_imm_float(b, 0.0)),
1008 src[1], src[2]));
1009 }
1010
1011 static void
1012 ttn_ucmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1013 {
1014 ttn_move_dest(b, dest, nir_bcsel(b,
1015 nir_ine(b, src[0], nir_imm_int(b, 0)),
1016 src[1], src[2]));
1017 }
1018
1019 static void
1020 ttn_kill(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1021 {
1022 nir_intrinsic_instr *discard =
1023 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
1024 nir_builder_instr_insert(b, &discard->instr);
1025 b->shader->info.fs.uses_discard = true;
1026 }
1027
1028 static void
1029 ttn_kill_if(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
1030 {
1031 nir_ssa_def *cmp = nir_bany_inequal4(b, nir_flt(b, src[0],
1032 nir_imm_float(b, 0.0)),
1033 nir_imm_int(b, 0));
1034 nir_intrinsic_instr *discard =
1035 nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard_if);
1036 discard->src[0] = nir_src_for_ssa(cmp);
1037 nir_builder_instr_insert(b, &discard->instr);
1038 b->shader->info.fs.uses_discard = true;
1039 }
1040
1041 static void
1042 ttn_if(struct ttn_compile *c, nir_ssa_def *src, bool is_uint)
1043 {
1044 nir_builder *b = &c->build;
1045
1046 src = ttn_channel(b, src, X);
1047
1048 nir_if *if_stmt = nir_if_create(b->shader);
1049 if (is_uint) {
1050 if_stmt->condition = nir_src_for_ssa(nir_ine(b, src, nir_imm_int(b, 0)));
1051 } else {
1052 if_stmt->condition = nir_src_for_ssa(nir_fne(b, src, nir_imm_int(b, 0)));
1053 }
1054 nir_builder_cf_insert(b, &if_stmt->cf_node);
1055
1056 c->if_stack[c->if_stack_pos] = nir_after_cf_node(&if_stmt->cf_node);
1057 c->if_stack_pos++;
1058
1059 b->cursor = nir_after_cf_list(&if_stmt->then_list);
1060
1061 c->if_stack[c->if_stack_pos] = nir_after_cf_list(&if_stmt->else_list);
1062 c->if_stack_pos++;
1063 }
1064
1065 static void
1066 ttn_else(struct ttn_compile *c)
1067 {
1068 nir_builder *b = &c->build;
1069
1070 b->cursor = c->if_stack[c->if_stack_pos - 1];
1071 }
1072
1073 static void
1074 ttn_endif(struct ttn_compile *c)
1075 {
1076 nir_builder *b = &c->build;
1077
1078 c->if_stack_pos -= 2;
1079 b->cursor = c->if_stack[c->if_stack_pos];
1080 }
1081
1082 static void
1083 ttn_bgnloop(struct ttn_compile *c)
1084 {
1085 nir_builder *b = &c->build;
1086
1087 nir_loop *loop = nir_loop_create(b->shader);
1088 nir_builder_cf_insert(b, &loop->cf_node);
1089
1090 c->loop_stack[c->loop_stack_pos] = nir_after_cf_node(&loop->cf_node);
1091 c->loop_stack_pos++;
1092
1093 b->cursor = nir_after_cf_list(&loop->body);
1094 }
1095
1096 static void
1097 ttn_cont(nir_builder *b)
1098 {
1099 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_continue);
1100 nir_builder_instr_insert(b, &instr->instr);
1101 }
1102
1103 static void
1104 ttn_brk(nir_builder *b)
1105 {
1106 nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
1107 nir_builder_instr_insert(b, &instr->instr);
1108 }
1109
1110 static void
1111 ttn_endloop(struct ttn_compile *c)
1112 {
1113 nir_builder *b = &c->build;
1114
1115 c->loop_stack_pos--;
1116 b->cursor = c->loop_stack[c->loop_stack_pos];
1117 }
1118
1119 static void
1120 setup_texture_info(nir_tex_instr *instr, unsigned texture)
1121 {
1122 switch (texture) {
1123 case TGSI_TEXTURE_BUFFER:
1124 instr->sampler_dim = GLSL_SAMPLER_DIM_BUF;
1125 break;
1126 case TGSI_TEXTURE_1D:
1127 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1128 break;
1129 case TGSI_TEXTURE_1D_ARRAY:
1130 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1131 instr->is_array = true;
1132 break;
1133 case TGSI_TEXTURE_SHADOW1D:
1134 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1135 instr->is_shadow = true;
1136 break;
1137 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1138 instr->sampler_dim = GLSL_SAMPLER_DIM_1D;
1139 instr->is_shadow = true;
1140 instr->is_array = true;
1141 break;
1142 case TGSI_TEXTURE_2D:
1143 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1144 break;
1145 case TGSI_TEXTURE_2D_ARRAY:
1146 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1147 instr->is_array = true;
1148 break;
1149 case TGSI_TEXTURE_2D_MSAA:
1150 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
1151 break;
1152 case TGSI_TEXTURE_2D_ARRAY_MSAA:
1153 instr->sampler_dim = GLSL_SAMPLER_DIM_MS;
1154 instr->is_array = true;
1155 break;
1156 case TGSI_TEXTURE_SHADOW2D:
1157 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1158 instr->is_shadow = true;
1159 break;
1160 case TGSI_TEXTURE_SHADOW2D_ARRAY:
1161 instr->sampler_dim = GLSL_SAMPLER_DIM_2D;
1162 instr->is_shadow = true;
1163 instr->is_array = true;
1164 break;
1165 case TGSI_TEXTURE_3D:
1166 instr->sampler_dim = GLSL_SAMPLER_DIM_3D;
1167 break;
1168 case TGSI_TEXTURE_CUBE:
1169 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1170 break;
1171 case TGSI_TEXTURE_CUBE_ARRAY:
1172 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1173 instr->is_array = true;
1174 break;
1175 case TGSI_TEXTURE_SHADOWCUBE:
1176 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1177 instr->is_shadow = true;
1178 break;
1179 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
1180 instr->sampler_dim = GLSL_SAMPLER_DIM_CUBE;
1181 instr->is_shadow = true;
1182 instr->is_array = true;
1183 break;
1184 case TGSI_TEXTURE_RECT:
1185 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
1186 break;
1187 case TGSI_TEXTURE_SHADOWRECT:
1188 instr->sampler_dim = GLSL_SAMPLER_DIM_RECT;
1189 instr->is_shadow = true;
1190 break;
1191 default:
1192 fprintf(stderr, "Unknown TGSI texture target %d\n", texture);
1193 abort();
1194 }
1195 }
1196
1197 static void
1198 ttn_tex(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1199 {
1200 nir_builder *b = &c->build;
1201 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1202 nir_tex_instr *instr;
1203 nir_texop op;
1204 unsigned num_srcs, samp = 1, sview, i;
1205
1206 switch (tgsi_inst->Instruction.Opcode) {
1207 case TGSI_OPCODE_TEX:
1208 op = nir_texop_tex;
1209 num_srcs = 1;
1210 break;
1211 case TGSI_OPCODE_TEX2:
1212 op = nir_texop_tex;
1213 num_srcs = 1;
1214 samp = 2;
1215 break;
1216 case TGSI_OPCODE_TXP:
1217 op = nir_texop_tex;
1218 num_srcs = 2;
1219 break;
1220 case TGSI_OPCODE_TXB:
1221 op = nir_texop_txb;
1222 num_srcs = 2;
1223 break;
1224 case TGSI_OPCODE_TXB2:
1225 op = nir_texop_txb;
1226 num_srcs = 2;
1227 samp = 2;
1228 break;
1229 case TGSI_OPCODE_TXL:
1230 op = nir_texop_txl;
1231 num_srcs = 2;
1232 break;
1233 case TGSI_OPCODE_TXL2:
1234 op = nir_texop_txl;
1235 num_srcs = 2;
1236 samp = 2;
1237 break;
1238 case TGSI_OPCODE_TXF:
1239 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
1240 tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA) {
1241 op = nir_texop_txf_ms;
1242 } else {
1243 op = nir_texop_txf;
1244 }
1245 num_srcs = 2;
1246 break;
1247 case TGSI_OPCODE_TXD:
1248 op = nir_texop_txd;
1249 num_srcs = 3;
1250 samp = 3;
1251 break;
1252 case TGSI_OPCODE_LODQ:
1253 op = nir_texop_lod;
1254 num_srcs = 1;
1255 break;
1256
1257 default:
1258 fprintf(stderr, "unknown TGSI tex op %d\n", tgsi_inst->Instruction.Opcode);
1259 abort();
1260 }
1261
1262 if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
1263 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
1264 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
1265 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
1266 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
1267 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
1268 tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
1269 num_srcs++;
1270 }
1271
1272 num_srcs += tgsi_inst->Texture.NumOffsets;
1273
1274 instr = nir_tex_instr_create(b->shader, num_srcs);
1275 instr->op = op;
1276
1277 setup_texture_info(instr, tgsi_inst->Texture.Texture);
1278
1279 switch (instr->sampler_dim) {
1280 case GLSL_SAMPLER_DIM_1D:
1281 case GLSL_SAMPLER_DIM_BUF:
1282 instr->coord_components = 1;
1283 break;
1284 case GLSL_SAMPLER_DIM_2D:
1285 case GLSL_SAMPLER_DIM_RECT:
1286 case GLSL_SAMPLER_DIM_EXTERNAL:
1287 case GLSL_SAMPLER_DIM_MS:
1288 instr->coord_components = 2;
1289 break;
1290 case GLSL_SAMPLER_DIM_3D:
1291 case GLSL_SAMPLER_DIM_CUBE:
1292 instr->coord_components = 3;
1293 break;
1294 case GLSL_SAMPLER_DIM_SUBPASS:
1295 case GLSL_SAMPLER_DIM_SUBPASS_MS:
1296 unreachable("invalid sampler_dim");
1297 }
1298
1299 if (instr->is_array)
1300 instr->coord_components++;
1301
1302 assert(tgsi_inst->Src[samp].Register.File == TGSI_FILE_SAMPLER);
1303 instr->texture_index = tgsi_inst->Src[samp].Register.Index;
1304 instr->sampler_index = tgsi_inst->Src[samp].Register.Index;
1305
1306 /* TODO if we supported any opc's which take an explicit SVIEW
1307 * src, we would use that here instead. But for the "legacy"
1308 * texture opc's the SVIEW index is the same as the SAMP index:
1309 */
1310 sview = instr->texture_index;
1311
1312 if (op == nir_texop_lod) {
1313 instr->dest_type = nir_type_float;
1314 } else if (sview < c->num_samp_types) {
1315 instr->dest_type = c->samp_types[sview];
1316 } else {
1317 instr->dest_type = nir_type_float;
1318 }
1319
1320 unsigned src_number = 0;
1321
1322 instr->src[src_number].src =
1323 nir_src_for_ssa(nir_swizzle(b, src[0], SWIZ(X, Y, Z, W),
1324 instr->coord_components, false));
1325 instr->src[src_number].src_type = nir_tex_src_coord;
1326 src_number++;
1327
1328 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
1329 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1330 instr->src[src_number].src_type = nir_tex_src_projector;
1331 src_number++;
1332 }
1333
1334 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB) {
1335 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1336 instr->src[src_number].src_type = nir_tex_src_bias;
1337 src_number++;
1338 }
1339
1340 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
1341 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1342 instr->src[src_number].src_type = nir_tex_src_bias;
1343 src_number++;
1344 }
1345
1346 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
1347 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1348 instr->src[src_number].src_type = nir_tex_src_lod;
1349 src_number++;
1350 }
1351
1352 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
1353 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1354 instr->src[src_number].src_type = nir_tex_src_lod;
1355 src_number++;
1356 }
1357
1358 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
1359 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1360 if (op == nir_texop_txf_ms)
1361 instr->src[src_number].src_type = nir_tex_src_ms_index;
1362 else
1363 instr->src[src_number].src_type = nir_tex_src_lod;
1364 src_number++;
1365 }
1366
1367 if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
1368 instr->src[src_number].src_type = nir_tex_src_ddx;
1369 instr->src[src_number].src =
1370 nir_src_for_ssa(nir_swizzle(b, src[1], SWIZ(X, Y, Z, W),
1371 nir_tex_instr_src_size(instr, src_number),
1372 false));
1373 src_number++;
1374 instr->src[src_number].src_type = nir_tex_src_ddy;
1375 instr->src[src_number].src =
1376 nir_src_for_ssa(nir_swizzle(b, src[2], SWIZ(X, Y, Z, W),
1377 nir_tex_instr_src_size(instr, src_number),
1378 false));
1379 src_number++;
1380 }
1381
1382 if (instr->is_shadow) {
1383 if (instr->coord_components == 4)
1384 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1385 else if (instr->coord_components == 3)
1386 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1387 else
1388 instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], Z));
1389
1390 instr->src[src_number].src_type = nir_tex_src_comparator;
1391 src_number++;
1392 }
1393
1394 for (i = 0; i < tgsi_inst->Texture.NumOffsets; i++) {
1395 struct tgsi_texture_offset *tex_offset = &tgsi_inst->TexOffsets[i];
1396 /* since TexOffset isn't using tgsi_full_src_register we get to
1397 * do some extra gymnastics:
1398 */
1399 nir_alu_src src;
1400
1401 memset(&src, 0, sizeof(src));
1402
1403 src.src = ttn_src_for_file_and_index(c,
1404 tex_offset->File,
1405 tex_offset->Index,
1406 NULL, NULL, NULL);
1407
1408 src.swizzle[0] = tex_offset->SwizzleX;
1409 src.swizzle[1] = tex_offset->SwizzleY;
1410 src.swizzle[2] = tex_offset->SwizzleZ;
1411 src.swizzle[3] = TGSI_SWIZZLE_W;
1412
1413 instr->src[src_number].src_type = nir_tex_src_offset;
1414 instr->src[src_number].src = nir_src_for_ssa(
1415 nir_fmov_alu(b, src, nir_tex_instr_src_size(instr, src_number)));
1416 src_number++;
1417 }
1418
1419 assert(src_number == num_srcs);
1420
1421 nir_ssa_dest_init(&instr->instr, &instr->dest,
1422 nir_tex_instr_dest_size(instr),
1423 32, NULL);
1424 nir_builder_instr_insert(b, &instr->instr);
1425
1426 /* Resolve the writemask on the texture op. */
1427 ttn_move_dest(b, dest, &instr->dest.ssa);
1428 }
1429
1430 /* TGSI_OPCODE_TXQ is actually two distinct operations:
1431 *
1432 * dst.x = texture\_width(unit, lod)
1433 * dst.y = texture\_height(unit, lod)
1434 * dst.z = texture\_depth(unit, lod)
1435 * dst.w = texture\_levels(unit)
1436 *
1437 * dst.xyz map to NIR txs opcode, and dst.w maps to query_levels
1438 */
1439 static void
1440 ttn_txq(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1441 {
1442 nir_builder *b = &c->build;
1443 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1444 nir_tex_instr *txs, *qlv;
1445
1446 txs = nir_tex_instr_create(b->shader, 1);
1447 txs->op = nir_texop_txs;
1448 setup_texture_info(txs, tgsi_inst->Texture.Texture);
1449
1450 qlv = nir_tex_instr_create(b->shader, 0);
1451 qlv->op = nir_texop_query_levels;
1452 setup_texture_info(qlv, tgsi_inst->Texture.Texture);
1453
1454 assert(tgsi_inst->Src[1].Register.File == TGSI_FILE_SAMPLER);
1455 txs->texture_index = tgsi_inst->Src[1].Register.Index;
1456 qlv->texture_index = tgsi_inst->Src[1].Register.Index;
1457
1458 /* only single src, the lod: */
1459 txs->src[0].src = nir_src_for_ssa(ttn_channel(b, src[0], X));
1460 txs->src[0].src_type = nir_tex_src_lod;
1461
1462 nir_ssa_dest_init(&txs->instr, &txs->dest,
1463 nir_tex_instr_dest_size(txs), 32, NULL);
1464 nir_builder_instr_insert(b, &txs->instr);
1465
1466 nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, 32, NULL);
1467 nir_builder_instr_insert(b, &qlv->instr);
1468
1469 ttn_move_dest_masked(b, dest, &txs->dest.ssa, TGSI_WRITEMASK_XYZ);
1470 ttn_move_dest_masked(b, dest, &qlv->dest.ssa, TGSI_WRITEMASK_W);
1471 }
1472
1473 static const nir_op op_trans[TGSI_OPCODE_LAST] = {
1474 [TGSI_OPCODE_ARL] = 0,
1475 [TGSI_OPCODE_MOV] = nir_op_fmov,
1476 [TGSI_OPCODE_LIT] = 0,
1477 [TGSI_OPCODE_RCP] = nir_op_frcp,
1478 [TGSI_OPCODE_RSQ] = nir_op_frsq,
1479 [TGSI_OPCODE_EXP] = 0,
1480 [TGSI_OPCODE_LOG] = 0,
1481 [TGSI_OPCODE_MUL] = nir_op_fmul,
1482 [TGSI_OPCODE_ADD] = nir_op_fadd,
1483 [TGSI_OPCODE_DP3] = 0,
1484 [TGSI_OPCODE_DP4] = 0,
1485 [TGSI_OPCODE_DST] = 0,
1486 [TGSI_OPCODE_MIN] = nir_op_fmin,
1487 [TGSI_OPCODE_MAX] = nir_op_fmax,
1488 [TGSI_OPCODE_SLT] = nir_op_slt,
1489 [TGSI_OPCODE_SGE] = nir_op_sge,
1490 [TGSI_OPCODE_MAD] = nir_op_ffma,
1491 [TGSI_OPCODE_LRP] = 0,
1492 [TGSI_OPCODE_SQRT] = nir_op_fsqrt,
1493 [TGSI_OPCODE_FRC] = nir_op_ffract,
1494 [TGSI_OPCODE_FLR] = nir_op_ffloor,
1495 [TGSI_OPCODE_ROUND] = nir_op_fround_even,
1496 [TGSI_OPCODE_EX2] = nir_op_fexp2,
1497 [TGSI_OPCODE_LG2] = nir_op_flog2,
1498 [TGSI_OPCODE_POW] = nir_op_fpow,
1499 [TGSI_OPCODE_COS] = nir_op_fcos,
1500 [TGSI_OPCODE_DDX] = nir_op_fddx,
1501 [TGSI_OPCODE_DDY] = nir_op_fddy,
1502 [TGSI_OPCODE_KILL] = 0,
1503 [TGSI_OPCODE_PK2H] = 0, /* XXX */
1504 [TGSI_OPCODE_PK2US] = 0, /* XXX */
1505 [TGSI_OPCODE_PK4B] = 0, /* XXX */
1506 [TGSI_OPCODE_PK4UB] = 0, /* XXX */
1507 [TGSI_OPCODE_SEQ] = nir_op_seq,
1508 [TGSI_OPCODE_SGT] = 0,
1509 [TGSI_OPCODE_SIN] = nir_op_fsin,
1510 [TGSI_OPCODE_SNE] = nir_op_sne,
1511 [TGSI_OPCODE_SLE] = 0,
1512 [TGSI_OPCODE_TEX] = 0,
1513 [TGSI_OPCODE_TXD] = 0,
1514 [TGSI_OPCODE_TXP] = 0,
1515 [TGSI_OPCODE_UP2H] = 0, /* XXX */
1516 [TGSI_OPCODE_UP2US] = 0, /* XXX */
1517 [TGSI_OPCODE_UP4B] = 0, /* XXX */
1518 [TGSI_OPCODE_UP4UB] = 0, /* XXX */
1519 [TGSI_OPCODE_ARR] = 0,
1520
1521 /* No function calls, yet. */
1522 [TGSI_OPCODE_CAL] = 0, /* XXX */
1523 [TGSI_OPCODE_RET] = 0, /* XXX */
1524
1525 [TGSI_OPCODE_SSG] = nir_op_fsign,
1526 [TGSI_OPCODE_CMP] = 0,
1527 [TGSI_OPCODE_TXB] = 0,
1528 [TGSI_OPCODE_DIV] = nir_op_fdiv,
1529 [TGSI_OPCODE_DP2] = 0,
1530 [TGSI_OPCODE_TXL] = 0,
1531
1532 [TGSI_OPCODE_BRK] = 0,
1533 [TGSI_OPCODE_IF] = 0,
1534 [TGSI_OPCODE_UIF] = 0,
1535 [TGSI_OPCODE_ELSE] = 0,
1536 [TGSI_OPCODE_ENDIF] = 0,
1537
1538 [TGSI_OPCODE_DDX_FINE] = nir_op_fddx_fine,
1539 [TGSI_OPCODE_DDY_FINE] = nir_op_fddy_fine,
1540
1541 [TGSI_OPCODE_CEIL] = nir_op_fceil,
1542 [TGSI_OPCODE_I2F] = nir_op_i2f32,
1543 [TGSI_OPCODE_NOT] = nir_op_inot,
1544 [TGSI_OPCODE_TRUNC] = nir_op_ftrunc,
1545 [TGSI_OPCODE_SHL] = nir_op_ishl,
1546 [TGSI_OPCODE_AND] = nir_op_iand,
1547 [TGSI_OPCODE_OR] = nir_op_ior,
1548 [TGSI_OPCODE_MOD] = nir_op_umod,
1549 [TGSI_OPCODE_XOR] = nir_op_ixor,
1550 [TGSI_OPCODE_TXF] = 0,
1551 [TGSI_OPCODE_TXQ] = 0,
1552
1553 [TGSI_OPCODE_CONT] = 0,
1554
1555 [TGSI_OPCODE_EMIT] = 0, /* XXX */
1556 [TGSI_OPCODE_ENDPRIM] = 0, /* XXX */
1557
1558 [TGSI_OPCODE_BGNLOOP] = 0,
1559 [TGSI_OPCODE_BGNSUB] = 0, /* XXX: no function calls */
1560 [TGSI_OPCODE_ENDLOOP] = 0,
1561 [TGSI_OPCODE_ENDSUB] = 0, /* XXX: no function calls */
1562
1563 [TGSI_OPCODE_NOP] = 0,
1564 [TGSI_OPCODE_FSEQ] = nir_op_feq,
1565 [TGSI_OPCODE_FSGE] = nir_op_fge,
1566 [TGSI_OPCODE_FSLT] = nir_op_flt,
1567 [TGSI_OPCODE_FSNE] = nir_op_fne,
1568
1569 [TGSI_OPCODE_KILL_IF] = 0,
1570
1571 [TGSI_OPCODE_END] = 0,
1572
1573 [TGSI_OPCODE_F2I] = nir_op_f2i32,
1574 [TGSI_OPCODE_IDIV] = nir_op_idiv,
1575 [TGSI_OPCODE_IMAX] = nir_op_imax,
1576 [TGSI_OPCODE_IMIN] = nir_op_imin,
1577 [TGSI_OPCODE_INEG] = nir_op_ineg,
1578 [TGSI_OPCODE_ISGE] = nir_op_ige,
1579 [TGSI_OPCODE_ISHR] = nir_op_ishr,
1580 [TGSI_OPCODE_ISLT] = nir_op_ilt,
1581 [TGSI_OPCODE_F2U] = nir_op_f2u32,
1582 [TGSI_OPCODE_U2F] = nir_op_u2f32,
1583 [TGSI_OPCODE_UADD] = nir_op_iadd,
1584 [TGSI_OPCODE_UDIV] = nir_op_udiv,
1585 [TGSI_OPCODE_UMAD] = 0,
1586 [TGSI_OPCODE_UMAX] = nir_op_umax,
1587 [TGSI_OPCODE_UMIN] = nir_op_umin,
1588 [TGSI_OPCODE_UMOD] = nir_op_umod,
1589 [TGSI_OPCODE_UMUL] = nir_op_imul,
1590 [TGSI_OPCODE_USEQ] = nir_op_ieq,
1591 [TGSI_OPCODE_USGE] = nir_op_uge,
1592 [TGSI_OPCODE_USHR] = nir_op_ushr,
1593 [TGSI_OPCODE_USLT] = nir_op_ult,
1594 [TGSI_OPCODE_USNE] = nir_op_ine,
1595
1596 [TGSI_OPCODE_SWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1597 [TGSI_OPCODE_CASE] = 0, /* not emitted by glsl_to_tgsi.cpp */
1598 [TGSI_OPCODE_DEFAULT] = 0, /* not emitted by glsl_to_tgsi.cpp */
1599 [TGSI_OPCODE_ENDSWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1600
1601 /* XXX: SAMPLE opcodes */
1602
1603 [TGSI_OPCODE_UARL] = nir_op_imov,
1604 [TGSI_OPCODE_UCMP] = 0,
1605 [TGSI_OPCODE_IABS] = nir_op_iabs,
1606 [TGSI_OPCODE_ISSG] = nir_op_isign,
1607
1608 /* XXX: atomics */
1609
1610 [TGSI_OPCODE_TEX2] = 0,
1611 [TGSI_OPCODE_TXB2] = 0,
1612 [TGSI_OPCODE_TXL2] = 0,
1613
1614 [TGSI_OPCODE_IMUL_HI] = nir_op_imul_high,
1615 [TGSI_OPCODE_UMUL_HI] = nir_op_umul_high,
1616
1617 [TGSI_OPCODE_TG4] = 0,
1618 [TGSI_OPCODE_LODQ] = 0,
1619
1620 [TGSI_OPCODE_IBFE] = nir_op_ibitfield_extract,
1621 [TGSI_OPCODE_UBFE] = nir_op_ubitfield_extract,
1622 [TGSI_OPCODE_BFI] = nir_op_bitfield_insert,
1623 [TGSI_OPCODE_BREV] = nir_op_bitfield_reverse,
1624 [TGSI_OPCODE_POPC] = nir_op_bit_count,
1625 [TGSI_OPCODE_LSB] = nir_op_find_lsb,
1626 [TGSI_OPCODE_IMSB] = nir_op_ifind_msb,
1627 [TGSI_OPCODE_UMSB] = nir_op_ufind_msb,
1628
1629 [TGSI_OPCODE_INTERP_CENTROID] = 0, /* XXX */
1630 [TGSI_OPCODE_INTERP_SAMPLE] = 0, /* XXX */
1631 [TGSI_OPCODE_INTERP_OFFSET] = 0, /* XXX */
1632 };
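/* A zero entry in op_trans[] means the opcode has no direct one-to-one NIR
 * ALU equivalent: it is either expanded by a dedicated ttn_*() helper (or a
 * case in ttn_emit_instruction() below), or simply unsupported.  For
 * example, TGSI_OPCODE_MUL maps straight to nir_op_fmul, while
 * TGSI_OPCODE_LIT (entry 0) is expanded by ttn_lit().
 */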
1633
1634 static void
1635 ttn_emit_instruction(struct ttn_compile *c)
1636 {
1637 nir_builder *b = &c->build;
1638 struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1639 unsigned i;
1640 unsigned tgsi_op = tgsi_inst->Instruction.Opcode;
1641 struct tgsi_full_dst_register *tgsi_dst = &tgsi_inst->Dst[0];
1642
1643 if (tgsi_op == TGSI_OPCODE_END)
1644 return;
1645
1646 nir_ssa_def *src[TGSI_FULL_MAX_SRC_REGISTERS];
1647 for (i = 0; i < tgsi_inst->Instruction.NumSrcRegs; i++) {
1648 src[i] = ttn_get_src(c, &tgsi_inst->Src[i], i);
1649 }
1650 nir_alu_dest dest = ttn_get_dest(c, tgsi_dst);
1651
1652 switch (tgsi_op) {
1653 case TGSI_OPCODE_RSQ:
1654 ttn_move_dest(b, dest, nir_frsq(b, ttn_channel(b, src[0], X)));
1655 break;
1656
1657 case TGSI_OPCODE_SQRT:
1658 ttn_move_dest(b, dest, nir_fsqrt(b, ttn_channel(b, src[0], X)));
1659 break;
1660
1661 case TGSI_OPCODE_RCP:
1662 ttn_move_dest(b, dest, nir_frcp(b, ttn_channel(b, src[0], X)));
1663 break;
1664
1665 case TGSI_OPCODE_EX2:
1666 ttn_move_dest(b, dest, nir_fexp2(b, ttn_channel(b, src[0], X)));
1667 break;
1668
1669 case TGSI_OPCODE_LG2:
1670 ttn_move_dest(b, dest, nir_flog2(b, ttn_channel(b, src[0], X)));
1671 break;
1672
1673 case TGSI_OPCODE_POW:
1674 ttn_move_dest(b, dest, nir_fpow(b,
1675 ttn_channel(b, src[0], X),
1676 ttn_channel(b, src[1], X)));
1677 break;
1678
1679 case TGSI_OPCODE_COS:
1680 ttn_move_dest(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)));
1681 break;
1682
1683 case TGSI_OPCODE_SIN:
1684 ttn_move_dest(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)));
1685 break;
1686
1687 case TGSI_OPCODE_ARL:
1688 ttn_arl(b, op_trans[tgsi_op], dest, src);
1689 break;
1690
1691 case TGSI_OPCODE_EXP:
1692 ttn_exp(b, op_trans[tgsi_op], dest, src);
1693 break;
1694
1695 case TGSI_OPCODE_LOG:
1696 ttn_log(b, op_trans[tgsi_op], dest, src);
1697 break;
1698
1699 case TGSI_OPCODE_DST:
1700 ttn_dst(b, op_trans[tgsi_op], dest, src);
1701 break;
1702
1703 case TGSI_OPCODE_LIT:
1704 ttn_lit(b, op_trans[tgsi_op], dest, src);
1705 break;
1706
1707 case TGSI_OPCODE_DP2:
1708 ttn_dp2(b, op_trans[tgsi_op], dest, src);
1709 break;
1710
1711 case TGSI_OPCODE_DP3:
1712 ttn_dp3(b, op_trans[tgsi_op], dest, src);
1713 break;
1714
1715 case TGSI_OPCODE_DP4:
1716 ttn_dp4(b, op_trans[tgsi_op], dest, src);
1717 break;
1718
1719 case TGSI_OPCODE_UMAD:
1720 ttn_umad(b, op_trans[tgsi_op], dest, src);
1721 break;
1722
1723 case TGSI_OPCODE_LRP:
1724 ttn_move_dest(b, dest, nir_flrp(b, src[2], src[1], src[0]));
1725 break;
1726
1727 case TGSI_OPCODE_KILL:
1728 ttn_kill(b, op_trans[tgsi_op], dest, src);
1729 break;
1730
1731 case TGSI_OPCODE_ARR:
1732 ttn_arr(b, op_trans[tgsi_op], dest, src);
1733 break;
1734
1735 case TGSI_OPCODE_CMP:
1736 ttn_cmp(b, op_trans[tgsi_op], dest, src);
1737 break;
1738
1739 case TGSI_OPCODE_UCMP:
1740 ttn_ucmp(b, op_trans[tgsi_op], dest, src);
1741 break;
1742
1743 case TGSI_OPCODE_SGT:
1744 ttn_sgt(b, op_trans[tgsi_op], dest, src);
1745 break;
1746
1747 case TGSI_OPCODE_SLE:
1748 ttn_sle(b, op_trans[tgsi_op], dest, src);
1749 break;
1750
1751 case TGSI_OPCODE_KILL_IF:
1752 ttn_kill_if(b, op_trans[tgsi_op], dest, src);
1753 break;
1754
1755 case TGSI_OPCODE_TEX:
1756 case TGSI_OPCODE_TXP:
1757 case TGSI_OPCODE_TXL:
1758 case TGSI_OPCODE_TXB:
1759 case TGSI_OPCODE_TXD:
1760 case TGSI_OPCODE_TEX2:
1761 case TGSI_OPCODE_TXL2:
1762 case TGSI_OPCODE_TXB2:
1763 case TGSI_OPCODE_TXF:
1764 case TGSI_OPCODE_TG4:
1765 case TGSI_OPCODE_LODQ:
1766 ttn_tex(c, dest, src);
1767 break;
1768
1769 case TGSI_OPCODE_TXQ:
1770 ttn_txq(c, dest, src);
1771 break;
1772
1773 case TGSI_OPCODE_NOP:
1774 break;
1775
1776 case TGSI_OPCODE_IF:
1777 ttn_if(c, src[0], false);
1778 break;
1779
1780 case TGSI_OPCODE_UIF:
1781 ttn_if(c, src[0], true);
1782 break;
1783
1784 case TGSI_OPCODE_ELSE:
1785 ttn_else(c);
1786 break;
1787
1788 case TGSI_OPCODE_ENDIF:
1789 ttn_endif(c);
1790 break;
1791
1792 case TGSI_OPCODE_BGNLOOP:
1793 ttn_bgnloop(c);
1794 break;
1795
1796 case TGSI_OPCODE_BRK:
1797 ttn_brk(b);
1798 break;
1799
1800 case TGSI_OPCODE_CONT:
1801 ttn_cont(b);
1802 break;
1803
1804 case TGSI_OPCODE_ENDLOOP:
1805 ttn_endloop(c);
1806 break;
1807
1808 default:
1809 if (op_trans[tgsi_op] != 0 || tgsi_op == TGSI_OPCODE_MOV) {
1810 ttn_alu(b, op_trans[tgsi_op], dest, src);
1811 } else {
1812 fprintf(stderr, "unknown TGSI opcode: %s\n",
1813 tgsi_get_opcode_name(tgsi_op));
1814 abort();
1815 }
1816 break;
1817 }
1818
1819 if (tgsi_inst->Instruction.Saturate) {
1820 assert(!dest.dest.is_ssa);
1821 ttn_move_dest(b, dest, nir_fsat(b, ttn_src_for_dest(b, &dest)));
1822 }
1823
1824 /* if the dst has a matching var, append store_var to move
1825 * output from reg to var
1826 */
1827 nir_variable *var = ttn_get_var(c, tgsi_dst);
1828 if (var) {
1829 unsigned index = tgsi_dst->Register.Index;
1830 unsigned offset = c->temp_regs[index].offset;
1831 nir_intrinsic_instr *store =
1832 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
1833 struct tgsi_ind_register *indirect = tgsi_dst->Register.Indirect ?
1834 &tgsi_dst->Indirect : NULL;
1835
1836 store->num_components = 4;
1837 nir_intrinsic_set_write_mask(store, dest.write_mask);
1838 store->variables[0] = ttn_array_deref(c, store, var, offset, indirect);
1839 store->src[0] = nir_src_for_reg(dest.dest.reg.reg);
1840
1841 nir_builder_instr_insert(b, &store->instr);
1842 }
1843 }
1844
1845 /**
1846 * Emits a NIR store intrinsic for each TGSI_FILE_OUTPUT value into its
1847 * output variable at the end of the shader.
1848 *
1849 * We don't generate these incrementally as the TGSI_FILE_OUTPUT values are
1850 * written, because there's no output load intrinsic, which means we couldn't
1851 * handle writemasks.
1852 */
1853 static void
1854 ttn_add_output_stores(struct ttn_compile *c)
1855 {
1856 nir_builder *b = &c->build;
1857
1858 foreach_list_typed(nir_variable, var, node, &b->shader->outputs) {
1859 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1860 unsigned i;
1861
1862 for (i = 0; i < array_len; i++) {
1863 nir_intrinsic_instr *store =
1864 nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_output);
1865 unsigned loc = var->data.driver_location + i;
1866
1867 nir_src src = nir_src_for_reg(c->output_regs[loc].reg);
1868 src.reg.base_offset = c->output_regs[loc].offset;
1869
1870 if (c->build.shader->info.stage == MESA_SHADER_FRAGMENT &&
1871 var->data.location == FRAG_RESULT_DEPTH) {
1872 /* TGSI uses TGSI_SEMANTIC_POSITION.z for the depth output, while
1873 * NIR uses a single float FRAG_RESULT_DEPTH.
1874 */
1875 src = nir_src_for_ssa(nir_channel(b, nir_ssa_for_src(b, src, 4), 2));
1876 store->num_components = 1;
1877 } else {
1878 store->num_components = 4;
1879 }
1880 store->src[0] = src;
1881
1882 nir_intrinsic_set_base(store, loc);
1883 nir_intrinsic_set_write_mask(store, 0xf);
1884 store->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
1885 nir_builder_instr_insert(b, &store->instr);
1886 }
1887 }
1888 }
1889
1890 static gl_shader_stage
1891 tgsi_processor_to_shader_stage(unsigned processor)
1892 {
1893 switch (processor) {
1894 case PIPE_SHADER_FRAGMENT: return MESA_SHADER_FRAGMENT;
1895 case PIPE_SHADER_VERTEX: return MESA_SHADER_VERTEX;
1896 case PIPE_SHADER_GEOMETRY: return MESA_SHADER_GEOMETRY;
1897 case PIPE_SHADER_TESS_CTRL: return MESA_SHADER_TESS_CTRL;
1898 case PIPE_SHADER_TESS_EVAL: return MESA_SHADER_TESS_EVAL;
1899 case PIPE_SHADER_COMPUTE: return MESA_SHADER_COMPUTE;
1900 default:
1901 unreachable("invalid TGSI processor");
1902 }
1903 }
1904
1905 struct nir_shader *
1906 tgsi_to_nir(const void *tgsi_tokens,
1907 const nir_shader_compiler_options *options)
1908 {
1909 struct tgsi_parse_context parser;
1910 struct tgsi_shader_info scan;
1911 struct ttn_compile *c;
1912 struct nir_shader *s;
1913 int ret;
1914
1915 c = rzalloc(NULL, struct ttn_compile);
1916
1917 tgsi_scan_shader(tgsi_tokens, &scan);
1918 c->scan = &scan;
1919
1920 nir_builder_init_simple_shader(&c->build, NULL,
1921 tgsi_processor_to_shader_stage(scan.processor),
1922 options);
1923 s = c->build.shader;
1924
1925 s->num_inputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1926 s->num_uniforms = scan.const_file_max[0] + 1;
1927 s->num_outputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1928
1929 c->output_regs = rzalloc_array(c, struct ttn_reg_info,
1930 scan.file_max[TGSI_FILE_OUTPUT] + 1);
1931 c->temp_regs = rzalloc_array(c, struct ttn_reg_info,
1932 scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1933 c->imm_defs = rzalloc_array(c, nir_ssa_def *,
1934 scan.file_max[TGSI_FILE_IMMEDIATE] + 1);
1935
1936 c->num_samp_types = scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
1937 c->samp_types = rzalloc_array(c, nir_alu_type, c->num_samp_types);
1938
1939 c->if_stack = rzalloc_array(c, nir_cursor,
1940 (scan.opcode_count[TGSI_OPCODE_IF] +
1941 scan.opcode_count[TGSI_OPCODE_UIF]) * 2);
1942 c->loop_stack = rzalloc_array(c, nir_cursor,
1943 scan.opcode_count[TGSI_OPCODE_BGNLOOP]);
1944
1945 ret = tgsi_parse_init(&parser, tgsi_tokens);
1946 assert(ret == TGSI_PARSE_OK);
1947
1948 while (!tgsi_parse_end_of_tokens(&parser)) {
1949 tgsi_parse_token(&parser);
1950 c->token = &parser.FullToken;
1951
1952 switch (parser.FullToken.Token.Type) {
1953 case TGSI_TOKEN_TYPE_DECLARATION:
1954 ttn_emit_declaration(c);
1955 break;
1956
1957 case TGSI_TOKEN_TYPE_INSTRUCTION:
1958 ttn_emit_instruction(c);
1959 break;
1960
1961 case TGSI_TOKEN_TYPE_IMMEDIATE:
1962 ttn_emit_immediate(c);
1963 break;
1964 }
1965 }
1966
1967 tgsi_parse_free(&parser);
1968
1969 ttn_add_output_stores(c);
1970
1971 ralloc_free(c);
1972 return s;
1973 }
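/*
 * Example usage -- an illustrative sketch only.  The "screen" and "cso"
 * names below are assumptions standing in for driver-specific objects, and
 * the cleanup passes shown are typical choices rather than requirements:
 *
 *    const struct nir_shader_compiler_options *options =
 *       screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
 *                                    PIPE_SHADER_FRAGMENT);
 *    nir_shader *s = tgsi_to_nir(cso->tokens, options);
 *
 *    NIR_PASS_V(s, nir_lower_regs_to_ssa);
 *    NIR_PASS_V(s, nir_copy_prop);
 *    NIR_PASS_V(s, nir_opt_dce);
 */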